python_code (stringlengths 0-1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5-99)
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "nested_trust_common.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, u64);
} sk_storage_map SEC(".maps");
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
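/* Each program below is a negative test: the __failure/__msg annotations give
 * the verifier error that loading it is expected to produce.
 */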
SEC("tp_btf/task_newtask")
__failure __msg("R2 must be")
int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
{
bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
{
bpf_cpumask_first_zero(&task->cpus_mask);
return 0;
}
/* Although R2 is of type sk_buff while sock_common is expected, we will hit the untrusted ptr check first. */
SEC("tp_btf/tcp_probe")
__failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
int BPF_PROG(test_invalid_skb_field, struct sock *sk, struct sk_buff *skb)
{
bpf_sk_storage_get(&sk_storage_map, skb->next, 0, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/nested_trust_failure.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Check if we can migrate child sockets.
*
* 1. If reuse_md->migrating_sk is NULL (SYN packet),
* return SK_PASS without selecting a listener.
* 2. If reuse_md->migrating_sk is not NULL (socket migration),
* select a listener (reuseport_map[migrate_map[cookie]])
*
* Author: Kuniyuki Iwashima <[email protected]>
*/
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 256);
__type(key, int);
__type(value, __u64);
} reuseport_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 256);
__type(key, __u64);
__type(value, int);
} migrate_map SEC(".maps");
int migrated_at_close = 0;
int migrated_at_close_fastopen = 0;
int migrated_at_send_synack = 0;
int migrated_at_recv_ack = 0;
__be16 server_port;
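/* XDP program: drop TCP segments to server_port that have ACK set but not
 * SYN (e.g. the final ACK of the 3-way handshake); everything else passes.
 * This keeps requests from completing so they remain eligible for migration.
 */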
SEC("xdp")
int drop_ack(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eth = data;
struct tcphdr *tcp = NULL;
if (eth + 1 > data_end)
goto pass;
switch (bpf_ntohs(eth->h_proto)) {
case ETH_P_IP: {
struct iphdr *ip = (struct iphdr *)(eth + 1);
if (ip + 1 > data_end)
goto pass;
if (ip->protocol != IPPROTO_TCP)
goto pass;
tcp = (struct tcphdr *)((void *)ip + ip->ihl * 4);
break;
}
case ETH_P_IPV6: {
struct ipv6hdr *ipv6 = (struct ipv6hdr *)(eth + 1);
if (ipv6 + 1 > data_end)
goto pass;
if (ipv6->nexthdr != IPPROTO_TCP)
goto pass;
tcp = (struct tcphdr *)(ipv6 + 1);
break;
}
default:
goto pass;
}
if (tcp + 1 > data_end)
goto pass;
if (tcp->dest != server_port)
goto pass;
if (!tcp->syn && tcp->ack)
return XDP_DROP;
pass:
return XDP_PASS;
}
SEC("sk_reuseport/migrate")
int migrate_reuseport(struct sk_reuseport_md *reuse_md)
{
int *key, flags = 0, state, err;
__u64 cookie;
if (!reuse_md->migrating_sk)
return SK_PASS;
state = reuse_md->migrating_sk->state;
cookie = bpf_get_socket_cookie(reuse_md->sk);
key = bpf_map_lookup_elem(&migrate_map, &cookie);
if (!key)
return SK_DROP;
err = bpf_sk_select_reuseport(reuse_md, &reuseport_map, key, flags);
if (err)
return SK_PASS;
switch (state) {
case BPF_TCP_ESTABLISHED:
__sync_fetch_and_add(&migrated_at_close, 1);
break;
case BPF_TCP_SYN_RECV:
__sync_fetch_and_add(&migrated_at_close_fastopen, 1);
break;
case BPF_TCP_NEW_SYN_RECV:
if (!reuse_md->len)
__sync_fetch_and_add(&migrated_at_send_synack, 1);
else
__sync_fetch_and_add(&migrated_at_recv_ack, 1);
break;
}
return SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_migrate_reuseport.c |
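The reuseport_map/migrate_map pair above is populated from userspace. Below is a minimal libbpf sketch of that setup, assuming the map fds, a new listener fd, and the cookie that migrate_reuseport() will look up (bpf_get_socket_cookie(reuse_md->sk)) are already known; the function and variable names are illustrative, not taken from the selftest.

#include <bpf/bpf.h>

static int setup_migration(int reuseport_map_fd, int migrate_map_fd,
			   int new_listener_fd, __u64 cookie)
{
	int index = 0;
	__u64 value = new_listener_fd;

	/* Place the target listener in slot 0 of the sockarray. */
	if (bpf_map_update_elem(reuseport_map_fd, &index, &value, BPF_ANY))
		return -1;

	/* migrate_map[cookie] = 0, so the BPF program selects
	 * reuseport_map[migrate_map[cookie]] for migrated requests.
	 */
	return bpf_map_update_elem(migrate_map_fd, &cookie, &index, BPF_ANY);
}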
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020, Oracle and/or its affiliates.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
const char fmt[] = "Testing,testing %d\n";
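/* On each sys_nanosleep entry, emit a message via bpf_trace_printk() and
 * record its return value plus a running count of invocations.
 */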
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int sys_enter(void *ctx)
{
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/trace_printk.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("check w reg equal if r reg upper32 bits 0")
__success
__naked void subreg_equality_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
*(u64 *)(r10 - 8) = r0; \
r2 = *(u32 *)(r10 - 8); \
/* At this point upper 4-bytes of r2 are 0, \
* thus insn w3 = w2 should propagate reg id, \
* and w2 < 9 comparison would also propagate \
* the range for r3. \
*/ \
w3 = w2; \
if w2 < 9 goto l0_%=; \
exit; \
l0_%=: if r3 < 9 goto l1_%=; \
/* r1 read is illegal at this point */ \
r0 -= r1; \
l1_%=: exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check w reg not equal if r reg upper32 bits not 0")
__failure __msg("R1 !read_ok")
__naked void subreg_equality_2(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
r2 = r0; \
/* Upper 4-bytes of r2 may not be 0, thus insn \
* w3 = w2 should not propagate reg id, and \
* w2 < 9 comparison should not propagate \
* the range for r3 either. \
*/ \
w3 = w2; \
if w2 < 9 goto l0_%=; \
exit; \
l0_%=: if r3 < 9 goto l1_%=; \
/* r1 read is illegal at this point */ \
r0 -= r1; \
l1_%=: exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_reg_equal.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
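/* Read the first 4 bytes of the XDP metadata area (after checking that it
 * precedes the packet data), consume them via bpf_xdp_adjust_meta(), and
 * return the stored value as the XDP verdict.
 */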
SEC("xdp")
int xdp_context(struct xdp_md *xdp)
{
void *data = (void *)(long)xdp->data;
__u32 *metadata = (void *)(long)xdp->data_meta;
__u32 ret;
if (metadata + 1 > data)
return XDP_ABORTED;
ret = *metadata;
if (bpf_xdp_adjust_meta(xdp, 4))
return XDP_ABORTED;
return ret;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1000);
__type(key, struct sample);
__type(value, int);
} hash_map SEC(".maps");
/* inputs */
int pid = 0;
/* inner state */
long seq = 0;
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_ringbuf_mem_map_key(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample, sample_copy;
int *lookup_val;
if (cur_pid != pid)
return 0;
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample)
return 0;
sample->pid = pid;
bpf_get_current_comm(sample->comm, sizeof(sample->comm));
sample->seq = ++seq;
sample->value = 42;
/* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg
*/
lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample);
__sink(lookup_val);
/* workaround - memcpy is necessary so that verifier doesn't
* complain with:
* verifier internal error: more than one arg with ref_obj_id R3
* when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY);
*
* Since bpf_map_lookup_elem above uses 'sample' as key, test using
* sample field as value below
*/
__builtin_memcpy(&sample_copy, sample, sizeof(struct sample));
bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY);
bpf_ringbuf_submit(sample, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_too_small x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_too_small.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <stdbool.h>
#include <stdint.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
enum pkt_parse_err {
NO_ERR,
BAD_IP6_HDR,
BAD_IP4GUE_HDR,
BAD_IP6GUE_HDR,
};
enum pkt_flag {
TUNNEL = 0x1,
TCP_SYN = 0x2,
QUIC_INITIAL_FLAG = 0x4,
TCP_ACK = 0x8,
TCP_RST = 0x10
};
struct v4_lpm_key {
__u32 prefixlen;
__u32 src;
};
struct v4_lpm_val {
struct v4_lpm_key key;
__u8 val;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16);
__type(key, struct in6_addr);
__type(value, bool);
} v6_addr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 16);
__type(key, __u32);
__type(value, bool);
} v4_addr_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(max_entries, 16);
__uint(key_size, sizeof(struct v4_lpm_key));
__uint(value_size, sizeof(struct v4_lpm_val));
__uint(map_flags, BPF_F_NO_PREALLOC);
} v4_lpm_val_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16);
__type(key, int);
__type(value, __u8);
} tcp_port_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16);
__type(key, int);
__type(value, __u16);
} udp_port_map SEC(".maps");
enum ip_type { V4 = 1, V6 = 2 };
struct fw_match_info {
__u8 v4_src_ip_match;
__u8 v6_src_ip_match;
__u8 v4_src_prefix_match;
__u8 v4_dst_prefix_match;
__u8 tcp_dp_match;
__u16 udp_sp_match;
__u16 udp_dp_match;
bool is_tcp;
bool is_tcp_syn;
};
struct pkt_info {
enum ip_type type;
union {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
} ip;
int sport;
int dport;
__u16 trans_hdr_offset;
__u8 proto;
__u8 flags;
};
static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)
{
struct ethhdr *eth = data;
if (eth + 1 > data_end)
return NULL;
return eth;
}
static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr)
{
__u8 *leaf;
leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr);
return leaf ? *leaf : 0;
}
static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr)
{
__u8 *leaf;
leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr);
return leaf ? *leaf : 0;
}
static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr)
{
struct v4_lpm_key v4_key = {};
struct v4_lpm_val *lpm_val;
v4_key.src = ipaddr;
v4_key.prefixlen = 32;
lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key);
return lpm_val ? lpm_val->val : 0;
}
static __always_inline void
filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info)
{
if (info->type == V6) {
match_info->v6_src_ip_match =
filter_ipv6_addr(&info->ip.ipv6->saddr);
} else if (info->type == V4) {
match_info->v4_src_ip_match =
filter_ipv4_addr(info->ip.ipv4->saddr);
match_info->v4_src_prefix_match =
filter_ipv4_lpm(info->ip.ipv4->saddr);
match_info->v4_dst_prefix_match =
filter_ipv4_lpm(info->ip.ipv4->daddr);
}
}
static __always_inline void *
get_transport_hdr(__u16 offset, void *data, void *data_end)
{
if (offset > 255 || data + offset > data_end)
return NULL;
return data + offset;
}
static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp,
__u32 FLAG)
{
return (tcp_flag_word(tcp) &
(TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG;
}
static __always_inline void set_tcp_flags(struct pkt_info *info,
struct tcphdr *tcp) {
if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN))
info->flags |= TCP_SYN;
else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK))
info->flags |= TCP_ACK;
else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST))
info->flags |= TCP_RST;
}
static __always_inline bool
parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)
{
struct tcphdr *tcp = transport_hdr;
if (tcp + 1 > data_end)
return false;
info->sport = bpf_ntohs(tcp->source);
info->dport = bpf_ntohs(tcp->dest);
set_tcp_flags(info, tcp);
return true;
}
static __always_inline bool
parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)
{
struct udphdr *udp = transport_hdr;
if (udp + 1 > data_end)
return false;
info->sport = bpf_ntohs(udp->source);
info->dport = bpf_ntohs(udp->dest);
return true;
}
static __always_inline __u8 filter_tcp_port(int port)
{
__u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port);
return leaf ? *leaf : 0;
}
static __always_inline __u16 filter_udp_port(int port)
{
__u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port);
return leaf ? *leaf : 0;
}
static __always_inline bool
filter_transport_hdr(void *transport_hdr, void *data_end,
struct pkt_info *info, struct fw_match_info *match_info)
{
if (info->proto == IPPROTO_TCP) {
if (!parse_tcp(info, transport_hdr, data_end))
return false;
match_info->is_tcp = true;
match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0;
match_info->tcp_dp_match = filter_tcp_port(info->dport);
} else if (info->proto == IPPROTO_UDP) {
if (!parse_udp(info, transport_hdr, data_end))
return false;
match_info->udp_dp_match = filter_udp_port(info->dport);
match_info->udp_sp_match = filter_udp_port(info->sport);
}
return true;
}
static __always_inline __u8
parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end)
{
struct udphdr *udp = (struct udphdr *)(ip6h + 1);
void *encap_data = udp + 1;
if (udp + 1 > data_end)
return BAD_IP6_HDR;
if (udp->dest != bpf_htons(6666))
return NO_ERR;
info->flags |= TUNNEL;
if (encap_data + 1 > data_end)
return BAD_IP6GUE_HDR;
if (*(__u8 *)encap_data & 0x30) {
struct ipv6hdr *inner_ip6h = encap_data;
if (inner_ip6h + 1 > data_end)
return BAD_IP6GUE_HDR;
info->type = V6;
info->proto = inner_ip6h->nexthdr;
info->ip.ipv6 = inner_ip6h;
info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
} else {
struct iphdr *inner_ip4h = encap_data;
if (inner_ip4h + 1 > data_end)
return BAD_IP6GUE_HDR;
info->type = V4;
info->proto = inner_ip4h->protocol;
info->ip.ipv4 = inner_ip4h;
info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr);
}
return NO_ERR;
}
static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info,
void *data, void *data_end)
{
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
if (ip6h + 1 > data_end)
return BAD_IP6_HDR;
info->proto = ip6h->nexthdr;
info->ip.ipv6 = ip6h;
info->type = V6;
info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
if (info->proto == IPPROTO_UDP)
return parse_gue_v6(info, ip6h, data_end);
return NO_ERR;
}
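/* Main firewall entry point: parse Ethernet, require IPv6, handle optional
 * GUE encapsulation, pass ICMPv6, drop anything that is not TCP/UDP, collect
 * address/prefix/port matches, and finally pass only non-SYN TCP traffic;
 * everything else is dropped.
 */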
SEC("xdp")
int edgewall(struct xdp_md *ctx)
{
void *data_end = (void *)(long)(ctx->data_end);
void *data = (void *)(long)(ctx->data);
struct fw_match_info match_info = {};
struct pkt_info info = {};
void *transport_hdr;
struct ethhdr *eth;
bool filter_res;
__u32 proto;
eth = parse_ethhdr(data, data_end);
if (!eth)
return XDP_DROP;
proto = eth->h_proto;
if (proto != bpf_htons(ETH_P_IPV6))
return XDP_DROP;
if (parse_ipv6_gue(&info, data, data_end))
return XDP_DROP;
if (info.proto == IPPROTO_ICMPV6)
return XDP_PASS;
if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP)
return XDP_DROP;
filter_src_dst_ip(&info, &match_info);
transport_hdr = get_transport_hdr(info.trans_hdr_offset, data,
data_end);
if (!transport_hdr)
return XDP_DROP;
filter_res = filter_transport_hdr(transport_hdr, data_end,
&info, &match_info);
if (!filter_res)
return XDP_DROP;
if (match_info.is_tcp && !match_info.is_tcp_syn)
return XDP_PASS;
return XDP_DROP;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdpwall.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
int run_cnt = 0;
SEC("perf_event")
int handler(struct pt_regs *ctx)
{
__sync_fetch_and_add(&run_cnt, 1);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_perf_link.c |
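A sketch of how such a perf_event program could be attached from userspace with libbpf (illustrative only; the event choice, sample period, and names below are assumptions, not taken from the selftest):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_on_cpu0(struct bpf_program *prog)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size = sizeof(attr),
		.sample_period = 1000000,
	};
	int pfd;

	/* Open a software CPU-clock event on CPU 0, any pid. */
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (pfd < 0)
		return NULL;

	return bpf_program__attach_perf_event(prog, pfd);
}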
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
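/* Check the __sk_buff context values handed in by the test harness (cb[],
 * wire_len, gso_segs, gso_size, ingress_ifindex, ifindex, hwtstamp), bump a
 * few writable fields (cb[], priority, tstamp, mark), and return 1 on any
 * mismatch, 0 otherwise.
 */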
SEC("tc")
int process(struct __sk_buff *skb)
{
#pragma clang loop unroll(full)
for (int i = 0; i < 5; i++) {
if (skb->cb[i] != i + 1)
return 1;
skb->cb[i]++;
}
skb->priority++;
skb->tstamp++;
skb->mark++;
if (skb->wire_len != 100)
return 1;
if (skb->gso_segs != 8)
return 1;
if (skb->gso_size != 10)
return 1;
if (skb->ingress_ifindex != 11)
return 1;
if (skb->ifindex != 1)
return 1;
if (skb->hwtstamp != 11)
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_skb_ctx.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define EAFNOSUPPORT 97
#define EPROTO 71
#define ENONET 64
#define EINVAL 22
#define ENOENT 2
extern unsigned long CONFIG_HZ __kconfig;
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
int test_einval_netns_id = 0;
int test_einval_len_opts = 0;
int test_eproto_l4proto = 0;
int test_enonet_netns_id = 0;
int test_enoent_lookup = 0;
int test_eafnosupport = 0;
int test_alloc_entry = -EINVAL;
int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
u32 test_delta_timeout = 0;
u32 test_status = 0;
u32 test_insert_lookup_mark = 0;
int test_snat_addr = -EINVAL;
int test_dnat_addr = -EINVAL;
__be32 saddr = 0;
__be16 sport = 0;
__be32 daddr = 0;
__be16 dport = 0;
int test_exist_lookup = -ENOENT;
u32 test_exist_lookup_mark = 0;
enum nf_nat_manip_type___local {
NF_NAT_MANIP_SRC___local,
NF_NAT_MANIP_DST___local
};
struct nf_conn;
struct bpf_ct_opts___local {
s32 netns_id;
s32 error;
u8 l4proto;
u8 reserved[3];
} __attribute__((preserve_access_index));
struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
int port, enum nf_nat_manip_type___local) __ksym;
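/* Exercise the conntrack kfuncs above: first drive the error paths of the
 * lookup/alloc variants (bad tuple, reserved bits, bad netns id, bad opts
 * length, unsupported l4proto, missing entry, unsupported family), then
 * allocate an entry, set timeout/mark/NAT info, insert it, and verify the
 * result via lookups.
 */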
static __always_inline void
nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32),
struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32),
void *ctx)
{
struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
struct bpf_sock_tuple bpf_tuple;
struct nf_conn *ct;
__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_einval_bpf_tuple = opts_def.error;
opts_def.reserved[0] = 1;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
opts_def.reserved[0] = 0;
opts_def.l4proto = IPPROTO_TCP;
if (ct)
bpf_ct_release(ct);
else
test_einval_reserved = opts_def.error;
opts_def.netns_id = -2;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
opts_def.netns_id = -1;
if (ct)
bpf_ct_release(ct);
else
test_einval_netns_id = opts_def.error;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def) - 1);
if (ct)
bpf_ct_release(ct);
else
test_einval_len_opts = opts_def.error;
opts_def.l4proto = IPPROTO_ICMP;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
opts_def.l4proto = IPPROTO_TCP;
if (ct)
bpf_ct_release(ct);
else
test_eproto_l4proto = opts_def.error;
opts_def.netns_id = 0xf00f;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
opts_def.netns_id = -1;
if (ct)
bpf_ct_release(ct);
else
test_enonet_netns_id = opts_def.error;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_enoent_lookup = opts_def.error;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_eafnosupport = opts_def.error;
bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
if (ct) {
__u16 sport = bpf_get_prandom_u32();
__u16 dport = bpf_get_prandom_u32();
union nf_inet_addr saddr = {};
union nf_inet_addr daddr = {};
struct nf_conn *ct_ins;
bpf_ct_set_timeout(ct, 10000);
ct->mark = 77;
/* snat */
saddr.ip = bpf_get_prandom_u32();
bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
/* dnat */
daddr.ip = bpf_get_prandom_u32();
bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
ct_ins = bpf_ct_insert_entry(ct);
if (ct_ins) {
struct nf_conn *ct_lk;
ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
&opts_def, sizeof(opts_def));
if (ct_lk) {
struct nf_conntrack_tuple *tuple;
/* check snat and dnat addresses */
tuple = &ct_lk->tuplehash[IP_CT_DIR_REPLY].tuple;
if (tuple->dst.u3.ip == saddr.ip &&
tuple->dst.u.all == bpf_htons(sport))
test_snat_addr = 0;
if (tuple->src.u3.ip == daddr.ip &&
tuple->src.u.all == bpf_htons(dport))
test_dnat_addr = 0;
/* update ct entry timeout */
bpf_ct_change_timeout(ct_lk, 10000);
test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
test_delta_timeout /= CONFIG_HZ;
test_insert_lookup_mark = ct_lk->mark;
bpf_ct_change_status(ct_lk,
IPS_CONFIRMED | IPS_SEEN_REPLY);
test_status = ct_lk->status;
bpf_ct_release(ct_lk);
test_succ_lookup = 0;
}
bpf_ct_release(ct_ins);
test_insert_entry = 0;
}
test_alloc_entry = 0;
}
bpf_tuple.ipv4.saddr = saddr;
bpf_tuple.ipv4.daddr = daddr;
bpf_tuple.ipv4.sport = sport;
bpf_tuple.ipv4.dport = dport;
ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
sizeof(opts_def));
if (ct) {
test_exist_lookup = 0;
if (ct->mark == 42) {
ct->mark++;
test_exist_lookup_mark = ct->mark;
}
bpf_ct_release(ct);
} else {
test_exist_lookup = opts_def.error;
}
}
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
return 0;
}
SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_bpf_nf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
extern const struct rq runqueues __ksym; /* struct type global var. */
extern const int bpf_prog_active __ksym; /* int type global var. */
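/* The handler below reads the per-CPU copies of both ksyms; it NULL-checks
 * bpf_prog_active but deliberately not runqueues, so the verifier is expected
 * to reject the program.
 */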
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
struct rq *rq;
int *active;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active) {
/* READ_ONCE */
*(volatile int *)active;
/* rq has not been NULL-checked, so the verifier should reject this access. */
*(volatile int *)(&rq->cpu);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
"r2 = 0;" \
"*(u32*)(r10 - 8) = r2;" \
"*(u64*)(r10 - 16) = r2;" \
"*(u64*)(r10 - 24) = r2;" \
"*(u64*)(r10 - 32) = r2;" \
"*(u64*)(r10 - 40) = r2;" \
"*(u64*)(r10 - 48) = r2;" \
/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
"r2 = r10;" \
"r2 += -48;" \
"r3 = %[sizeof_bpf_sock_tuple];"\
"r4 = 0;" \
"r5 = 0;" \
"call %[" #func "];"
struct bpf_key {} __attribute__((preserve_access_index));
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
/* BTF FUNC records are not generated for kfuncs referenced
* from inline assembly. These records are necessary for
* libbpf to link the program. The function below is a hack
* to ensure that BTF FUNC records are generated.
*/
void __kfunc_btf_root(void)
{
bpf_key_put(0);
bpf_lookup_system_key(0);
bpf_lookup_user_key(0, 0);
}
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
void dummy_prog_42_tc(void);
void dummy_prog_24_tc(void);
void dummy_prog_loop1_tc(void);
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 4);
__uint(key_size, sizeof(int));
__array(values, void (void));
} map_prog1_tc SEC(".maps") = {
.values = {
[0] = (void *)&dummy_prog_42_tc,
[1] = (void *)&dummy_prog_loop1_tc,
[2] = (void *)&dummy_prog_24_tc,
},
};
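/* Auxiliary tail-call targets referenced by map_prog1_tc above. */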
SEC("tc")
__auxiliary
__naked void dummy_prog_42_tc(void)
{
asm volatile ("r0 = 42; exit;");
}
SEC("tc")
__auxiliary
__naked void dummy_prog_24_tc(void)
{
asm volatile ("r0 = 24; exit;");
}
SEC("tc")
__auxiliary
__naked void dummy_prog_loop1_tc(void)
{
asm volatile (" \
r3 = 1; \
r2 = %[map_prog1_tc] ll; \
call %[bpf_tail_call]; \
r0 = 41; \
exit; \
" :
: __imm(bpf_tail_call),
__imm_addr(map_prog1_tc)
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_leak_potential_reference(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; /* leak reference */ \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_1(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" r6 = r0; /* leak reference */ \
exit; \
" :
: __imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak potential reference on stack")
__failure __msg("Unreleased reference")
__naked void leak_potential_reference_on_stack(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r4 = r10; \
r4 += -8; \
*(u64*)(r4 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak potential reference on stack 2")
__failure __msg("Unreleased reference")
__naked void potential_reference_on_stack_2(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r4 = r10; \
r4 += -8; \
*(u64*)(r4 + 0) = r0; \
r0 = 0; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: zero potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_zero_potential_reference(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r0 = 0; /* leak reference */ \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: zero potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_2(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" r0 = 0; /* leak reference */ \
exit; \
" :
: __imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: copy and zero potential references")
__failure __msg("Unreleased reference")
__naked void copy_and_zero_potential_references(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r7 = r0; \
r0 = 0; \
r7 = 0; /* leak reference */ \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: acquire/release user key reference")
__success
__naked void acquire_release_user_key_reference(void)
{
asm volatile (" \
r1 = -3; \
r2 = 0; \
call %[bpf_lookup_user_key]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_key_put]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_key_put),
__imm(bpf_lookup_user_key)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: acquire/release system key reference")
__success
__naked void acquire_release_system_key_reference(void)
{
asm volatile (" \
r1 = 1; \
call %[bpf_lookup_system_key]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
call %[bpf_key_put]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_key_put),
__imm(bpf_lookup_system_key)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: release user key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void user_key_reference_without_check(void)
{
asm volatile (" \
r1 = -3; \
r2 = 0; \
call %[bpf_lookup_user_key]; \
r1 = r0; \
call %[bpf_key_put]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_key_put),
__imm(bpf_lookup_user_key)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: release system key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void system_key_reference_without_check(void)
{
asm volatile (" \
r1 = 1; \
call %[bpf_lookup_system_key]; \
r1 = r0; \
call %[bpf_key_put]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_key_put),
__imm(bpf_lookup_system_key)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: release with NULL key pointer")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void release_with_null_key_pointer(void)
{
asm volatile (" \
r1 = 0; \
call %[bpf_key_put]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_key_put)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: leak potential reference to user key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_user_key(void)
{
asm volatile (" \
r1 = -3; \
r2 = 0; \
call %[bpf_lookup_user_key]; \
exit; \
" :
: __imm(bpf_lookup_user_key)
: __clobber_all);
}
SEC("lsm.s/bpf")
__description("reference tracking: leak potential reference to system key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_system_key(void)
{
asm volatile (" \
r1 = 1; \
call %[bpf_lookup_system_key]; \
exit; \
" :
: __imm(bpf_lookup_system_key)
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference without check")
__failure __msg("type=sock_or_null expected=sock")
__naked void tracking_release_reference_without_check(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* reference in r0 may be NULL */ \
r1 = r0; \
r2 = 0; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference to sock_common without check")
__failure __msg("type=sock_common_or_null expected=sock")
__naked void to_sock_common_without_check(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" /* reference in r0 may be NULL */ \
r1 = r0; \
r2 = 0; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_release),
__imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference")
__success __retval(0)
__naked void reference_tracking_release_reference(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference to sock_common")
__success __retval(0)
__naked void release_reference_to_sock_common(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_release),
__imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference 2")
__success __retval(0)
__naked void reference_tracking_release_reference_2(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference twice")
__failure __msg("type=scalar expected=sock")
__naked void reference_tracking_release_reference_twice(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
r6 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: r1 = r6; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference twice inside branch")
__failure __msg("type=scalar expected=sock")
__naked void release_reference_twice_inside_branch(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
r6 = r0; \
if r0 == 0 goto l0_%=; /* goto end */ \
call %[bpf_sk_release]; \
r1 = r6; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: alloc, check, free in one subbranch")
__failure __msg("Unreleased reference")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_one_subbranch(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 16; \
/* if (offsetof(skb, mark) > data_len) exit; */ \
if r0 <= r3 goto l0_%=; \
exit; \
l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r6 == 0 goto l1_%=; /* mark == 0? */\
/* Leak reference in R0 */ \
exit; \
l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \
r1 = r0; \
call %[bpf_sk_release]; \
l2_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: alloc, check, free in both subbranches")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_both_subbranches(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 16; \
/* if (offsetof(skb, mark) > data_len) exit; */ \
if r0 <= r3 goto l0_%=; \
exit; \
l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r6 == 0 goto l1_%=; /* mark == 0? */\
if r0 == 0 goto l2_%=; /* sk NULL? */ \
r1 = r0; \
call %[bpf_sk_release]; \
l2_%=: exit; \
l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? */ \
r1 = r0; \
call %[bpf_sk_release]; \
l3_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: free reference in subprog")
__success __retval(0)
__naked void call_free_reference_in_subprog(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; /* unchecked reference */ \
call call_free_reference_in_subprog__1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void call_free_reference_in_subprog__1(void)
{
asm volatile (" \
/* subprog 1 */ \
r2 = r1; \
if r2 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_release)
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: free reference in subprog and outside")
__failure __msg("type=scalar expected=sock")
__naked void reference_in_subprog_and_outside(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; /* unchecked reference */ \
r6 = r0; \
call reference_in_subprog_and_outside__1; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void reference_in_subprog_and_outside__1(void)
{
asm volatile (" \
/* subprog 1 */ \
r2 = r1; \
if r2 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_release)
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: alloc & leak reference in subprog")
__failure __msg("Unreleased reference")
__naked void alloc_leak_reference_in_subprog(void)
{
asm volatile (" \
r4 = r10; \
r4 += -8; \
call alloc_leak_reference_in_subprog__1; \
r1 = r0; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void alloc_leak_reference_in_subprog__1(void)
{
asm volatile (" \
/* subprog 1 */ \
r6 = r4; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* spill unchecked sk_ptr into stack of caller */\
*(u64*)(r6 + 0) = r0; \
r1 = r0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: alloc in subprog, release outside")
__success __retval(POINTER_VALUE)
__naked void alloc_in_subprog_release_outside(void)
{
asm volatile (" \
r4 = r10; \
call alloc_in_subprog_release_outside__1; \
r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_release)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void alloc_in_subprog_release_outside__1(void)
{
asm volatile (" \
/* subprog 1 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" exit; /* return sk */ \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: sk_ptr leak into caller stack")
__failure __msg("Unreleased reference")
__naked void ptr_leak_into_caller_stack(void)
{
asm volatile (" \
r4 = r10; \
r4 += -8; \
call ptr_leak_into_caller_stack__1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__1(void)
{
asm volatile (" \
/* subprog 1 */ \
r5 = r10; \
r5 += -8; \
*(u64*)(r5 + 0) = r4; \
call ptr_leak_into_caller_stack__2; \
/* spill unchecked sk_ptr into stack of caller */\
r5 = r10; \
r5 += -8; \
r4 = *(u64*)(r5 + 0); \
*(u64*)(r4 + 0) = r0; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__2(void)
{
asm volatile (" \
/* subprog 2 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking in call: sk_ptr spill into caller stack")
__success __retval(0)
__naked void ptr_spill_into_caller_stack(void)
{
asm volatile (" \
r4 = r10; \
r4 += -8; \
call ptr_spill_into_caller_stack__1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__1(void)
{
asm volatile (" \
/* subprog 1 */ \
r5 = r10; \
r5 += -8; \
*(u64*)(r5 + 0) = r4; \
call ptr_spill_into_caller_stack__2; \
/* spill unchecked sk_ptr into stack of caller */\
r5 = r10; \
r5 += -8; \
r4 = *(u64*)(r5 + 0); \
*(u64*)(r4 + 0) = r0; \
if r0 == 0 goto l0_%=; \
/* now the sk_ptr is verified, free the reference */\
r1 = *(u64*)(r4 + 0); \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_release)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__2(void)
{
asm volatile (" \
/* subprog 2 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: allow LD_ABS")
__success __retval(0)
__naked void reference_tracking_allow_ld_abs(void)
{
asm volatile (" \
r6 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: r0 = *(u8*)skb[0]; \
r0 = *(u16*)skb[0]; \
r0 = *(u32*)skb[0]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: forbid LD_ABS while holding reference")
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
__naked void ld_abs_while_holding_reference(void)
{
asm volatile (" \
r6 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r0 = *(u8*)skb[0]; \
r0 = *(u16*)skb[0]; \
r0 = *(u32*)skb[0]; \
r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: allow LD_IND")
__success __retval(1)
__naked void reference_tracking_allow_ld_ind(void)
{
asm volatile (" \
r6 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: r7 = 1; \
.8byte %[ld_ind]; \
r0 = r7; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
__imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: forbid LD_IND while holding reference")
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
__naked void ld_ind_while_holding_reference(void)
{
asm volatile (" \
r6 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r4 = r0; \
r7 = 1; \
.8byte %[ld_ind]; \
r0 = r7; \
r1 = r4; \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
__imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: check reference or tail call")
__success __retval(0)
__naked void check_reference_or_tail_call(void)
{
asm volatile (" \
r7 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* if (sk) bpf_sk_release() */ \
r1 = r0; \
if r1 != 0 goto l0_%=; \
/* bpf_tail_call() */ \
r3 = 3; \
r2 = %[map_prog1_tc] ll; \
r1 = r7; \
call %[bpf_tail_call]; \
r0 = 0; \
exit; \
l0_%=: call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: release reference then tail call")
__success __retval(0)
__naked void release_reference_then_tail_call(void)
{
asm volatile (" \
r7 = r1; \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* if (sk) bpf_sk_release() */ \
r1 = r0; \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: /* bpf_tail_call() */ \
r3 = 3; \
r2 = %[map_prog1_tc] ll; \
r1 = r7; \
call %[bpf_tail_call]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak possible reference over tail call")
__failure __msg("tail_call would lead to reference leak")
__naked void possible_reference_over_tail_call(void)
{
asm volatile (" \
r7 = r1; \
/* Look up socket and store in REG_6 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* bpf_tail_call() */ \
r6 = r0; \
r3 = 3; \
r2 = %[map_prog1_tc] ll; \
r1 = r7; \
call %[bpf_tail_call]; \
r0 = 0; \
/* if (sk) bpf_sk_release() */ \
r1 = r6; \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: leak checked reference over tail call")
__failure __msg("tail_call would lead to reference leak")
__naked void checked_reference_over_tail_call(void)
{
asm volatile (" \
r7 = r1; \
/* Look up socket and store in REG_6 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
/* if (!sk) goto end */ \
if r0 == 0 goto l0_%=; \
/* bpf_tail_call() */ \
r3 = 0; \
r2 = %[map_prog1_tc] ll; \
r1 = r7; \
call %[bpf_tail_call]; \
r0 = 0; \
r1 = r6; \
l0_%=: call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: mangle and release sock_or_null")
__failure __msg("R1 pointer arithmetic on sock_or_null prohibited")
__naked void and_release_sock_or_null(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
r1 += 5; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: mangle and release sock")
__failure __msg("R1 pointer arithmetic on sock prohibited")
__naked void tracking_mangle_and_release_sock(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
r1 += 5; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: access member")
__success __retval(0)
__naked void reference_tracking_access_member(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
if r0 == 0 goto l0_%=; \
r2 = *(u32*)(r0 + 4); \
r1 = r6; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: write to member")
__failure __msg("cannot write into sock")
__naked void reference_tracking_write_to_member(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
if r0 == 0 goto l0_%=; \
r1 = r6; \
r2 = 42 ll; \
*(u32*)(r1 + %[bpf_sock_mark]) = r2; \
r1 = r6; \
l0_%=: call %[bpf_sk_release]; \
r0 = 0 ll; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: invalid 64-bit access of member")
__failure __msg("invalid sock access off=0 size=8")
__naked void _64_bit_access_of_member(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
if r0 == 0 goto l0_%=; \
r2 = *(u64*)(r0 + 0); \
r1 = r6; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: access after release")
__failure __msg("!read_ok")
__naked void reference_tracking_access_after_release(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
r2 = *(u32*)(r1 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: direct access for lookup")
__success __retval(0)
__naked void tracking_direct_access_for_lookup(void)
{
asm volatile (" \
/* Check that the packet is at least 64B long */\
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r0 = r2; \
r0 += 64; \
if r0 > r3 goto l0_%=; \
/* sk = sk_lookup_tcp(ctx, skb->data, ...) */ \
r3 = %[sizeof_bpf_sock_tuple]; \
r4 = 0; \
r5 = 0; \
call %[bpf_sk_lookup_tcp]; \
r6 = r0; \
if r0 == 0 goto l0_%=; \
r2 = *(u32*)(r0 + 4); \
r1 = r6; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use ptr from bpf_tcp_sock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_tcp_sock_after_release(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r7 = r0; \
r1 = r6; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use ptr from bpf_sk_fullsock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_sk_fullsock_after_release(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r7 = r0; \
r1 = r6; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void sk_fullsock_tp_after_release(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_fullsock]; \
r1 = r6; \
r6 = r0; \
call %[bpf_sk_release]; \
if r6 != 0 goto l2_%=; \
exit; \
l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use sk after bpf_sk_release(tp)")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void after_bpf_sk_release_tp(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_type]); \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)")
__success __retval(0)
__naked void after_bpf_sk_release_sk(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_get_listener_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r1 = r6; \
r6 = r0; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \
exit; \
" :
: __imm(bpf_get_listener_sock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: bpf_sk_release(listen_sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_listen_sk(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_get_listener_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_type]); \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_get_listener_sock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
SEC("tc")
__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)")
__failure __msg("invalid mem access")
__naked void and_bpf_tcp_sock_sk(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_sk_fullsock]; \
r7 = r0; \
r1 = r6; \
call %[bpf_tcp_sock]; \
r8 = r0; \
if r7 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: branch tracking valid pointer null comparison")
__success __retval(0)
__naked void tracking_valid_pointer_null_comparison(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
r3 = 1; \
if r6 != 0 goto l0_%=; \
r3 = 0; \
l0_%=: if r6 == 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
l1_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: branch tracking valid pointer value comparison")
__failure __msg("Unreleased reference")
__naked void tracking_valid_pointer_value_comparison(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; \
r3 = 1; \
if r6 == 0 goto l0_%=; \
r3 = 0; \
if r6 == 1234 goto l0_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
l0_%=: exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: bpf_sk_release(btf_tcp_sock)")
__success
__retval(0)
__naked void sk_release_btf_tcp_sock(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_skc_to_tcp_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r1 = r0; \
call %[bpf_sk_release]; \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_skc_to_tcp_sock),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
SEC("tc")
__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release")
__failure __msg("invalid mem access")
__naked void to_tcp_sock_after_release(void)
{
asm volatile (
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
call %[bpf_skc_to_tcp_sock]; \
if r0 != 0 goto l1_%=; \
r1 = r6; \
call %[bpf_sk_release]; \
exit; \
l1_%=: r7 = r0; \
r1 = r6; \
call %[bpf_sk_release]; \
r0 = *(u8*)(r7 + 0); \
exit; \
" :
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_skc_to_tcp_sock),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
: __clobber_all);
}
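/* After bpf_ringbuf_discard(), the released pointer left in R8 is downgraded:
 * a privileged program may still store the now-unknown value into the map,
 * while an unprivileged one is no longer allowed to read R8 at all.
 */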
SEC("socket")
__description("reference tracking: try to leak released ptr reg")
__success __failure_unpriv __msg_unpriv("R8 !read_ok")
__retval(0)
__naked void to_leak_released_ptr_reg(void)
{
asm volatile (" \
r0 = 0; \
*(u32*)(r10 - 4) = r0; \
r2 = r10; \
r2 += -4; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r9 = r0; \
r0 = 0; \
r1 = %[map_ringbuf] ll; \
r2 = 8; \
r3 = 0; \
call %[bpf_ringbuf_reserve]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r8 = r0; \
r1 = r8; \
r2 = 0; \
call %[bpf_ringbuf_discard]; \
r0 = 0; \
*(u64*)(r9 + 0) = r8; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_discard),
__imm(bpf_ringbuf_reserve),
__imm_addr(map_array_48b),
__imm_addr(map_ringbuf)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ref_tracking.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
uint64_t my_pid_tgid;
} data = {};
struct core_reloc_kernel_output {
int valid[10];
/* we have test_progs[-flavor], so cut flavor part */
char comm[sizeof("test_progs")];
int comm_len;
bool local_task_struct_matches;
};
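/* Minimal local definition of task_struct; CO-RE relocates the field
 * accesses below against the kernel's own BTF layout.
 */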
struct task_struct {
int pid;
int tgid;
char comm[16];
struct task_struct *group_leader;
};
struct mm_struct___wrong {
int abc_whatever_should_not_exist;
};
struct task_struct___local {
int pid;
struct mm_struct___wrong *mm;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
/* Support for the BPF_TYPE_MATCHES argument to the
* __builtin_preserve_type_info builtin was added at some point during
* development of clang 15 and it's what we require for this test.
*/
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct task_struct *task = (void *)bpf_get_current_task();
struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
uint32_t real_tgid = (uint32_t)pid_tgid;
int pid, tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
if (CORE_READ(&pid, &task->pid) ||
CORE_READ(&tgid, &task->tgid))
return 1;
/* validate pid + tgid matches */
out->valid[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
/* test variadic BPF_CORE_READ macros */
out->valid[1] = BPF_CORE_READ(task,
tgid) == real_tgid;
out->valid[2] = BPF_CORE_READ(task,
group_leader,
tgid) == real_tgid;
out->valid[3] = BPF_CORE_READ(task,
group_leader, group_leader,
tgid) == real_tgid;
out->valid[4] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
tgid) == real_tgid;
out->valid[5] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader,
tgid) == real_tgid;
out->valid[6] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader,
tgid) == real_tgid;
out->valid[7] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
tgid) == real_tgid;
out->valid[8] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
group_leader,
tgid) == real_tgid;
out->valid[9] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
group_leader, group_leader,
tgid) == real_tgid;
/* test BPF_CORE_READ_STR_INTO() returns correct code and contents */
out->comm_len = BPF_CORE_READ_STR_INTO(
&out->comm, task,
group_leader, group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader, group_leader,
comm);
out->local_task_struct_matches = bpf_core_type_matches(struct task_struct___local);
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_STACK (512 - 3 * 32 + 8)
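/* Sized so that the deepest call chain (global_func1 -> f2 -> f1 -> f0,
 * 4 frames) needs 544 bytes of combined stack, just over the verifier's
 * 512 byte limit; see the __msg below.
 */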
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
asm volatile ("");
return skb->len;
}
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return f0(0, skb) + skb->len;
}
int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + f3(val, skb, 1);
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var;
}
SEC("tc")
__failure __msg("combined stack size of 4 calls is 544")
int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
SEC("fentry/bpf_sk_storage_free")
int BPF_PROG(trace_bpf_sk_storage_free, struct sock *sk)
{
int *value;
value = bpf_sk_storage_get(&sk_stg_map, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (value)
*value = 1;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* Read an uninitialized value from stack at a fixed offset */
SEC("socket")
__naked int read_uninit_stack_fixed_off(void *ctx)
{
asm volatile (" \
r0 = 0; \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = *(u8 *)(r10 - 8 ); \
r0 += r1; \
r1 = *(u8 *)(r10 - 11); \
r1 = *(u8 *)(r10 - 13); \
r1 = *(u8 *)(r10 - 15); \
r1 = *(u16*)(r10 - 16); \
r1 = *(u32*)(r10 - 32); \
r1 = *(u64*)(r10 - 64); \
/* read from a spill of a wrong size, it is a separate \
* branch in check_stack_read_fixed_off() \
*/ \
*(u32*)(r10 - 72) = r1; \
r1 = *(u64*)(r10 - 72); \
r0 = 0; \
exit; \
"
::: __clobber_all);
}
/* Read an uninitialized value from stack at a variable offset */
SEC("socket")
__naked int read_uninit_stack_var_off(void *ctx)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
/* force stack depth to be 64 */ \
*(u64*)(r10 - 64) = r0; \
r0 = -r0; \
/* give r0 a range [-31, -1] */ \
if r0 s<= -32 goto exit_%=; \
if r0 s>= 0 goto exit_%=; \
/* access stack using r0 */ \
r1 = r10; \
r1 += r0; \
r2 = *(u8*)(r1 + 0); \
exit_%=: r0 = 0; \
exit; \
"
:
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
static __noinline void dummy(void) {}
/* Pass a pointer to uninitialized stack memory to a helper.
* Passed memory block should be marked as STACK_MISC after helper call.
*/
SEC("socket")
__log_level(7) __msg("fp-104=mmmmmmmm")
__naked int helper_uninit_to_misc(void *ctx)
{
asm volatile (" \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = r10; \
r1 += -128; \
r2 = 32; \
call %[bpf_trace_printk]; \
/* Call to dummy() forces print_verifier_state(..., true), \
* thus showing the stack state, matched by __msg(). \
*/ \
call %[dummy]; \
r0 = 0; \
exit; \
"
:
: __imm(bpf_trace_printk),
__imm(dummy)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/uninit_stack.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ld_ind.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
SEC("socket")
__description("ld_ind: check calling conv, r1")
__failure __msg("R1 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r1(void)
{
asm volatile (" \
r6 = r1; \
r1 = 1; \
.8byte %[ld_ind]; \
r0 = r1; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000))
: __clobber_all);
}
SEC("socket")
__description("ld_ind: check calling conv, r2")
__failure __msg("R2 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r2(void)
{
asm volatile (" \
r6 = r1; \
r2 = 1; \
.8byte %[ld_ind]; \
r0 = r2; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000))
: __clobber_all);
}
SEC("socket")
__description("ld_ind: check calling conv, r3")
__failure __msg("R3 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r3(void)
{
asm volatile (" \
r6 = r1; \
r3 = 1; \
.8byte %[ld_ind]; \
r0 = r3; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000))
: __clobber_all);
}
SEC("socket")
__description("ld_ind: check calling conv, r4")
__failure __msg("R4 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r4(void)
{
asm volatile (" \
r6 = r1; \
r4 = 1; \
.8byte %[ld_ind]; \
r0 = r4; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000))
: __clobber_all);
}
SEC("socket")
__description("ld_ind: check calling conv, r5")
__failure __msg("R5 !read_ok")
__failure_unpriv
__naked void ind_check_calling_conv_r5(void)
{
asm volatile (" \
r6 = r1; \
r5 = 1; \
.8byte %[ld_ind]; \
r0 = r5; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000))
: __clobber_all);
}
SEC("socket")
__description("ld_ind: check calling conv, r7")
__success __success_unpriv __retval(1)
__naked void ind_check_calling_conv_r7(void)
{
asm volatile (" \
r6 = r1; \
r7 = 1; \
.8byte %[ld_ind]; \
r0 = r7; \
exit; \
" :
: __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ld_ind.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* 4-byte aligned .data */
static volatile int static_var1 = 5;
static volatile int static_var2 = 6;
int var2 = -1;
/* 8-byte aligned .rodata */
const volatile long rovar2;
/* same "subprog" name in both files */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 3;
}
SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
var2 = subprog(rovar2) + static_var1 + static_var2;
return 0;
}
/* different name and/or type of the variable doesn't matter */
char _license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
| linux-master | tools/testing/selftests/bpf/progs/test_static_linked2.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#define TEST_STACK_DEPTH 2
#define TEST_MAX_ENTRIES 16384
typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} stackid_hmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
int pid = 0;
int control = 0;
int failed = 0;
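/* On each sched_switch from the test pid, record a stack while skipping
 * TEST_STACK_DEPTH entries; failed is set to 1 if the skip zeroed the whole
 * stack and to 2 if the kernel rejected skipping that many entries.
 */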
SEC("tracepoint/sched/sched_switch")
int oncpu(struct trace_event_raw_sched_switch *ctx)
{
__u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
__u32 key = 0, val = 0;
__u64 *stack_p;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
if (control)
return 0;
/* it should be possible to skip a whole buffer's worth of entries */
key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
if ((int)key >= 0) {
/* The size of stackmap and stack_amap should be the same */
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p) {
bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
/* bpf_get_stack() wrongly skipped all the entries and left the buffer zeroed */
if (stack_p[0] == 0)
failed = 1;
}
} else {
/* old kernel doesn't support skipping that many entries */
failed = 2;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/stacktrace_map_skip.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define barrier_var(var) /**/
#define UNROLL
#define INLINE __noinline
#include "profiler.inc.h"
| linux-master | tools/testing/selftests/bpf/progs/profiler3.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
char _license[] SEC("license") = "GPL";
__u16 g_serv_port = 0;
static inline void set_ip(__u32 *dst, const struct in6_addr *src)
{
dst[0] = src->in6_u.u6_addr32[0];
dst[1] = src->in6_u.u6_addr32[1];
dst[2] = src->in6_u.u6_addr32[2];
dst[3] = src->in6_u.u6_addr32[3];
}
static inline void set_tuple(struct bpf_sock_tuple *tuple,
const struct ipv6hdr *ip6h,
const struct tcphdr *tcph)
{
set_ip(tuple->ipv6.saddr, &ip6h->daddr);
set_ip(tuple->ipv6.daddr, &ip6h->saddr);
tuple->ipv6.sport = tcph->dest;
tuple->ipv6.dport = tcph->source;
}
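/* The peer is allowed only when both sockets belong to the same cgroup and
 * share the same 2nd-level ancestor cgroup.
 */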
static inline int is_allowed_peer_cg(struct __sk_buff *skb,
const struct ipv6hdr *ip6h,
const struct tcphdr *tcph)
{
__u64 cgid, acgid, peer_cgid, peer_acgid;
struct bpf_sock_tuple tuple;
size_t tuple_len = sizeof(tuple.ipv6);
struct bpf_sock *peer_sk;
set_tuple(&tuple, ip6h, tcph);
peer_sk = bpf_sk_lookup_tcp(skb, &tuple, tuple_len,
BPF_F_CURRENT_NETNS, 0);
if (!peer_sk)
return 0;
cgid = bpf_skb_cgroup_id(skb);
peer_cgid = bpf_sk_cgroup_id(peer_sk);
acgid = bpf_skb_ancestor_cgroup_id(skb, 2);
peer_acgid = bpf_sk_ancestor_cgroup_id(peer_sk, 2);
bpf_sk_release(peer_sk);
return cgid && cgid == peer_cgid && acgid && acgid == peer_acgid;
}
SEC("cgroup_skb/ingress")
int ingress_lookup(struct __sk_buff *skb)
{
struct ipv6hdr ip6h;
struct tcphdr tcph;
if (skb->protocol != bpf_htons(ETH_P_IPV6))
return 1;
/* For SYN packets coming to a listening socket, skb->remote_port will be
 * zero, so the IPv6/TCP headers are loaded to identify the remote peer
 * instead.
*/
if (bpf_skb_load_bytes(skb, 0, &ip6h, sizeof(ip6h)))
return 1;
if (ip6h.nexthdr != IPPROTO_TCP)
return 1;
if (bpf_skb_load_bytes(skb, sizeof(ip6h), &tcph, sizeof(tcph)))
return 1;
if (!g_serv_port)
return 0;
if (tcph.dest != g_serv_port)
return 1;
return is_allowed_peer_cg(skb, &ip6h, &tcph);
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for multi-dimensional array output.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
typedef int arr_t[2];
typedef int multiarr_t[3][4][5];
typedef int *ptr_arr_t[6];
typedef int *ptr_multiarr_t[7][8][9][10];
typedef int * (*fn_ptr_arr_t[11])();
typedef int * (*fn_ptr_multiarr_t[12][13])();
struct root_struct {
arr_t _1;
multiarr_t _2;
ptr_arr_t _3;
ptr_multiarr_t _4;
fn_ptr_arr_t _5;
fn_ptr_multiarr_t _6;
};
/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct root_struct *s)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} map SEC(".maps");
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
__u32 key = 0;
if (skops->sk)
bpf_map_update_elem(&map, &key, skops->sk, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__type(key, int);
__type(value, int);
__uint(max_entries, 2);
} sock_map SEC(".maps");
SEC("freplace/cls_redirect")
int freplace_cls_redirect_test(struct __sk_buff *skb)
{
int ret = 0;
const int zero = 0;
struct bpf_sock *sk;
sk = bpf_map_lookup_elem(&sock_map, &zero);
if (!sk)
return TC_ACT_SHOT;
ret = bpf_map_update_elem(&sock_map, &zero, sk, 0);
bpf_sk_release(sk);
return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_cls_redirect.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf600_nounroll.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("gotol, small_imm")
__success __success_unpriv __retval(1)
__naked void gotol_small_imm(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 == 0 goto l0_%=; \
gotol l1_%=; \
l2_%=: \
gotol l3_%=; \
l1_%=: \
r0 = 1; \
gotol l2_%=; \
l0_%=: \
r0 = 2; \
l3_%=: \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
#else
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_gotol.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
int f3(int, struct __sk_buff *skb);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + f3(val, (void *)&val); /* type mismatch */
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb)
{
return skb->ifindex * val;
}
SEC("tc")
__failure __msg("expected pointer to ctx, but got PTR")
int global_func5(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func5.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
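/* Sign-extending loads: only the low byte/half/word of the spilled value is
 * read back, sign-extended (e.g. 0x3fe -> low byte 0xfe -> -2 for *(s8 *)).
 */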
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
asm volatile (" \
r1 = 0x3fe; \
*(u64 *)(r10 - 8) = r1; \
r0 = *(s8 *)(r10 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("LDSX, S16")
__success __success_unpriv __retval(-2)
__naked void ldsx_s16(void)
{
asm volatile (" \
r1 = 0x3fffe; \
*(u64 *)(r10 - 8) = r1; \
r0 = *(s16 *)(r10 - 8); \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("LDSX, S32")
__success __success_unpriv __retval(-1)
__naked void ldsx_s32(void)
{
asm volatile (" \
r1 = 0xfffffffe; \
*(u64 *)(r10 - 8) = r1; \
r0 = *(s32 *)(r10 - 8); \
r0 >>= 1; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
__msg("R1_w=scalar(smin=-128,smax=127)")
__naked void ldsx_s8_range_priv(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
*(u64 *)(r10 - 8) = r0; \
r1 = *(s8 *)(r10 - 8); \
/* r1 with s8 range */ \
if r1 s> 0x7f goto l0_%=; \
if r1 s< -0x80 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("LDSX, S16 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s16_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
*(u64 *)(r10 - 8) = r0; \
r1 = *(s16 *)(r10 - 8); \
/* r1 with s16 range */ \
if r1 s> 0x7fff goto l0_%=; \
if r1 s< -0x8000 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("LDSX, S32 range checking")
__success __success_unpriv __retval(1)
__naked void ldsx_s32_range(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
*(u64 *)(r10 - 8) = r0; \
r1 = *(s32 *)(r10 - 8); \
/* r1 with s32 range */ \
if r1 s> 0x7fffFFFF goto l0_%=; \
if r1 s< -0x80000000 goto l0_%=; \
r0 = 1; \
l1_%=: \
exit; \
l0_%=: \
r0 = 2; \
goto l1_%=; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
#else
SEC("socket")
__description("cpuv4 is not supported by compiler or jit, use a dummy test")
__success
int dummy_test(void)
{
return 0;
}
#endif
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_ldsx.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
#define MAX_STACK_TRACE_DEPTH 64
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))
SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
long i, retlen;
if (task == (void *)0)
return 0;
retlen = bpf_get_task_stack(task, entries,
MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
if (retlen < 0)
return 0;
BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
retlen / SIZE_OF_ULONG);
for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
if (retlen > i * SIZE_OF_ULONG)
BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
}
BPF_SEQ_PRINTF(seq, "\n");
return 0;
}
SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
uint64_t buf_sz = 0;
int64_t res;
if (task == (void *)0)
return 0;
res = bpf_get_task_stack(task, entries,
MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
if (res <= 0)
return 0;
buf_sz += res;
/* If the verifier doesn't refine bpf_get_task_stack res, and instead
* assumes res is entirely unknown, this program will fail to load as
* the verifier will believe that max buf_sz value allows reading
* past the end of entries in bpf_seq_write call
*/
bpf_seq_write(seq, &entries, buf_sz);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} inner_map1 SEC(".maps"),
inner_map2 SEC(".maps");
struct inner_map_sz2 {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} inner_map_sz2 SEC(".maps");
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
__type(key, int);
__type(value, int);
/* it's possible to use an anonymous struct as the inner map definition here */
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
/* changing max_entries to 2 will fail during load
* due to incompatibility with inner_map definition */
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
});
} outer_arr SEC(".maps") = {
/* (void *) cast is necessary because we didn't use `struct inner_map`
* in __array(values, ...)
* Actually, a conscious effort is required to screw up initialization
* of inner map slots, which is a great thing!
*/
.values = { (void *)&inner_map1, 0, (void *)&inner_map2 },
};
struct inner_map_sz3 {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
__uint(max_entries, 3);
__type(key, int);
__type(value, int);
} inner_map3 SEC(".maps"),
inner_map4 SEC(".maps");
struct inner_map_sz4 {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
__uint(max_entries, 5);
__type(key, int);
__type(value, int);
} inner_map5 SEC(".maps");
struct outer_arr_dyn {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
__type(key, int);
__type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
});
} outer_arr_dyn SEC(".maps") = {
.values = {
[0] = (void *)&inner_map3,
[1] = (void *)&inner_map4,
[2] = (void *)&inner_map5,
},
};
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
__type(key, int);
/* Here everything works flawlessly due to reuse of struct inner_map
* and the compiler will complain at any attempt to use non-inner_map
* references below. This is great experience.
*/
__array(values, struct inner_map);
} outer_hash SEC(".maps") = {
.values = {
[0] = &inner_map2,
[4] = &inner_map1,
},
};
struct sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} sockarr_sz1 SEC(".maps");
struct sockarr_sz2 {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} sockarr_sz2 SEC(".maps");
struct outer_sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
__array(values, struct sockarr_sz1);
} outer_sockarr SEC(".maps") = {
.values = { (void *)&sockarr_sz1 },
};
int input = 0;
SEC("raw_tp/sys_enter")
int handle__sys_enter(void *ctx)
{
struct inner_map *inner_map;
int key = 0, val;
inner_map = bpf_map_lookup_elem(&outer_arr, &key);
if (!inner_map)
return 1;
val = input;
bpf_map_update_elem(inner_map, &key, &val, 0);
inner_map = bpf_map_lookup_elem(&outer_hash, &key);
if (!inner_map)
return 1;
val = input + 1;
bpf_map_update_elem(inner_map, &key, &val, 0);
inner_map = bpf_map_lookup_elem(&outer_arr_dyn, &key);
if (!inner_map)
return 1;
val = input + 2;
bpf_map_update_elem(inner_map, &key, &val, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_btf_map_in_map.c |
// SPDX-License-Identifier: GPL-2.0
#define IPROUTE2_HAVE_LIBBPF
#include "test_sk_assign.c"
| linux-master | tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{
struct seq_file *seq = ctx->meta->seq;
__u64 seq_num = ctx->meta->seq_num;
struct bpf_map *map = ctx->map;
if (map == (void *)0) {
BPF_SEQ_PRINTF(seq, " %%%%%% END %%%%%%\n");
return 0;
}
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, " id refcnt usercnt locked_vm\n");
BPF_SEQ_PRINTF(seq, "%8u %8ld %8ld %10lu\n", map->id, map->refcnt.counter,
map->usercnt.counter,
0LLU);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c |
// SPDX-License-Identifier: GPL-2.0
/* This parsing logic is taken from the open source library katran, a layer 4
* load balancer.
*
* The same parsing logic, implemented with dynptrs, is in test_parse_tcp_hdr_opt_dynptr.c
*
* https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h
*/
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <linux/ipv6.h>
#include <linux/if_ether.h>
#include "test_tcp_hdr_options.h"
char _license[] SEC("license") = "GPL";
/* Kind number used for experiments */
const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
/* Length of the tcp header option */
const __u32 tcp_hdr_opt_len_tpr = 6;
/* maximum number of header options to check when looking up the server_id */
const __u32 tcp_hdr_opt_max_opt_checks = 15;
__u32 server_id;
struct hdr_opt_state {
__u32 server_id;
__u8 byte_offset;
__u8 hdr_bytes_remaining;
};
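/* Returns 1 once the TPR option is found (state->server_id filled in),
 * 0 to keep scanning, and -1 to stop on EOL, a bad length or an
 * out-of-bounds access.
 */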
static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state)
{
const void *data = (void *)(long)xdp->data;
const void *data_end = (void *)(long)xdp->data_end;
__u8 *tcp_opt, kind, hdr_len;
tcp_opt = (__u8 *)(data + state->byte_offset);
if (tcp_opt + 1 > data_end)
return -1;
kind = tcp_opt[0];
if (kind == TCPOPT_EOL)
return -1;
if (kind == TCPOPT_NOP) {
state->hdr_bytes_remaining--;
state->byte_offset++;
return 0;
}
if (state->hdr_bytes_remaining < 2 ||
tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end)
return -1;
hdr_len = tcp_opt[1];
if (hdr_len > state->hdr_bytes_remaining)
return -1;
if (kind == tcp_hdr_opt_kind_tpr) {
if (hdr_len != tcp_hdr_opt_len_tpr)
return -1;
if (tcp_opt + tcp_hdr_opt_len_tpr > data_end)
return -1;
state->server_id = *(__u32 *)&tcp_opt[2];
return 1;
}
state->hdr_bytes_remaining -= hdr_len;
state->byte_offset += hdr_len;
return 0;
}
SEC("xdp")
int xdp_ingress_v6(struct xdp_md *xdp)
{
const void *data = (void *)(long)xdp->data;
const void *data_end = (void *)(long)xdp->data_end;
struct hdr_opt_state opt_state = {};
__u8 tcp_hdr_opt_len = 0;
struct tcphdr *tcp_hdr;
__u64 tcp_offset = 0;
int err;
tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
tcp_hdr = (struct tcphdr *)(data + tcp_offset);
if (tcp_hdr + 1 > data_end)
return XDP_DROP;
tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
return XDP_DROP;
opt_state.hdr_bytes_remaining = tcp_hdr_opt_len;
opt_state.byte_offset = sizeof(struct tcphdr) + tcp_offset;
/* TCP header options occupy at most 40 bytes */
for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
err = parse_hdr_opt(xdp, &opt_state);
if (err || !opt_state.hdr_bytes_remaining)
break;
}
if (!opt_state.server_id)
return XDP_DROP;
server_id = opt_state.server_id;
return XDP_PASS;
}
| linux-master | tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
struct sockaddr_in6 srv_sa6 = {};
__u16 listen_tp_sport = 0;
__u16 req_sk_sport = 0;
__u32 recv_cookie = 0;
__u32 gen_cookie = 0;
__u32 linum = 0;
#define LOG() ({ if (!linum) linum = __LINE__; })
static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
struct tcp_sock *tp,
struct __sk_buff *skb)
{
if (th->syn) {
__s64 mss_cookie;
void *data_end;
data_end = (void *)(long)(skb->data_end);
if (th->doff * 4 != 40) {
LOG();
return;
}
if ((void *)th + 40 > data_end) {
LOG();
return;
}
mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
th, 40);
if (mss_cookie < 0) {
if (mss_cookie != -ENOENT)
LOG();
} else {
gen_cookie = (__u32)mss_cookie;
}
} else if (gen_cookie) {
/* It was in cookie mode */
int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
th, sizeof(*th));
if (ret < 0) {
if (ret != -ENOENT)
LOG();
} else {
recv_cookie = bpf_ntohl(th->ack_seq) - 1;
}
}
}
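/* Look up the socket for the incoming IPv6/TCP packet and bpf_sk_assign()
 * it: a NEW_SYN_RECV skc is converted to a request_sock, a LISTEN skc to a
 * tcp_sock (also exercising the syncookie helpers), anything else is
 * assigned as-is.
 */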
static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
{
struct bpf_sock_tuple *tuple;
struct bpf_sock *bpf_skc;
unsigned int tuple_len;
struct tcphdr *th;
void *data_end;
data_end = (void *)(long)(skb->data_end);
th = (struct tcphdr *)(ip6h + 1);
if (th + 1 > data_end)
return TC_ACT_OK;
/* Is it the testing traffic? */
if (th->dest != srv_sa6.sin6_port)
return TC_ACT_OK;
tuple_len = sizeof(tuple->ipv6);
tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
if ((void *)tuple + tuple_len > data_end) {
LOG();
return TC_ACT_OK;
}
bpf_skc = bpf_skc_lookup_tcp(skb, tuple, tuple_len,
BPF_F_CURRENT_NETNS, 0);
if (!bpf_skc) {
LOG();
return TC_ACT_OK;
}
if (bpf_skc->state == BPF_TCP_NEW_SYN_RECV) {
struct request_sock *req_sk;
req_sk = (struct request_sock *)bpf_skc_to_tcp_request_sock(bpf_skc);
if (!req_sk) {
LOG();
goto release;
}
if (bpf_sk_assign(skb, req_sk, 0)) {
LOG();
goto release;
}
req_sk_sport = req_sk->__req_common.skc_num;
bpf_sk_release(req_sk);
return TC_ACT_OK;
} else if (bpf_skc->state == BPF_TCP_LISTEN) {
struct tcp_sock *tp;
tp = bpf_skc_to_tcp_sock(bpf_skc);
if (!tp) {
LOG();
goto release;
}
if (bpf_sk_assign(skb, tp, 0)) {
LOG();
goto release;
}
listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;
test_syncookie_helper(ip6h, th, tp, skb);
bpf_sk_release(tp);
return TC_ACT_OK;
}
if (bpf_sk_assign(skb, bpf_skc, 0))
LOG();
release:
bpf_sk_release(bpf_skc);
return TC_ACT_OK;
}
SEC("tc")
int cls_ingress(struct __sk_buff *skb)
{
struct ipv6hdr *ip6h;
struct ethhdr *eth;
void *data_end;
data_end = (void *)(long)(skb->data_end);
eth = (struct ethhdr *)(long)(skb->data);
if (eth + 1 > data_end)
return TC_ACT_OK;
if (eth->h_proto != bpf_htons(ETH_P_IPV6))
return TC_ACT_OK;
ip6h = (struct ipv6hdr *)(eth + 1);
if (ip6h + 1 > data_end)
return TC_ACT_OK;
if (ip6h->nexthdr == IPPROTO_TCP)
return handle_ip6_tcp(ip6h, skb);
return TC_ACT_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
int tgid;
tgid = task->tgid;
bpf_seq_write(seq, &tgid, sizeof(tgid));
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, __u64);
} task_storage SEC(".maps");
int run_count = 0;
int valid_ptr_count = 0;
int null_ptr_count = 0;
SEC("fentry/exit_creds")
int BPF_PROG(trace_exit_creds, struct task_struct *task)
{
__u64 *ptr;
ptr = bpf_task_storage_get(&task_storage, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
__sync_fetch_and_add(&valid_ptr_count, 1);
else
__sync_fetch_and_add(&null_ptr_count, 1);
__sync_fetch_and_add(&run_count, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "nested_trust_common.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, u64);
} sk_storage_map SEC(".maps");
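/* task->cpus_ptr and skb->sk are nested pointers that the verifier treats as
 * trusted, so passing them straight to the cpumask kfunc and the sk_storage
 * helper is accepted.
 */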
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
{
bpf_cpumask_test_cpu(0, task->cpus_ptr);
return 0;
}
SEC("tp_btf/tcp_probe")
__success
int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
{
bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/nested_trust_success.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
struct hmap_elem {
int pad; /* unused */
struct bpf_timer timer;
};
struct inner_map {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, int);
__type(value, struct hmap_elem);
} inner_htab SEC(".maps");
#define ARRAY_KEY 1
#define ARRAY_KEY2 2
#define HASH_KEY 1234
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 2);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__array(values, struct inner_map);
} outer_arr SEC(".maps") = {
.values = { [ARRAY_KEY] = &inner_htab },
};
__u64 err;
__u64 ok;
__u64 cnt;
/* callback for inner hash map */
static int timer_cb(void *map, int *key, struct hmap_elem *val)
{
return 0;
}
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1, int a)
{
struct hmap_elem init = {};
struct bpf_map *inner_map, *inner_map2;
struct hmap_elem *val;
int array_key = ARRAY_KEY;
int array_key2 = ARRAY_KEY2;
int hash_key = HASH_KEY;
inner_map = bpf_map_lookup_elem(&outer_arr, &array_key);
if (!inner_map)
return 0;
inner_map2 = bpf_map_lookup_elem(&outer_arr, &array_key2);
if (!inner_map2)
return 0;
bpf_map_update_elem(inner_map, &hash_key, &init, 0);
val = bpf_map_lookup_elem(inner_map, &hash_key);
if (!val)
return 0;
bpf_timer_init(&val->timer, inner_map2, CLOCK_MONOTONIC);
if (bpf_timer_set_callback(&val->timer, timer_cb))
err |= 4;
if (bpf_timer_start(&val->timer, 0, 0))
err |= 8;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/timer_mim_reject.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
return TC_ACT_SHOT; \
}
#else
#define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
TYPE tmp = *((volatile TYPE *)&skb->FIELD + \
TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \
if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
return TC_ACT_SHOT; \
}
#endif
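/* Check that narrow (u8/u16/u32) loads of __sk_buff fields read the expected
 * low-order part of the 32-bit field on both endiannesses.
 */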
SEC("tc")
int test_pkt_md_access(struct __sk_buff *skb)
{
TEST_FIELD(__u8, len, 0xFF);
TEST_FIELD(__u16, len, 0xFFFF);
TEST_FIELD(__u32, len, 0xFFFFFFFF);
TEST_FIELD(__u16, protocol, 0xFFFF);
TEST_FIELD(__u32, protocol, 0xFFFFFFFF);
TEST_FIELD(__u8, hash, 0xFF);
TEST_FIELD(__u16, hash, 0xFFFF);
TEST_FIELD(__u32, hash, 0xFFFFFFFF);
return TC_ACT_OK;
}
| linux-master | tools/testing/selftests/bpf/progs/test_pkt_md_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_packet_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("xdp")
__description("helper access to packet: test1, valid packet_ptr range")
__success __retval(0)
__naked void test1_valid_packet_ptr_range(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
r3 = r2; \
r4 = 0; \
call %[bpf_map_update_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_update_elem),
__imm_addr(map_hash_8b),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("helper access to packet: test2, unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void packet_test2_unchecked_packet_ptr(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data))
: __clobber_all);
}
SEC("xdp")
__description("helper access to packet: test3, variable add")
__success __retval(0)
__naked void to_packet_test3_variable_add(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r5 = *(u8*)(r2 + 0); \
r4 = r2; \
r4 += r5; \
r5 = r4; \
r5 += 8; \
if r5 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
r2 = r4; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("helper access to packet: test4, packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r4 = r2; \
r4 += 4; \
if r4 > r3 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("helper access to packet: test5, packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r2 += 1; \
r4 = r2; \
r4 += 7; \
if r4 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test6, cls valid packet_ptr range")
__success __retval(0)
__naked void cls_valid_packet_ptr_range(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
r3 = r2; \
r4 = 0; \
call %[bpf_map_update_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_update_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test7, cls unchecked packet_ptr")
__failure __msg("invalid access to packet")
__naked void test7_cls_unchecked_packet_ptr(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test8, cls variable add")
__success __retval(0)
__naked void packet_test8_cls_variable_add(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = r2; \
r4 += 8; \
if r4 > r3 goto l0_%=; \
r5 = *(u8*)(r2 + 0); \
r4 = r2; \
r4 += r5; \
r5 = r4; \
r5 += 8; \
if r5 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
r2 = r4; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test9, cls packet_ptr with bad range")
__failure __msg("invalid access to packet")
__naked void packet_ptr_with_bad_range_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = r2; \
r4 += 4; \
if r4 > r3 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test10, cls packet_ptr with too short range")
__failure __msg("invalid access to packet")
__naked void ptr_with_too_short_range_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r2 += 1; \
r4 = r2; \
r4 += 7; \
if r4 > r3 goto l0_%=; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test11, cls unsuitable helper 1")
__failure __msg("helper access to the packet")
__naked void test11_cls_unsuitable_helper_1(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r3 = r6; \
r3 += 7; \
if r3 > r7 goto l0_%=; \
r2 = 0; \
r4 = 42; \
r5 = 0; \
call %[bpf_skb_store_bytes]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_skb_store_bytes),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test12, cls unsuitable helper 2")
__failure __msg("helper access to the packet")
__naked void test12_cls_unsuitable_helper_2(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r3 = r6; \
r6 += 8; \
if r6 > r7 goto l0_%=; \
r2 = 0; \
r4 = 4; \
call %[bpf_skb_load_bytes]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test13, cls helper ok")
__success __retval(0)
__naked void packet_test13_cls_helper_ok(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r6; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test14, cls helper ok sub")
__success __retval(0)
__naked void test14_cls_helper_ok_sub(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 -= 4; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test15, cls helper fail sub")
__failure __msg("invalid access to packet")
__naked void test15_cls_helper_fail_sub(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 -= 12; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test16, cls helper fail range 1")
__failure __msg("invalid access to packet")
__naked void cls_helper_fail_range_1(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test17, cls helper fail range 2")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_2(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r6; \
r2 = -9; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test18, cls helper fail range 3")
__failure __msg("R2 min value is negative")
__naked void cls_helper_fail_range_3(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r6; \
r2 = %[__imm_0]; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__imm_0, ~0),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test19, cls helper range zero")
__success __retval(0)
__naked void test19_cls_helper_range_zero(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r6; \
r2 = 0; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test20, pkt end as input")
__failure __msg("R1 type=pkt_end expected=fp")
__naked void test20_pkt_end_as_input(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r1 = r7; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
SEC("tc")
__description("helper access to packet: test21, wrong reg")
__failure __msg("invalid access to packet")
__naked void to_packet_test21_wrong_reg(void)
{
asm volatile (" \
r6 = *(u32*)(r1 + %[__sk_buff_data]); \
r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r6 += 1; \
r1 = r6; \
r1 += 7; \
if r1 > r7 goto l0_%=; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
r0 = 0; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c |
// SPDX-License-Identifier: GPL-2.0
// test ir decoder
//
// Copyright (C) 2018 Sean Young <[email protected]>
#include <linux/bpf.h>
#include <linux/lirc.h>
#include <bpf/bpf_helpers.h>
SEC("lirc_mode2")
int bpf_decoder(unsigned int *sample)
{
if (LIRC_IS_PULSE(*sample)) {
unsigned int duration = LIRC_VALUE(*sample);
if (duration & 0x10000)
bpf_rc_keydown(sample, 0x40, duration & 0xffff, 0);
if (duration & 0x20000)
bpf_rc_pointer_rel(sample, (duration >> 8) & 0xff,
duration & 0xff);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_id___missing_targets x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___struct_union_mixup x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___struct_union_mixup.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/if.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
#define SERV6_IP_1 0x12345678
#define SERV6_IP_2 0x00000000
#define SERV6_IP_3 0x0000abcd
#define SERV6_PORT 6060
#define SERV6_REWRITE_IP_0 0x00000000
#define SERV6_REWRITE_IP_1 0x00000000
#define SERV6_REWRITE_IP_2 0x00000000
#define SERV6_REWRITE_IP_3 0x00000001
#define SERV6_REWRITE_PORT 6666
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
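/* Exercise SO_BINDTODEVICE/SO_BINDTOIFINDEX from the bind hook: bind to
 * veth1 and veth2 by name, verify they have distinct ifindexes, expect
 * -ENODEV for a nonexistent device, rebind by index, then clear the
 * binding with an empty device name.
 */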
static __inline int bind_to_device(struct bpf_sock_addr *ctx)
{
char veth1[IFNAMSIZ] = "test_sock_addr1";
char veth2[IFNAMSIZ] = "test_sock_addr2";
char missing[IFNAMSIZ] = "nonexistent_dev";
char del_bind[IFNAMSIZ] = "";
int veth1_idx, veth2_idx;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth1, sizeof(veth1)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&veth2, sizeof(veth2)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
veth1_idx == veth2_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&missing, sizeof(missing)) != -ENODEV)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
&veth1_idx, sizeof(veth1_idx)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
&del_bind, sizeof(del_bind)))
return 1;
return 0;
}
static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
{
int val = 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || !val)
return 1;
val = 0;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || val)
return 1;
return 0;
}
static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
/* The socket used in the test case guarantees that old never equals new. */
if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
old == new)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
tmp != new)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
return 1;
return 0;
}
SEC("cgroup/bind6")
int bind_v6_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock *sk;
__u32 user_ip6;
__u16 user_port;
int i;
sk = ctx->sk;
if (!sk)
return 0;
if (sk->family != AF_INET6)
return 0;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
if (ctx->user_ip6[0] != bpf_htonl(SERV6_IP_0) ||
ctx->user_ip6[1] != bpf_htonl(SERV6_IP_1) ||
ctx->user_ip6[2] != bpf_htonl(SERV6_IP_2) ||
ctx->user_ip6[3] != bpf_htonl(SERV6_IP_3) ||
ctx->user_port != bpf_htons(SERV6_PORT))
return 0;
// u8 narrow loads:
for (i = 0; i < 4; i++) {
user_ip6 = 0;
user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[0] << 0;
user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[1] << 8;
user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[2] << 16;
user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[3] << 24;
if (ctx->user_ip6[i] != user_ip6)
return 0;
}
user_port = 0;
user_port |= ((volatile __u8 *)&ctx->user_port)[0] << 0;
user_port |= ((volatile __u8 *)&ctx->user_port)[1] << 8;
if (ctx->user_port != user_port)
return 0;
// u16 narrow loads:
for (i = 0; i < 4; i++) {
user_ip6 = 0;
user_ip6 |= ((volatile __u16 *)&ctx->user_ip6[i])[0] << 0;
user_ip6 |= ((volatile __u16 *)&ctx->user_ip6[i])[1] << 16;
if (ctx->user_ip6[i] != user_ip6)
return 0;
}
/* Bind to device and unbind it. */
if (bind_to_device(ctx))
return 0;
/* Test for misc socket options. */
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;
/* Set reuseport and unset */
if (bind_reuseport(ctx))
return 0;
ctx->user_ip6[0] = bpf_htonl(SERV6_REWRITE_IP_0);
ctx->user_ip6[1] = bpf_htonl(SERV6_REWRITE_IP_1);
ctx->user_ip6[2] = bpf_htonl(SERV6_REWRITE_IP_2);
ctx->user_ip6[3] = bpf_htonl(SERV6_REWRITE_IP_3);
ctx->user_port = bpf_htons(SERV6_REWRITE_PORT);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bind6_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <errno.h>
#include <string.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
char _license[] SEC("license") = "GPL";
struct test_info {
int x;
struct bpf_dynptr ptr;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct bpf_dynptr);
} array_map1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct test_info);
} array_map2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} array_map3 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array_map4 SEC(".maps");
struct sample {
int pid;
long value;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} ringbuf SEC(".maps");
int err, val;
static int get_map_val_dynptr(struct bpf_dynptr *ptr)
{
__u32 key = 0, *map_val;
bpf_map_update_elem(&array_map3, &key, &val, 0);
map_val = bpf_map_lookup_elem(&array_map3, &key);
if (!map_val)
return -ENOENT;
bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);
return 0;
}
/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
* bpf_ringbuf_submit/discard_dynptr call
*/
SEC("?raw_tp")
__failure __msg("Unreleased reference id=2")
int ringbuf_missing_release1(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
/* missing a call to bpf_ringbuf_discard/submit_dynptr */
return 0;
}
SEC("?raw_tp")
__failure __msg("Unreleased reference id=4")
int ringbuf_missing_release2(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
struct sample *sample;
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
if (!sample) {
bpf_ringbuf_discard_dynptr(&ptr1, 0);
bpf_ringbuf_discard_dynptr(&ptr2, 0);
return 0;
}
bpf_ringbuf_submit_dynptr(&ptr1, 0);
/* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */
return 0;
}
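/* For reference, a minimal sketch of the required pairing (the program name
 * is illustrative and not part of the original failure tests): every
 * reserve is matched by a submit or discard on all paths.
 */
SEC("?raw_tp")
__success
int ringbuf_release_example(void *ctx)
{
struct bpf_dynptr ptr;
/* reserve a sample ... */
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
/* ... and always release it before returning */
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}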
static int missing_release_callback_fn(__u32 index, void *data)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
/* missing a call to bpf_ringbuf_discard/submit_dynptr */
return 0;
}
/* Any dynptr reserved within a callback must be released there with bpf_ringbuf_submit/discard_dynptr */
SEC("?raw_tp")
__failure __msg("Unreleased reference id")
int ringbuf_missing_release_callback(void *ctx)
{
bpf_loop(10, missing_release_callback_fn, NULL, 0);
return 0;
}
/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int ringbuf_release_uninit_dynptr(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* A dynptr can't be used after it has been invalidated */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int use_after_invalid(void *ctx)
{
struct bpf_dynptr ptr;
char read_data[64];
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
bpf_ringbuf_submit_dynptr(&ptr, 0);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
return 0;
}
/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
SEC("?raw_tp")
__failure __msg("type=mem expected=ringbuf_mem")
int ringbuf_invalid_api(void *ctx)
{
struct bpf_dynptr ptr;
struct sample *sample;
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
if (!sample)
goto done;
sample->pid = 123;
/* invalid API use. need to use dynptr API to submit/discard */
bpf_ringbuf_submit(sample, 0);
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* Can't add a dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map1(void *ctx)
{
struct bpf_dynptr ptr;
int key = 0;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
/* this should fail */
bpf_map_update_elem(&array_map1, &key, &ptr, 0);
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* Can't add a struct with an embedded dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map2(void *ctx)
{
struct test_info x;
int key = 0;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);
/* this should fail */
bpf_map_update_elem(&array_map2, &key, &x, 0);
bpf_ringbuf_submit_dynptr(&x.ptr, 0);
return 0;
}
/* A data slice can't be accessed out of bounds */
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_ringbuf(void *ctx)
{
struct bpf_dynptr ptr;
void *data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
data = bpf_dynptr_data(&ptr, 0, 8);
if (!data)
goto done;
/* can't index out of bounds of the data slice */
val = *((char *)data + 8);
done:
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* A data slice can't be accessed out of bounds */
SEC("?tc")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
/* this should fail */
*(__u8*)(hdr + 1) = 1;
return SK_PASS;
}
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_map_value(void *ctx)
{
__u32 map_val;
struct bpf_dynptr ptr;
void *data;
get_map_val_dynptr(&ptr);
data = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
if (!data)
return 0;
/* can't index out of bounds of the data slice */
val = *((char *)data + (sizeof(map_val) + 1));
return 0;
}
/* A data slice can't be used after it has been released */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release1(void *ctx)
{
struct bpf_dynptr ptr;
struct sample *sample;
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
if (!sample)
goto done;
sample->pid = 123;
bpf_ringbuf_submit_dynptr(&ptr, 0);
/* this should fail */
val = sample->pid;
return 0;
done:
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* A data slice can't be used after it has been released.
*
* This tests the case where the data slice tracks a dynptr (ptr2)
* that is at a non-zero offset from the frame pointer (ptr1 is at fp,
* ptr2 is at fp - 16).
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release2(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
struct sample *sample;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
if (!sample)
goto done;
sample->pid = 23;
bpf_ringbuf_submit_dynptr(&ptr2, 0);
/* this should fail */
sample->pid = 23;
bpf_ringbuf_submit_dynptr(&ptr1, 0);
return 0;
done:
bpf_ringbuf_discard_dynptr(&ptr2, 0);
bpf_ringbuf_discard_dynptr(&ptr1, 0);
return 0;
}
/* A data slice must first be checked for NULL */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check1(void *ctx)
{
struct bpf_dynptr ptr;
void *data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
data = bpf_dynptr_data(&ptr, 0, 8);
/* missing if (!data) check */
/* this should fail */
*(__u8 *)data = 3;
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* A data slice can't be dereferenced if it wasn't checked for null */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check2(void *ctx)
{
struct bpf_dynptr ptr;
__u64 *data1, *data2;
bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
data1 = bpf_dynptr_data(&ptr, 0, 8);
data2 = bpf_dynptr_data(&ptr, 0, 8);
if (data1)
/* this should fail */
*data2 = 3;
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
* dynptr argument
*/
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int invalid_helper1(void *ctx)
{
struct bpf_dynptr ptr;
get_map_val_dynptr(&ptr);
/* this should fail */
bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");
return 0;
}
/* A dynptr can't be passed into a helper function at a non-zero offset */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=-8")
int invalid_helper2(void *ctx)
{
struct bpf_dynptr ptr;
char read_data[64];
get_map_val_dynptr(&ptr);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
return 0;
}
/* A bpf_dynptr is invalidated if it's been written into */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int invalid_write1(void *ctx)
{
struct bpf_dynptr ptr;
void *data;
__u8 x = 0;
get_map_val_dynptr(&ptr);
memcpy(&ptr, &x, sizeof(x));
/* this should fail */
data = bpf_dynptr_data(&ptr, 0, 1);
__sink(data);
return 0;
}
/*
* A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
* offset
*/
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write2(void *ctx)
{
struct bpf_dynptr ptr;
char read_data[64];
__u8 x = 0;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
memcpy((void *)&ptr + 8, &x, sizeof(x));
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/*
* A bpf_dynptr can't be used as a dynptr if it has been written into at a
* non-const offset
*/
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write3(void *ctx)
{
struct bpf_dynptr ptr;
char stack_buf[16];
unsigned long len;
__u8 x = 0;
bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
memcpy(stack_buf, &val, sizeof(val));
len = stack_buf[0] & 0xf;
memcpy((void *)&ptr + len, &x, sizeof(x));
/* this should fail */
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
static int invalid_write4_callback(__u32 index, void *data)
{
*(__u32 *)data = 123;
return 0;
}
/* If the dynptr is written into in a callback function, it should
* be invalidated as a dynptr
*/
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write4(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
bpf_loop(10, invalid_write4_callback, &ptr, 0);
/* this should fail */
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* A globally-defined bpf_dynptr can't be used (it must reside on the stack) */
struct bpf_dynptr global_dynptr;
SEC("?raw_tp")
__failure __msg("type=map_value expected=fp")
int global(void *ctx)
{
/* this should fail */
bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);
bpf_ringbuf_discard_dynptr(&global_dynptr, 0);
return 0;
}
/* A direct read should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read1(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
/* this should fail */
val = *(int *)&ptr;
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* A direct read at an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset")
int invalid_read2(void *ctx)
{
struct bpf_dynptr ptr;
char read_data[64];
get_map_val_dynptr(&ptr);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
return 0;
}
/* A direct read at an offset into the lower stack slot should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read3(void *ctx)
{
struct bpf_dynptr ptr1, ptr2;
bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);
/* this should fail */
memcpy(&val, (void *)&ptr1 + 8, sizeof(val));
bpf_ringbuf_discard_dynptr(&ptr1, 0);
bpf_ringbuf_discard_dynptr(&ptr2, 0);
return 0;
}
static int invalid_read4_callback(__u32 index, void *data)
{
/* this should fail */
val = *(__u32 *)data;
return 0;
}
/* A direct read within a callback function should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read4(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
bpf_loop(10, invalid_read4_callback, &ptr, 0);
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}
/* Initializing a dynptr at an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=0")
int invalid_offset(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* Can't release a dynptr twice */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
bpf_ringbuf_discard_dynptr(&ptr, 0);
/* this second release should fail */
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
static int release_twice_callback_fn(__u32 index, void *data)
{
/* this should fail */
bpf_ringbuf_discard_dynptr(data, 0);
return 0;
}
/* Test that releasing a dynptr twice, where one of the releases happens
* within a callback function, fails
*/
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice_callback(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);
bpf_ringbuf_discard_dynptr(&ptr, 0);
bpf_loop(10, release_twice_callback_fn, &ptr, 0);
return 0;
}
/* Reject unsupported local mem types for dynptr_from_mem API */
SEC("?raw_tp")
__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
int dynptr_from_mem_invalid_api(void *ctx)
{
struct bpf_dynptr ptr;
int x = 0;
/* this should fail */
bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);
return 0;
}
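/* The tests below are written in inline assembly so the dynptr's exact
 * stack slots (e.g. fp-16) can be addressed directly while exercising the
 * verifier's state-pruning logic.
 */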
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 0xeB9F; \
r6 = %[ringbuf] ll; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto pjmp1; \
goto pjmp2; \
pjmp1: \
*(u64 *)(r10 - 16) = r9; \
pjmp2: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__success __msg("12: safe") __log_level(2)
int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 0xeB9F; \
r6 = %[ringbuf] ll; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto stjmp1; \
goto stjmp2; \
stjmp1: \
r9 = r9; \
stjmp2: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
{
asm volatile (
"r6 = %[array_map4] ll; \
r7 = %[ringbuf] ll; \
r1 = r6; \
r2 = r10; \
r2 += -8; \
r9 = 0; \
*(u64 *)(r2 + 0) = r9; \
r3 = r10; \
r3 += -24; \
r9 = 0xeB9FeB9F; \
*(u64 *)(r10 - 16) = r9; \
*(u64 *)(r10 - 24) = r9; \
r9 = 0; \
r4 = 0; \
r8 = r2; \
call %[bpf_map_update_elem]; \
r1 = r6; \
r2 = r8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto tjmp1; \
exit; \
tjmp1: \
r8 = r0; \
r1 = r7; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
r0 = *(u64 *)(r0 + 0); \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto tjmp2; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
goto tjmp3; \
tjmp2: \
*(u64 *)(r10 - 8) = r9; \
*(u64 *)(r10 - 16) = r9; \
r1 = r8; \
r1 += 8; \
r2 = 0; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_dynptr_from_mem]; \
tjmp3: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_map_update_elem),
__imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_dynptr_from_mem),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(array_map4),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("dynptr has to be at a constant offset") __log_level(2)
int dynptr_var_off_overwrite(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 16; \
*(u32 *)(r10 - 4) = r9; \
r8 = *(u32 *)(r10 - 4); \
if r8 >= 0 goto vjmp1; \
r0 = 1; \
exit; \
vjmp1: \
if r8 <= 16 goto vjmp2; \
r0 = 1; \
exit; \
vjmp2: \
r8 &= 16; \
r1 = %[ringbuf] ll; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -32; \
r4 += r8; \
call %[bpf_ringbuf_reserve_dynptr]; \
r9 = 0xeB9F; \
*(u64 *)(r10 - 16) = r9; \
r1 = r10; \
r1 += -32; \
r1 += r8; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
{
asm volatile (
"r6 = %[ringbuf] ll; \
r7 = %[array_map4] ll; \
r1 = r7; \
r2 = r10; \
r2 += -8; \
r9 = 0; \
*(u64 *)(r2 + 0) = r9; \
r3 = r2; \
r4 = 0; \
r8 = r2; \
call %[bpf_map_update_elem]; \
r1 = r7; \
r2 = r8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto sjmp1; \
exit; \
sjmp1: \
r7 = r0; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -24; \
call %[bpf_ringbuf_reserve_dynptr]; \
*(u64 *)(r10 - 16) = r9; \
r1 = r7; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_dynptr_from_mem]; \
r1 = r10; \
r1 += -512; \
r2 = 488; \
r3 = r10; \
r3 += -24; \
r4 = 0; \
r5 = 0; \
call %[bpf_dynptr_read]; \
r8 = 1; \
if r0 != 0 goto sjmp2; \
r8 = 0; \
sjmp2: \
r1 = r10; \
r1 += -24; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_map_update_elem),
__imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm(bpf_dynptr_from_mem),
__imm(bpf_dynptr_read),
__imm_addr(ringbuf),
__imm_addr(array_map4)
: __clobber_all
);
return 0;
}
/* Test that overwriting an unreferenced dynptr is allowed. */
SEC("?raw_tp")
__success
int dynptr_overwrite_unref(void *ctx)
{
struct bpf_dynptr ptr;
if (get_map_val_dynptr(&ptr))
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
return 0;
}
/* Test that slices are invalidated on reinitializing a dynptr. */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int dynptr_invalidate_slice_reinit(void *ctx)
{
struct bpf_dynptr ptr;
__u8 *p;
if (get_map_val_dynptr(&ptr))
return 0;
p = bpf_dynptr_data(&ptr, 0, 1);
if (!p)
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
/* this should fail */
return *p;
}
/* Invalidation of dynptr slices on destruction of dynptr should not miss
* mem_or_null pointers.
*/
SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
struct bpf_dynptr ptr;
__u8 *p;
if (get_map_val_dynptr(&ptr))
return 0;
p = bpf_dynptr_data(&ptr, 0, 1);
*(__u8 *)&ptr = 0;
/* this should fail */
bpf_this_cpu_ptr(p);
return 0;
}
/* Destruction of a dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
__failure __msg("R7 invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u8 *p1, *p2;
if (get_map_val_dynptr(&ptr1))
return 0;
if (get_map_val_dynptr(&ptr2))
return 0;
p1 = bpf_dynptr_data(&ptr1, 0, 1);
if (!p1)
return 0;
p2 = bpf_dynptr_data(&ptr2, 0, 1);
if (!p2)
return 0;
*(__u8 *)&ptr1 = 0;
/* this should fail */
return *p1;
}
/* Invalidation of slices should be scoped and should not prevent dereferencing
* slices of another dynptr after destroying an unrelated dynptr
*/
SEC("?raw_tp")
__success
int dynptr_invalidate_slice_success(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u8 *p1, *p2;
if (get_map_val_dynptr(&ptr1))
return 1;
if (get_map_val_dynptr(&ptr2))
return 1;
p1 = bpf_dynptr_data(&ptr1, 0, 1);
if (!p1)
return 1;
p2 = bpf_dynptr_data(&ptr2, 0, 1);
if (!p2)
return 1;
*(__u8 *)&ptr1 = 0;
return *p2;
}
/* Overwriting referenced dynptr should be rejected */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int dynptr_overwrite_ref(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
/* this should fail */
if (get_map_val_dynptr(&ptr))
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* Reject writes to dynptr slot from bpf_dynptr_read */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int dynptr_read_into_slot(void *ctx)
{
union {
struct {
char _pad[48];
struct bpf_dynptr ptr;
};
char buf[64];
} data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
/* this should fail */
bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
return 0;
}
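/* Note on bpf_dynptr_slice()/bpf_dynptr_slice_rdwr(): the caller-provided
 * buffer is scratch space; the returned pointer may reference the packet
 * directly or the buffer, depending on whether the requested range is
 * linear.
 */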
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
__failure __msg("R0 cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
/* this should fail */
hdr->h_proto = 1;
return SK_PASS;
}
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice1(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
val = hdr->h_proto;
if (bpf_skb_pull_data(skb, skb->len))
return SK_DROP;
/* this should fail */
val = hdr->h_proto;
return SK_PASS;
}
/* The read-write data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice2(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
hdr->h_proto = 123;
if (bpf_skb_pull_data(skb, skb->len))
return SK_DROP;
/* this should fail */
hdr->h_proto = 1;
return SK_PASS;
}
/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice3(struct __sk_buff *skb)
{
char write_data[64] = "hello there, world!!";
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
val = hdr->h_proto;
bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
/* this should fail */
val = hdr->h_proto;
return SK_PASS;
}
/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice4(struct __sk_buff *skb)
{
char write_data[64] = "hello there, world!!";
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
hdr->h_proto = 123;
bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
/* this should fail */
hdr->h_proto = 1;
return SK_PASS;
}
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int xdp_invalid_data_slice1(struct xdp_md *xdp)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_xdp(xdp, 0, &ptr);
hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
val = hdr->h_proto;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
return XDP_DROP;
/* this should fail */
val = hdr->h_proto;
return XDP_PASS;
}
/* The read-write data slice is invalidated whenever a helper changes packet data */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int xdp_invalid_data_slice2(struct xdp_md *xdp)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_xdp(xdp, 0, &ptr);
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
if (!hdr)
return SK_DROP;
hdr->h_proto = 9;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
return XDP_DROP;
/* this should fail */
hdr->h_proto = 1;
return XDP_PASS;
}
/* Only supported prog types can create skb-type dynptrs */
SEC("?raw_tp")
__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
int skb_invalid_ctx(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_from_skb(ctx, 0, &ptr);
return 0;
}
/* Reject writes to dynptr slot for uninit arg */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int uninit_write_into_slot(void *ctx)
{
struct {
char buf[64];
struct bpf_dynptr ptr;
} data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
/* this should fail */
bpf_get_current_comm(data.buf, 80);
return 0;
}
/* Only supported prog types can create xdp-type dynptrs */
SEC("?raw_tp")
__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
int xdp_invalid_ctx(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_from_xdp(ctx, 0, &ptr);
return 0;
}
__u32 hdr_size = sizeof(struct ethhdr);
/* Can't pass in variable-sized len to bpf_dynptr_slice */
SEC("?tc")
__failure __msg("unbounded memory access")
int dynptr_slice_var_len1(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ethhdr *hdr;
char buffer[sizeof(*hdr)] = {};
bpf_dynptr_from_skb(skb, 0, &ptr);
/* this should fail */
hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
if (!hdr)
return SK_DROP;
return SK_PASS;
}
/* Can't pass in variable-sized len to bpf_dynptr_slice */
SEC("?tc")
__failure __msg("must be a known constant")
int dynptr_slice_var_len2(struct __sk_buff *skb)
{
char buffer[sizeof(struct ethhdr)] = {};
struct bpf_dynptr ptr;
struct ethhdr *hdr;
bpf_dynptr_from_skb(skb, 0, &ptr);
if (hdr_size <= sizeof(buffer)) {
/* this should fail */
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
if (!hdr)
return SK_DROP;
hdr->h_proto = 12;
}
return SK_PASS;
}
static int callback(__u32 index, void *data)
{
*(__u32 *)data = 123;
return 0;
}
/* If the dynptr is written into in a callback function, its data
* slices should be invalidated as well.
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int invalid_data_slices(void *ctx)
{
struct bpf_dynptr ptr;
__u32 *slice;
if (get_map_val_dynptr(&ptr))
return 0;
slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
if (!slice)
return 0;
bpf_loop(10, callback, &ptr, 0);
/* this should fail */
*slice = 1;
return 0;
}
/* Program types that don't allow writes to packet data should fail if
* bpf_dynptr_slice_rdwr is called
*/
SEC("cgroup_skb/ingress")
__failure __msg("the prog does not allow writes to packet data")
int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
{
char buffer[sizeof(struct ethhdr)] = {};
struct bpf_dynptr ptr;
struct ethhdr *hdr;
bpf_dynptr_from_skb(skb, 0, &ptr);
/* this should fail since cgroup_skb doesn't allow
* changing packet data
*/
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
__sink(hdr);
return 0;
}
/* bpf_dynptr_adjust can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_adjust_invalid(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_adjust(&ptr, 1, 2);
return 0;
}
/* bpf_dynptr_is_null can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_is_null_invalid(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_is_null(&ptr);
return 0;
}
/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_is_rdonly_invalid(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_is_rdonly(&ptr);
return 0;
}
/* bpf_dynptr_size can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_size_invalid(void *ctx)
{
struct bpf_dynptr ptr;
/* this should fail */
bpf_dynptr_size(&ptr);
return 0;
}
/* Only initialized dynptrs can be cloned */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int clone_invalid1(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
/* this should fail */
bpf_dynptr_clone(&ptr1, &ptr2);
return 0;
}
/* Can't overwrite an existing dynptr when cloning */
SEC("?xdp")
__failure __msg("cannot overwrite referenced dynptr")
int clone_invalid2(struct xdp_md *xdp)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr clone;
bpf_dynptr_from_xdp(xdp, 0, &ptr1);
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
/* this should fail */
bpf_dynptr_clone(&ptr1, &clone);
bpf_ringbuf_submit_dynptr(&clone, 0);
return 0;
}
/* Invalidating a dynptr should invalidate its clones */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate1(void *ctx)
{
struct bpf_dynptr clone;
struct bpf_dynptr ptr;
char read_data[64];
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
bpf_ringbuf_submit_dynptr(&ptr, 0);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
return 0;
}
/* Invalidating a dynptr should invalidate its parent */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate2(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone;
char read_data[64];
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
bpf_ringbuf_submit_dynptr(&clone, 0);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
return 0;
}
/* Invalidating a dynptr should invalidate its siblings */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate3(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone1;
struct bpf_dynptr clone2;
char read_data[64];
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone1);
bpf_dynptr_clone(&ptr, &clone2);
bpf_ringbuf_submit_dynptr(&clone2, 0);
/* this should fail */
bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
return 0;
}
/* Invalidating a dynptr should invalidate any data slices
* of its clones
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate4(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone;
int *data;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
data = bpf_dynptr_data(&clone, 0, sizeof(val));
if (!data)
return 0;
bpf_ringbuf_submit_dynptr(&ptr, 0);
/* this should fail */
*data = 123;
return 0;
}
/* Invalidating a dynptr should invalidate any data slices
* of its parent
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate5(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone;
int *data;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
data = bpf_dynptr_data(&ptr, 0, sizeof(val));
if (!data)
return 0;
bpf_dynptr_clone(&ptr, &clone);
bpf_ringbuf_submit_dynptr(&clone, 0);
/* this should fail */
*data = 123;
return 0;
}
/* Invalidating a dynptr should invalidate any data slices
* of its sibling
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate6(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone1;
struct bpf_dynptr clone2;
int *data;
bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone1);
bpf_dynptr_clone(&ptr, &clone2);
data = bpf_dynptr_data(&clone1, 0, sizeof(val));
if (!data)
return 0;
bpf_ringbuf_submit_dynptr(&clone2, 0);
/* this should fail */
*data = 123;
return 0;
}
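/* For contrast, a minimal sketch of a valid clone flow (the program name is
 * illustrative, not part of the original tests): read through the clone
 * while the parent reference is still live, then release the parent once.
 */
SEC("?raw_tp")
__success
int clone_release_example(void *ctx)
{
struct bpf_dynptr ptr;
struct bpf_dynptr clone;
char read_data[64];
bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
/* reading via the clone is fine while the reference is held */
bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
/* a single release of the parent covers the shared reference */
bpf_ringbuf_submit_dynptr(&ptr, 0);
return 0;
}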
/* A skb clone's data slices should be invalid anytime packet data changes */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int clone_skb_packet_data(struct __sk_buff *skb)
{
char buffer[sizeof(__u32)] = {};
struct bpf_dynptr clone;
struct bpf_dynptr ptr;
__u32 *data;
bpf_dynptr_from_skb(skb, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
if (!data)
return XDP_DROP;
if (bpf_skb_pull_data(skb, skb->len))
return SK_DROP;
/* this should fail */
*data = 123;
return 0;
}
/* A xdp clone's data slices should be invalid anytime packet data changes */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int clone_xdp_packet_data(struct xdp_md *xdp)
{
char buffer[sizeof(__u32)] = {};
struct bpf_dynptr clone;
struct bpf_dynptr ptr;
struct ethhdr *hdr;
__u32 *data;
bpf_dynptr_from_xdp(xdp, 0, &ptr);
bpf_dynptr_clone(&ptr, &clone);
data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
if (!data)
return XDP_DROP;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
return XDP_DROP;
/* this should fail */
*data = 123;
return 0;
}
/* Buffers that are provided must be sufficiently long */
SEC("?cgroup_skb/egress")
__failure __msg("memory, len pair leads to invalid memory access")
int test_dynptr_skb_small_buff(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
char buffer[8] = {};
__u64 *data;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1;
return 1;
}
/* this should fail: a 9-byte slice is requested but the backing buffer is only 8 bytes */
data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
return !!data;
}
| linux-master | tools/testing/selftests/bpf/progs/dynptr_fail.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
int uprobe_byname_parm1 = 0;
int uprobe_byname_ran = 0;
int uretprobe_byname_rc = 0;
int uretprobe_byname_ret = 0;
int uretprobe_byname_ran = 0;
u64 uprobe_byname2_parm1 = 0;
int uprobe_byname2_ran = 0;
u64 uretprobe_byname2_rc = 0;
int uretprobe_byname2_ran = 0;
int test_pid;
int a[8];
/* This program cannot auto-attach, but that should not stop other
* programs from attaching.
*/
SEC("uprobe")
int handle_uprobe_noautoattach(struct pt_regs *ctx)
{
return 0;
}
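/* FUNC_REG_ARG_CNT (from bpf_misc.h) is the number of function arguments
 * the architecture passes in registers; arguments beyond that count are
 * only declared and recorded when the architecture supports them.
 */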
SEC("uprobe//proc/self/exe:autoattach_trigger_func")
int BPF_UPROBE(handle_uprobe_byname
, int arg1
, int arg2
, int arg3
#if FUNC_REG_ARG_CNT > 3
, int arg4
#endif
#if FUNC_REG_ARG_CNT > 4
, int arg5
#endif
#if FUNC_REG_ARG_CNT > 5
, int arg6
#endif
#if FUNC_REG_ARG_CNT > 6
, int arg7
#endif
#if FUNC_REG_ARG_CNT > 7
, int arg8
#endif
)
{
uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx);
uprobe_byname_ran = 1;
a[0] = arg1;
a[1] = arg2;
a[2] = arg3;
#if FUNC_REG_ARG_CNT > 3
a[3] = arg4;
#endif
#if FUNC_REG_ARG_CNT > 4
a[4] = arg5;
#endif
#if FUNC_REG_ARG_CNT > 5
a[5] = arg6;
#endif
#if FUNC_REG_ARG_CNT > 6
a[6] = arg7;
#endif
#if FUNC_REG_ARG_CNT > 7
a[7] = arg8;
#endif
return 0;
}
SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
int BPF_URETPROBE(handle_uretprobe_byname, int ret)
{
uretprobe_byname_rc = PT_REGS_RC_CORE(ctx);
uretprobe_byname_ret = ret;
uretprobe_byname_ran = 2;
return 0;
}
SEC("uprobe/libc.so.6:fopen")
int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
uprobe_byname2_parm1 = (u64)(long)pathname;
uprobe_byname2_ran = 3;
return 0;
}
SEC("uretprobe/libc.so.6:fopen")
int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
uretprobe_byname2_rc = (u64)(long)ret;
uretprobe_byname2_ran = 4;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___diff x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
#include <string.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <sys/socket.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 3);
} results SEC(".maps");
static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
void *iph, __u32 ip_size,
struct tcphdr *tcph)
{
__u32 thlen = tcph->doff * 4;
if (tcph->syn && !tcph->ack) {
// packet should carry only an MSS option: 20-byte header + 4 option bytes = 24
if (thlen != 24)
return 0;
if ((void *)tcph + thlen > data_end)
return 0;
return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
}
return 0;
}
static __always_inline void check_syncookie(void *ctx, void *data,
void *data_end)
{
struct bpf_sock_tuple tup;
struct bpf_sock *sk;
struct ethhdr *ethh;
struct iphdr *ipv4h;
struct ipv6hdr *ipv6h;
struct tcphdr *tcph;
int ret;
__u32 key_mss = 2;
__u32 key_gen = 1;
__u32 key = 0;
__s64 seq_mss;
ethh = data;
if (ethh + 1 > data_end)
return;
switch (bpf_ntohs(ethh->h_proto)) {
case ETH_P_IP:
ipv4h = data + sizeof(struct ethhdr);
if (ipv4h + 1 > data_end)
return;
if (ipv4h->ihl != 5)
return;
tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
if (tcph + 1 > data_end)
return;
tup.ipv4.saddr = ipv4h->saddr;
tup.ipv4.daddr = ipv4h->daddr;
tup.ipv4.sport = tcph->source;
tup.ipv4.dport = tcph->dest;
sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return;
if (sk->state != BPF_TCP_LISTEN)
goto release;
seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
tcph);
ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
tcph, sizeof(*tcph));
break;
case ETH_P_IPV6:
ipv6h = data + sizeof(struct ethhdr);
if (ipv6h + 1 > data_end)
return;
if (ipv6h->nexthdr != IPPROTO_TCP)
return;
tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
if (tcph + 1 > data_end)
return;
memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
tup.ipv6.sport = tcph->source;
tup.ipv6.dport = tcph->dest;
sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return;
if (sk->state != BPF_TCP_LISTEN)
goto release;
seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
tcph);
ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
tcph, sizeof(*tcph));
break;
default:
return;
}
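/* bpf_tcp_gen_syncookie() packs its result: the SYN cookie is in the low
 * 32 bits and the corresponding MSS in the upper 32 bits.
 */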
if (seq_mss > 0) {
__u32 cookie = (__u32)seq_mss;
__u32 mss = seq_mss >> 32;
bpf_map_update_elem(&results, &key_gen, &cookie, 0);
bpf_map_update_elem(&results, &key_mss, &mss, 0);
}
if (ret == 0) {
__u32 cookie = bpf_ntohl(tcph->ack_seq) - 1;
bpf_map_update_elem(&results, &key, &cookie, 0);
}
release:
bpf_sk_release(sk);
}
SEC("tc")
int check_syncookie_clsact(struct __sk_buff *skb)
{
check_syncookie(skb, (void *)(long)skb->data,
(void *)(long)skb->data_end);
return TC_ACT_OK;
}
SEC("xdp")
int check_syncookie_xdp(struct xdp_md *ctx)
{
check_syncookie(ctx, (void *)(long)ctx->data,
(void *)(long)ctx->data_end);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
__u64 percpu_array_elem_sum = 0;
__u64 percpu_hash_elem_sum = 0;
__u64 percpu_lru_hash_elem_sum = 0;
const volatile int nr_cpus;
const volatile int my_pid;
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} percpu_array_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u64);
__type(value, __u64);
} percpu_hash_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u64);
__type(value, __u64);
} percpu_lru_hash_map SEC(".maps");
struct read_percpu_elem_ctx {
void *map;
__u64 sum;
};
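/* bpf_loop() callback: look up this map's value for CPU 'index' and
 * accumulate it into ctx->sum.
 */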
static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
{
__u64 key = 0;
__u64 *value;
value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
if (value)
ctx->sum += *value;
return 0;
}
SEC("tp/syscalls/sys_enter_getuid")
int sysenter_getuid(const void *ctx)
{
struct read_percpu_elem_ctx map_ctx;
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
map_ctx.map = &percpu_array_map;
map_ctx.sum = 0;
bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
percpu_array_elem_sum = map_ctx.sum;
map_ctx.map = &percpu_hash_map;
map_ctx.sum = 0;
bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
percpu_hash_elem_sum = map_ctx.sum;
map_ctx.map = &percpu_lru_hash_map;
map_ctx.sum = 0;
bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
percpu_lru_hash_elem_sum = map_ctx.sum;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct Small {
long x;
};
struct Big {
long x;
long y;
};
__noinline int foo(const struct Big *big)
{
if (!big)
return 0;
return bpf_get_prandom_u32() < big->y;
}
SEC("cgroup_skb/ingress")
__failure __msg("invalid indirect access to stack")
int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };
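/* Passing an 8-byte struct Small where foo() expects a 16-byte struct Big
 * makes the read of big->y fall outside the stack object, which the
 * verifier rejects.
 */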
return foo((struct Big *)&small) ? 1 : 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func10.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__noinline
int test_ctx_global_func(struct __sk_buff *skb)
{
volatile int retval = 1;
return retval;
}
SEC("freplace/test_pkt_access")
int new_test_pkt_access(struct __sk_buff *skb)
{
return test_ctx_global_func(skb);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_global_func.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_devmap_val));
__uint(max_entries, 4);
} dm_ports SEC(".maps");
SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&dm_ports, 1, 0);
}
/* invalid program on a DEVMAP entry;
 * the plain "xdp" SEC name means the expected attach type is not set
 */
SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
}
/* valid program on DEVMAP entry via SEC name;
* has access to egress and ingress ifindex
*/
SEC("xdp/devmap")
int xdp_dummy_dm(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
unsigned int len = data_end - data;
bpf_trace_printk(fmt, sizeof(fmt),
ctx->ingress_ifindex, ctx->egress_ifindex, len);
return XDP_PASS;
}
SEC("xdp.frags/devmap")
int xdp_dummy_dm_frags(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c |
#include "core_reloc_types.h"
void f(struct core_reloc_size___diff_offs x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
#define SERV6_IP_1 0x12345678
#define SERV6_IP_2 0x00000000
#define SERV6_IP_3 0x0000abcd
#define SERV6_PORT 6060
SEC("cgroup/recvmsg6")
int recvmsg6_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock *sk;
sk = ctx->sk;
if (!sk)
return 1;
if (sk->family != AF_INET6)
return 1;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 1;
if (!get_set_sk_priority(ctx))
return 1;
ctx->user_ip6[0] = bpf_htonl(SERV6_IP_0);
ctx->user_ip6[1] = bpf_htonl(SERV6_IP_1);
ctx->user_ip6[2] = bpf_htonl(SERV6_IP_2);
ctx->user_ip6[3] = bpf_htonl(SERV6_IP_3);
ctx->user_port = bpf_htons(SERV6_PORT);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/recvmsg6_prog.c |
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_msg SEC(".maps");
SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
return SK_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_array_field x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_array_field.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("xdp")
__description("XDP pkt read, pkt_end mangling, bad access 1")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void end_mangling_bad_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
r3 += 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end mangling, bad access 2")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void end_mangling_bad_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
r3 -= 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_corner_case_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_1_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 4); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_2_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 9; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 9); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_pkt_data_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 6; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 6); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end > pkt_data', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_pkt_end_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 6; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 6); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_2_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_corner_case_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' < pkt_end, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 4); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 9; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 9); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end < pkt_data', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_pkt_end_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
r0 = *(u32*)(r1 - 5); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_5(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 6; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_2_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_corner_case_good_access_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_5(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 4); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 9; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 9); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_corner_case_good_access_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_1_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 4); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_end_bad_access_2_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 9; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 9); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void end_pkt_data_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
r0 = *(u32*)(r1 - 5); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 6; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 7; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_end <= pkt_data', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data]); \
r3 = *(u32*)(r1 + %[xdp_md_data_end]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_5(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 4); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_5(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 > r3 goto l0_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_9(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 9; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 9); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_9(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r1 > r3 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_pkt_meta_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_10(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 6; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 6); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_corner_case_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data > pkt_meta', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_10(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 > r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_pkt_data_good_access_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_11(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 6; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 6); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_6(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_11(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 < r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_corner_case_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_1_1(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 4); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 < r1 goto l0_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_12(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 9; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 9); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_12(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r3 < r1 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_pkt_data_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
r0 = *(u32*)(r1 - 5); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_13(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 6; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_7(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_13(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 >= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_corner_case_good_access_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_1_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 4); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_3(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 >= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_14(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 9; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 9); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_14(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r3 >= r1 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_corner_case_good_access_8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 8); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 1")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_1_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 4); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_data_bad_access_2_8(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r1 <= r3 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_15(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 9; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 9); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_15(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r1 <= r3 goto l0_%=; \
goto l1_%=; \
l0_%=: r0 = *(u64*)(r1 - 7); \
l1_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void data_pkt_meta_good_access_2(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
r0 = *(u32*)(r1 - 5); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_bad_access_16(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 6; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 6); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', bad access 2")
__failure __msg("R1 offset is outside of the packet")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void pkt_meta_bad_access_2_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
l0_%=: r0 = *(u32*)(r1 - 5); \
r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', corner case, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void meta_corner_case_good_access_4(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 7; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 7); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
SEC("xdp")
__description("XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void corner_case_1_good_access_16(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \
r3 = *(u32*)(r1 + %[xdp_md_data]); \
r1 = r2; \
r1 += 8; \
if r3 <= r1 goto l0_%=; \
r0 = *(u64*)(r1 - 8); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
__imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern bool CONFIG_PREEMPT __kconfig __weak;
extern const int bpf_task_storage_busy __ksym;
char _license[] SEC("license") = "GPL";
int pid = 0;
int busy = 0;
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} task SEC(".maps");
SEC("raw_tp/sys_enter")
int BPF_PROG(read_bpf_task_storage_busy)
{
int *value;
if (!CONFIG_PREEMPT)
return 0;
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
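/* bpf_task_storage_busy is the kernel's per-CPU counter bumped around
 * task-local storage map operations to detect recursion; bpf_this_cpu_ptr()
 * on the __ksym yields the current CPU's slot.
 */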
value = bpf_this_cpu_ptr(&bpf_task_storage_busy);
if (value)
busy = *value;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Intel */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "xsk_xdp_metadata.h"
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, 1);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
} xsk SEC(".maps");
static unsigned int idx;
int count = 0;
SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)
{
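/* The third bpf_redirect_map() argument is the action returned when the
 * redirect cannot happen, e.g. when no AF_XDP socket is bound at index 0.
 */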
return bpf_redirect_map(&xsk, 0, XDP_DROP);
}
SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)
{
/* Drop every other packet */
if (idx++ % 2)
return XDP_DROP;
return bpf_redirect_map(&xsk, 0, XDP_DROP);
}
SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
{
void *data, *data_meta;
struct xdp_info *meta;
int err;
/* Reserve enough for all custom metadata. */
err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));
if (err)
return XDP_DROP;
data = (void *)(long)xdp->data;
data_meta = (void *)(long)xdp->data_meta;
if (data_meta + sizeof(struct xdp_info) > data)
return XDP_DROP;
meta = data_meta;
meta->count = count++;
return bpf_redirect_map(&xsk, 0, XDP_DROP);
}
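/* Consumer-side sketch (user space, not part of this BPF object): the
 * metadata reserved above ends up immediately before the packet data in the
 * umem frame, so after reading an RX descriptor the counter can be fetched
 * roughly like this, assuming "data" points at the packet start:
 *
 *	struct xdp_info *meta = (struct xdp_info *)(data - sizeof(*meta));
 *	__u64 count = meta->count;
 */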
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xsk_xdp_progs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
{
int status;
status = tasks_kfunc_map_insert(task);
if (status)
return NULL;
return tasks_kfunc_map_value_lookup(task);
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
struct __tasks_kfunc_map_value *v;
v = insert_lookup_task(task);
if (!v)
return 0;
/* Can't invoke bpf_task_acquire() on an untrusted pointer. */
acquired = bpf_task_acquire(v->task);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
/* Can't invoke bpf_task_acquire() on a random frame pointer. */
acquired = bpf_task_acquire((struct task_struct *)&stack_task);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}
SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
acquired = bpf_task_acquire(task);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}
SEC("kretprobe/free_task")
__failure __msg("calling kernel function bpf_task_acquire is not allowed")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
bpf_rcu_read_lock();
if (!task) {
bpf_rcu_read_unlock();
return 0;
}
/* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
acquired = bpf_task_acquire(task);
if (acquired)
bpf_task_release(acquired);
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
/* Can't invoke bpf_task_acquire() on a NULL pointer. */
acquired = bpf_task_acquire(NULL);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
acquired = bpf_task_acquire(task);
/* Acquired task is never released. */
__sink(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
{
struct task_struct *kptr;
struct __tasks_kfunc_map_value *v;
v = insert_lookup_task(task);
if (!v)
return 0;
kptr = bpf_kptr_xchg(&v->task, NULL);
if (!kptr)
return 0;
/* Kptr retrieved from map is never released. */
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
acquired = bpf_task_acquire(task);
/* Can't invoke bpf_task_release() on an acquired task without a NULL check. */
bpf_task_release(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value *v;
v = insert_lookup_task(task);
if (!v)
return 0;
/* Can't invoke bpf_task_release() on an untrusted pointer. */
bpf_task_release(v->task);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired = (struct task_struct *)&clone_flags;
/* Cannot release random frame pointer. */
bpf_task_release(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
struct __tasks_kfunc_map_value local, *v;
long status;
struct task_struct *acquired, *old;
s32 pid;
status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
if (status)
return 0;
local.task = NULL;
status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
if (status)
return status;
v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
if (!v)
return -ENOENT;
acquired = bpf_task_acquire(task);
if (!acquired)
return -EEXIST;
old = bpf_kptr_xchg(&v->task, acquired);
/* old cannot be passed to bpf_task_release() without a NULL check. */
bpf_task_release(old);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("release kernel function bpf_task_release expects")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
/* Cannot release trusted task pointer which was not acquired. */
bpf_task_release(task);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
acquired = bpf_task_from_pid(task->pid);
/* Releasing bpf_task_from_pid() lookup without a NULL check. */
bpf_task_release(acquired);
return 0;
}
SEC("lsm/task_free")
__failure __msg("reg type unsupported for arg#0 function")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
struct task_struct *acquired;
/* The argument of the lsm task_free hook is untrusted. */
acquired = bpf_task_acquire(task);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
{
bpf_strncmp(task->comm, 17, "foo");
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("access beyond the end of member comm")
int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
{
bpf_strncmp(task->comm + 1, 16, "foo");
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("write into memory")
int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
{
bpf_probe_read_kernel(task->comm, 16, task->comm);
return 0;
}
SEC("fentry/__set_task_comm")
__failure __msg("R1 type=ptr_ expected")
int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
{
/*
* task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee
* its safety. Hence it cannot be accessed with normal load insns.
*/
bpf_strncmp(task->comm, 16, "foo");
return 0;
}
SEC("tp_btf/task_newtask")
__failure __msg("R1 must be referenced or trusted")
int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
{
struct task_struct *local;
struct __tasks_kfunc_map_value *v;
if (tasks_kfunc_map_insert(task))
return 0;
v = tasks_kfunc_map_value_lookup(task);
if (!v)
return 0;
bpf_rcu_read_lock();
local = v->task;
if (!local) {
bpf_rcu_read_unlock();
return 0;
}
/* Can't release a kptr that's still stored in a map. */
bpf_task_release(local);
bpf_rcu_read_unlock();
return 0;
}
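/* For contrast with the failure cases above, a minimal sketch (not taken from
 * the original file; the program name is illustrative) of the pattern the
 * verifier does accept: acquire, NULL-check, release. It relies only on the
 * bpf_task_acquire()/bpf_task_release() declarations already pulled in via
 * task_kfunc_common.h.
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(task_kfunc_acquire_release_sketch, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
acquired = bpf_task_acquire(task);
if (!acquired)
return 0;
bpf_task_release(acquired);
return 0;
}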
| linux-master | tools/testing/selftests/bpf/progs/task_kfunc_failure.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
static long stack[256];
/*
* KPROBE contexts
*/
__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx)
{
return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
SEC("?kprobe")
__success
int kprobe_typedef_ctx(void *ctx)
{
return kprobe_typedef_ctx_subprog(ctx);
}
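/* Resolve bpf_user_pt_regs_t down to the concrete per-arch struct type via
 * bpf_tracing.h's __PT_REGS_CAST(), so the subprog argument below is declared
 * with the underlying struct rather than the typedef.
 */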
#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
{
return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);
}
SEC("?kprobe")
__success
int kprobe_resolved_ctx(void *ctx)
{
return kprobe_struct_ctx_subprog(ctx);
}
/* This is the current hack to make this work on old kernels. */
struct bpf_user_pt_regs_t {};
__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx)
{
return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
SEC("?kprobe")
__success
int kprobe_workaround_ctx(void *ctx)
{
return kprobe_workaround_ctx_subprog(ctx);
}
/*
* RAW_TRACEPOINT contexts
*/
__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
{
return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
SEC("?raw_tp")
__success
int raw_tp_ctx(void *ctx)
{
return raw_tp_ctx_subprog(ctx);
}
/*
* RAW_TRACEPOINT_WRITABLE contexts
*/
__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
{
return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
SEC("?raw_tp")
__success
int raw_tp_writable_ctx(void *ctx)
{
return raw_tp_writable_ctx_subprog(ctx);
}
/*
* PERF_EVENT contexts
*/
__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx)
{
return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
SEC("?perf_event")
__success
int perf_event_ctx(void *ctx)
{
return perf_event_ctx_subprog(ctx);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#include "linked_list.h"
static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
struct bpf_list_node *n;
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 2;
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
bpf_obj_drop(f);
return 3;
}
bpf_spin_lock(lock);
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
bpf_obj_drop(f);
return 4;
}
bpf_spin_lock(lock);
f->data = 42;
bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
if (leave_in_map)
return 0;
bpf_spin_lock(lock);
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (!n)
return 5;
f = container_of(n, struct foo, node2);
if (f->data != 42) {
bpf_obj_drop(f);
return 6;
}
bpf_spin_lock(lock);
f->data = 13;
bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (!n)
return 7;
f = container_of(n, struct foo, node2);
if (f->data != 13) {
bpf_obj_drop(f);
return 8;
}
bpf_obj_drop(f);
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
return 9;
}
bpf_spin_lock(lock);
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
return 10;
}
return 0;
}
static __always_inline
int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
struct bpf_list_node *n;
struct foo *f[200], *pf;
int i;
/* The loop following this check adds nodes two at a time in order to
 * validate the release_on_unlock logic when multiple nodes are released
 * in a single critical section.
 */
if (ARRAY_SIZE(f) % 2)
return 10;
for (i = 0; i < ARRAY_SIZE(f); i += 2) {
f[i] = bpf_obj_new(typeof(**f));
if (!f[i])
return 2;
f[i]->data = i;
f[i + 1] = bpf_obj_new(typeof(**f));
if (!f[i + 1]) {
bpf_obj_drop(f[i]);
return 9;
}
f[i + 1]->data = i + 1;
bpf_spin_lock(lock);
bpf_list_push_front(head, &f[i]->node2);
bpf_list_push_front(head, &f[i + 1]->node2);
bpf_spin_unlock(lock);
}
for (i = 0; i < ARRAY_SIZE(f); i++) {
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (!n)
return 3;
pf = container_of(n, struct foo, node2);
if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
bpf_obj_drop(pf);
return 4;
}
bpf_spin_lock(lock);
bpf_list_push_back(head, &pf->node2);
bpf_spin_unlock(lock);
}
if (leave_in_map)
return 0;
for (i = 0; i < ARRAY_SIZE(f); i++) {
bpf_spin_lock(lock);
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (!n)
return 5;
pf = container_of(n, struct foo, node2);
if (pf->data != i) {
bpf_obj_drop(pf);
return 6;
}
bpf_obj_drop(pf);
}
bpf_spin_lock(lock);
n = bpf_list_pop_back(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
return 7;
}
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (n) {
bpf_obj_drop(container_of(n, struct foo, node2));
return 8;
}
return 0;
}
static __always_inline
int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
struct bpf_list_node *n;
struct bar *ba[8], *b;
struct foo *f;
int i;
f = bpf_obj_new(typeof(*f));
if (!f)
return 2;
for (i = 0; i < ARRAY_SIZE(ba); i++) {
b = bpf_obj_new(typeof(*b));
if (!b) {
bpf_obj_drop(f);
return 3;
}
b->data = i;
bpf_spin_lock(&f->lock);
bpf_list_push_back(&f->head, &b->node);
bpf_spin_unlock(&f->lock);
}
bpf_spin_lock(lock);
f->data = 42;
bpf_list_push_front(head, &f->node2);
bpf_spin_unlock(lock);
if (leave_in_map)
return 0;
bpf_spin_lock(lock);
n = bpf_list_pop_front(head);
bpf_spin_unlock(lock);
if (!n)
return 4;
f = container_of(n, struct foo, node2);
if (f->data != 42) {
bpf_obj_drop(f);
return 5;
}
for (i = 0; i < ARRAY_SIZE(ba); i++) {
bpf_spin_lock(&f->lock);
n = bpf_list_pop_front(&f->head);
bpf_spin_unlock(&f->lock);
if (!n) {
bpf_obj_drop(f);
return 6;
}
b = container_of(n, struct bar, node);
if (b->data != i) {
bpf_obj_drop(f);
bpf_obj_drop(b);
return 7;
}
bpf_obj_drop(b);
}
bpf_spin_lock(&f->lock);
n = bpf_list_pop_front(&f->head);
bpf_spin_unlock(&f->lock);
if (n) {
bpf_obj_drop(f);
bpf_obj_drop(container_of(n, struct bar, node));
return 8;
}
bpf_obj_drop(f);
return 0;
}
static __always_inline
int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
int ret;
ret = list_push_pop(lock, head, false);
if (ret)
return ret;
return list_push_pop(lock, head, true);
}
static __always_inline
int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
int ret;
ret = list_push_pop_multiple(lock, head, false);
if (ret)
return ret;
return list_push_pop_multiple(lock, head, true);
}
static __always_inline
int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
int ret;
ret = list_in_list(lock, head, false);
if (ret)
return ret;
return list_in_list(lock, head, true);
}
SEC("tc")
int map_list_push_pop(void *ctx)
{
struct map_value *v;
v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v)
return 1;
return test_list_push_pop(&v->lock, &v->head);
}
SEC("tc")
int inner_map_list_push_pop(void *ctx)
{
struct map_value *v;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
if (!map)
return 1;
v = bpf_map_lookup_elem(map, &(int){0});
if (!v)
return 1;
return test_list_push_pop(&v->lock, &v->head);
}
SEC("tc")
int global_list_push_pop(void *ctx)
{
return test_list_push_pop(&glock, &ghead);
}
SEC("tc")
int map_list_push_pop_multiple(void *ctx)
{
struct map_value *v;
v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v)
return 1;
return test_list_push_pop_multiple(&v->lock, &v->head);
}
SEC("tc")
int inner_map_list_push_pop_multiple(void *ctx)
{
struct map_value *v;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
if (!map)
return 1;
v = bpf_map_lookup_elem(map, &(int){0});
if (!v)
return 1;
return test_list_push_pop_multiple(&v->lock, &v->head);
}
SEC("tc")
int global_list_push_pop_multiple(void *ctx)
{
int ret;
ret = list_push_pop_multiple(&glock, &ghead, false);
if (ret)
return ret;
return list_push_pop_multiple(&glock, &ghead, true);
}
SEC("tc")
int map_list_in_list(void *ctx)
{
struct map_value *v;
v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v)
return 1;
return test_list_in_list(&v->lock, &v->head);
}
SEC("tc")
int inner_map_list_in_list(void *ctx)
{
struct map_value *v;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
if (!map)
return 1;
v = bpf_map_lookup_elem(map, &(int){0});
if (!v)
return 1;
return test_list_in_list(&v->lock, &v->head);
}
SEC("tc")
int global_list_in_list(void *ctx)
{
return test_list_in_list(&glock, &ghead);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_list.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* Check that precision marks propagate through scalar IDs.
* Registers r{0,1,2} have the same scalar ID at the moment when r0 is
* marked to be precise, this mark is immediately propagated to r{1,2}.
*/
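/* The __msg() patterns below match the verifier's precision backtracking log:
 * each "regs=... stack=... before N: (op) insn" line lists the registers and
 * stack slots that still need precision marks while the verifier walks
 * backwards over instruction N.
 */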
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
/* force r0 to be precise, this immediately marks r1 and r2 as
* precise as well because of shared IDs
*/
"r3 = r10;"
"r3 += r0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Same as precision_same_state, but mark propagates through state /
* parent state boundary.
*/
SEC("socket")
__success __log_level(2)
__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
/* force checkpoint */
"goto +0;"
/* force r0 to be precise, this immediately marks r1 and r2 as
* precise as well because of shared IDs
*/
"r3 = r10;"
"r3 += r0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Same as precision_same_state, but break one of the
* links, note that r1 is absent from regs=... in __msg below.
*/
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state_broken_link(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
/* break link for r1, this is the only line that differs
* compared to the previous test
*/
"r1 = 0;"
/* force r0 to be precise, this immediately marks r1 and r2 as
* precise as well because of shared IDs
*/
"r3 = r10;"
"r3 += r0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Same as precision_same_state_broken_link, but with state /
* parent state boundary.
*/
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state_broken_link(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
/* force checkpoint, although link between r1 and r{0,2} is
* broken by the next statement current precision tracking
* algorithm can't react to it and propagates mark for r1 to
* the parent state.
*/
"goto +0;"
/* break link for r1, this is the only line that differs
* compared to precision_cross_state()
*/
"r1 = 0;"
/* force r0 to be precise, this immediately marks r1 and r2 as
* precise as well because of shared IDs
*/
"r3 = r10;"
"r3 += r0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Check that precision marks propagate through scalar IDs.
* Use the same scalar ID in multiple stack frames, check that
* precision information is propagated up the call stack.
*/
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
* looks for all registers with frame2.r1.id in the current state
*/
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == r6.id */
"r1 = r0;"
"r6 = r0;"
"call precision_many_frames__foo;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
static __naked __noinline __used
void precision_many_frames__foo(void)
{
asm volatile (
/* conflate one of the register numbers (r6) with outer frame,
* to verify that those are tracked independently
*/
"r6 = r1;"
"r7 = r1;"
"call precision_many_frames__bar;"
"exit"
::: __clobber_all);
}
static __naked __noinline __used
void precision_many_frames__bar(void)
{
asm volatile (
/* force r1 to be precise, this immediately marks:
* - bar frame r1
* - foo frame r{1,6,7}
* - main frame r{1,6}
*/
"r2 = r10;"
"r2 += r1;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
/* Check that scalars with the same IDs are marked precise on stack as
* well as in registers.
*/
SEC("socket")
__success __log_level(2)
/* foo frame */
__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
asm volatile (
/* r0 = random number up to 0xff */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* tie r0.id == r1.id == fp[-8].id */
"r1 = r0;"
"*(u64*)(r10 - 8) = r1;"
"call precision_stack__foo;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
static __naked __noinline __used
void precision_stack__foo(void)
{
asm volatile (
/* conflate one of the register numbers (r6) with outer frame,
* to verify that those are tracked independently
*/
"*(u64*)(r10 - 8) = r1;"
"*(u64*)(r10 - 16) = r1;"
/* force r1 to be precise, this immediately marks:
* - foo frame r1,fp{-8,-16}
* - main frame r1,fp{-8}
*/
"r2 = r10;"
"r2 += r1;"
"exit"
::: __clobber_all);
}
/* Use two separate scalar IDs to check that these are propagated
* independently.
*/
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("11: (0f) r3 += r7")
__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("12: (0f) r3 += r9")
__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
asm volatile (
/* r6 = random number up to 0xff
* r6.id == r7.id
*/
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
"r6 = r0;"
"r7 = r0;"
/* same, but for r{8,9} */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
"r8 = r0;"
"r9 = r0;"
/* clear r0 id */
"r0 = 0;"
/* force checkpoint */
"goto +0;"
"r3 = r10;"
/* force r7 to be precise, this also marks r6 */
"r3 += r7;"
/* force r9 to be precise, this also marks r8 */
"r3 += r9;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Verify that check_ids() is used by regsafe() for scalars.
*
* r9 = ... some pointer with range X ...
* r6 = ... unbound scalar ID=a ...
* r7 = ... unbound scalar ID=b ...
* if (r6 > r7) goto +1
* r7 = r6
* if (r7 > X) goto exit
* r9 += r6
* ... access memory using r9 ...
*
* The memory access is safe only if r7 is bounded,
* which is true for one branch and not true for another.
*/
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
asm volatile (
/* Bump allocated stack */
"r1 = 0;"
"*(u64*)(r10 - 8) = r1;"
/* r9 = pointer to stack */
"r9 = r10;"
"r9 += -8;"
/* r7 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r7 = r0;"
/* r6 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r6 = r0;"
/* if r6 > r7 is an unpredictable jump */
"if r6 > r7 goto l1_%=;"
"r7 = r6;"
"l1_%=:"
/* if r7 > 4 ...; transfers range to r6 on one execution path
* but does not transfer on another
*/
"if r7 > 4 goto l2_%=;"
/* Access memory at r9[r6], r6 is not always bounded */
"r9 += r6;"
"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Similar to check_ids_in_regsafe.
* The l0 could be reached in two states:
*
* (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
* (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
*
 * Where (2) is not safe, as the "r7 > 4" check won't propagate the range to r6.
 * Without the changes to mark_chain_precision() that track scalar values
 * with equal IDs, this example would wrongly be considered safe.
*/
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
asm volatile (
/* Bump allocated stack */
"r1 = 0;"
"*(u64*)(r10 - 8) = r1;"
/* r9 = pointer to stack */
"r9 = r10;"
"r9 += -8;"
/* r8 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r8 = r0;"
/* r7 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r7 = r0;"
/* r6 = ktime_get_ns() */
"call %[bpf_ktime_get_ns];"
"r6 = r0;"
/* scratch .id from r0 */
"r0 = 0;"
/* if r6 > r7 is an unpredictable jump */
"if r6 > r7 goto l1_%=;"
/* tie r6 and r7 .id */
"r6 = r7;"
"l0_%=:"
/* if r7 > 4 exit(0) */
"if r7 > 4 goto l2_%=;"
/* Access memory at r9[r6] */
"r9 += r6;"
"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
"r0 = 0;"
"exit;"
"l1_%=:"
/* tie r6 and r8 .id */
"r6 = r8;"
"goto l0_%=;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Check that scalar IDs *are not* generated on register to register
* assignments if source register is a constant.
*
* If such IDs *are* generated the 'l1' below would be reached in
* two states:
*
* (1) r1{.id=A}, r2{.id=A}
* (2) r1{.id=C}, r2{.id=C}
*
* Thus forcing 'if r1 == r2' verification twice.
*/
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
asm volatile (
"call %[bpf_ktime_get_ns];"
/* unpredictable jump */
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"r1 = 0;"
"r1 = r1;"
"r3 = r1;"
"r4 = r1;"
"goto l1_%=;"
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"r1 = 0;"
"r2 = 0;"
"r3 = r1;"
"r4 = r2;"
"l1_%=:"
/* predictable jump, marks r3 and r4 precise */
"if r3 == r4 goto +0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
asm volatile (
"call %[bpf_ktime_get_ns];"
/* unpredictable jump */
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"w1 = 0;"
"w1 = w1;"
"w3 = w1;"
"w4 = w1;"
"goto l1_%=;"
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"w1 = 0;"
"w2 = 0;"
"w3 = w1;"
"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
"if w3 == w4 goto +0;"
"r0 = 0;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Check that unique scalar IDs are ignored when new verifier state is
* compared to cached verifier state. For this test:
* - cached state has no id on r1
* - new state has a unique id on r1
*/
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
asm volatile (
"call %[bpf_ktime_get_ns];"
"r6 = r0;"
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* r1.id == r0.id */
"r1 = r0;"
/* make r1.id unique */
"r0 = 0;"
"if r6 > 7 goto l0_%=;"
/* clear r1 id, but keep the range compatible */
"r1 &= 0xff;"
"l0_%=:"
/* get here in two states:
* - first: r1 has no id (cached state)
* - second: r1 has a unique id (should be considered equivalent)
*/
"r2 = r10;"
"r2 += r1;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Check that unique scalar IDs are ignored when new verifier state is
* compared to cached verifier state. For this test:
* - cached state has a unique id on r1
* - new state has no id on r1
*/
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
asm volatile (
"call %[bpf_ktime_get_ns];"
"r6 = r0;"
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
/* r1.id == r0.id */
"r1 = r0;"
/* make r1.id unique */
"r0 = 0;"
"if r6 > 7 goto l1_%=;"
"goto l0_%=;"
"l1_%=:"
/* clear r1 id, but keep the range compatible */
"r1 &= 0xff;"
"l0_%=:"
/* get here in two states:
* - first: r1 has a unique id (cached state)
* - second: r1 has no id (should be considered equivalent)
*/
"r2 = r10;"
"r2 += r1;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
/* Check that two different scalar IDs in a verified state can't be
* mapped to the same scalar ID in current state.
*/
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
* use two matches and "processed .. insns" to ensure this.
*/
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
asm volatile (
/* Give unique scalar IDs to r{6,7} */
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
"r6 = r0;"
"call %[bpf_ktime_get_ns];"
"r0 &= 0xff;"
"r7 = r0;"
"r0 = 0;"
/* Maybe make r{6,7} IDs identical */
"if r6 > r7 goto l0_%=;"
"goto l1_%=;"
"l0_%=:"
"r6 = r7;"
"l1_%=:"
/* Mark r{6,7} precise.
* Get here in two states:
* - first: r6{.id=A}, r7{.id=B} (cached state)
* - second: r6{.id=A}, r7{.id=A}
* Currently we don't want to consider such states equivalent.
* Thus "exit;" would be verified twice.
*/
"r2 = r10;"
"r2 += r6;"
"r2 += r7;"
"exit;"
:
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_scalar_ids.c |
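The __msg()/__log_level(2) annotations in verifier_scalar_ids.c above are matched against the verifier log by the selftests harness. As an illustrative sketch only (not the selftests' own runner; the object file name, buffer size, and log handling below are assumptions), the same log can be captured with plain libbpf:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	static char log_buf[1 << 20];	/* arbitrary size for the verifier log */
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("verifier_scalar_ids.bpf.o", NULL); /* assumed name */
	if (!obj)
		return 1;

	/* log_level 2 produces the "regs=... stack=..." precision lines
	 * that the __msg() patterns above refer to
	 */
	bpf_object__for_each_program(prog, obj) {
		bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
		bpf_program__set_log_level(prog, 2);
	}

	if (bpf_object__load(obj))
		fprintf(stderr, "load failed (expected for the __failure programs)\n");
	puts(log_buf);	/* the buffer is shared, so it holds the last program's log */
	bpf_object__close(obj);
	return 0;
}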
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_array_container x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_array_container.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct task_struct;
SEC("kprobe/__set_task_comm")
int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec)
{
return !tsk;
}
SEC("kretprobe/__set_task_comm")
int BPF_KRETPROBE(prog2, int ret)
{
return ret;
}
SEC("raw_tp/task_rename")
int prog3(struct bpf_raw_tracepoint_args *ctx)
{
return !ctx->args[0];
}
SEC("fentry/__set_task_comm")
int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec)
{
return 0;
}
SEC("fexit/__set_task_comm")
int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_overhead.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define ATTR __attribute__((noinline))
#include "test_jhash.h"
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
int nh_off, i = 0;
nh_off = 32;
/* pragma unroll doesn't work on large loops */
#define C do { \
ptr = data + i; \
if (ptr + nh_off > data_end) \
break; \
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
C30;C30;C30; /* 90 calls */
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_verif_scale3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test weak ksyms.
*
* Copyright (c) 2021 Google
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
int out__existing_typed = -1;
__u64 out__existing_typeless = -1;
__u64 out__non_existent_typeless = -1;
__u64 out__non_existent_typed = -1;
/* existing weak symbols */
/* test existing weak symbols can be resolved. */
extern const struct rq runqueues __ksym __weak; /* typed */
extern const void bpf_prog_active __ksym __weak; /* typeless */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
/* non-existent weak symbols. */
/* typeless symbols, default to zero. */
extern const void bpf_link_fops1 __ksym __weak;
/* typed symbols, default to zero. */
extern const int bpf_link_fops2 __ksym __weak;
void invalid_kfunc(void) __ksym __weak;
SEC("raw_tp/sys_enter")
int pass_handler(const void *ctx)
{
struct rq *rq;
/* tests existing symbols. */
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
if (rq && bpf_ksym_exists(&runqueues))
out__existing_typed = rq->cpu;
out__existing_typeless = (__u64)&bpf_prog_active;
/* tests non-existent symbols. */
out__non_existent_typeless = (__u64)&bpf_link_fops1;
/* tests non-existent symbols. */
out__non_existent_typed = (__u64)&bpf_link_fops2;
if (&bpf_link_fops2) /* can't happen */
out__non_existent_typed = (__u64)bpf_per_cpu_ptr(&bpf_link_fops2, 0);
if (!bpf_ksym_exists(bpf_task_acquire))
/* dead code won't be seen by the verifier */
bpf_task_acquire(0);
if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc))
/* dead code won't be seen by the verifier */
bpf_testmod_test_mod_kfunc(0);
if (bpf_ksym_exists(invalid_kfunc))
/* dead code won't be seen by the verifier */
invalid_kfunc();
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ksyms_weak.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct htab_val {
struct bpf_spin_lock lock;
unsigned int data;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 64);
__type(key, unsigned int);
__type(value, struct htab_val);
__uint(map_flags, BPF_F_NO_PREALLOC);
} htab SEC(".maps");
| linux-master | tools/testing/selftests/bpf/progs/htab_reuse.c |
// SPDX-License-Identifier: GPL-2.0
#include <netinet/in.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 page_size = 0;
SEC("cgroup/getsockopt")
int _getsockopt_child(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
if (optval[0] != 0x80)
return 0; /* EPERM, unexpected optval from the kernel */
ctx->retval = 0; /* Reset system call return value to zero */
optval[0] = 0x90;
ctx->optlen = 1;
return 1;
out:
	/* optval larger than PAGE_SIZE is handled via the kernel's buffer;
	 * reset optlen so it is left untouched.
	 */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/getsockopt")
int _getsockopt_parent(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
if (optval[0] != 0x90)
return 0; /* EPERM, unexpected optval from the kernel */
ctx->retval = 0; /* Reset system call return value to zero */
optval[0] = 0xA0;
ctx->optlen = 1;
return 1;
out:
	/* optval larger than PAGE_SIZE is handled via the kernel's buffer;
	 * reset optlen so it is left untouched.
	 */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/setsockopt")
int _setsockopt(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
optval[0] += 0x10;
ctx->optlen = 1;
return 1;
out:
	/* optval larger than PAGE_SIZE is handled via the kernel's buffer;
	 * reset optlen so it is left untouched.
	 */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/sockopt_multi.c |
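The cgroup/getsockopt and cgroup/setsockopt programs in sockopt_multi.c above only run once attached to a cgroup. A hedged sketch of that attachment with libbpf follows; the object name and cgroup path are placeholders, and while the real selftest puts the parent and child programs on different cgroups, everything goes on one cgroup here purely to show the API:

#include <fcntl.h>
#include <unistd.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_one(struct bpf_object *obj, const char *name, int cg_fd)
{
	struct bpf_program *prog = bpf_object__find_program_by_name(obj, name);

	return prog ? bpf_program__attach_cgroup(prog, cg_fd) : NULL;
}

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL);
	int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY);	/* placeholder cgroup */

	if (!obj || cg_fd < 0 || bpf_object__load(obj))
		return 1;

	attach_one(obj, "_getsockopt_parent", cg_fd);
	attach_one(obj, "_getsockopt_child", cg_fd);
	attach_one(obj, "_setsockopt", cg_fd);
	/* the links must stay alive for the programs to remain attached */
	pause();
	return 0;
}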
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u64);
} arraymap1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 10);
__type(key, __u64);
__type(value, __u32);
} hashmap1 SEC(".maps");
__u32 key_sum = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
__u32 *hmap_val, *key = ctx->key;
__u64 *val = ctx->value;
if (key == (void *)0 || val == (void *)0)
return 0;
bpf_seq_write(ctx->meta->seq, key, sizeof(__u32));
bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
key_sum += *key;
val_sum += *val;
/* workaround - It's necessary to do this convoluted (val, key)
* write into hashmap1, instead of simply doing
* bpf_map_update_elem(&hashmap1, val, key, BPF_ANY);
	 * because key has the MEM_RDONLY flag and bpf_map_update_elem() expects
	 * types without this flag.
*/
bpf_map_update_elem(&hashmap1, val, val, BPF_ANY);
hmap_val = bpf_map_lookup_elem(&hashmap1, val);
if (hmap_val)
*hmap_val = *key;
*val = *key;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c |
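An iter/bpf_map_elem program such as dump_bpf_array_map above is bound to a concrete map at attach time through bpf_iter_link_info. A minimal sketch, assuming the object has already been opened and loaded; the program and map names are taken from the file above, everything else is illustrative:

#include <string.h>
#include <bpf/libbpf.h>

struct bpf_link *attach_map_iter(struct bpf_object *obj)
{
	union bpf_iter_link_info linfo;
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_program *prog;
	int map_fd;

	prog = bpf_object__find_program_by_name(obj, "dump_bpf_array_map");
	map_fd = bpf_object__find_map_fd_by_name(obj, "arraymap1");
	if (!prog || map_fd < 0)
		return NULL;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;		/* iterate exactly this map */
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	return bpf_program__attach_iter(prog, &opts);
}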
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
SEC("iter/unix")
int dump_unix(struct bpf_iter__unix *ctx)
{
struct unix_sock *unix_sk = ctx->unix_sk;
struct sock *sk = (struct sock *)unix_sk;
struct seq_file *seq;
__u32 seq_num;
if (!unix_sk)
return 0;
seq = ctx->meta->seq;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, "Num RefCount Protocol Flags Type St Inode Path\n");
BPF_SEQ_PRINTF(seq, "%pK: %08X %08X %08X %04X %02X %8lu",
unix_sk,
sk->sk_refcnt.refs.counter,
0,
sk->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
sk->sk_type,
sk->sk_socket ?
(sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
(sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(sk));
if (unix_sk->addr) {
if (unix_sk->addr->name->sun_path[0]) {
BPF_SEQ_PRINTF(seq, " %s", unix_sk->addr->name->sun_path);
} else {
/* The name of the abstract UNIX domain socket starts
* with '\0' and can contain '\0'. The null bytes
* should be escaped as done in unix_seq_show().
*/
__u64 i, len;
len = unix_sk->addr->len - sizeof(short);
BPF_SEQ_PRINTF(seq, " @");
for (i = 1; i < len; i++) {
/* unix_validate_addr() tests this upper bound. */
if (i >= sizeof(struct sockaddr_un))
break;
BPF_SEQ_PRINTF(seq, "%c",
unix_sk->addr->name->sun_path[i] ?:
'@');
}
}
}
BPF_SEQ_PRINTF(seq, "\n");
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_unix.c |
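The text that dump_unix above emits via BPF_SEQ_PRINTF is read back through an iterator fd. A hedged sketch of the read side (the program is assumed to be loaded already; iter/unix needs no extra link info, so NULL opts are passed):

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

void dump_unix_sockets(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
	char buf[4096];
	int iter_fd;
	ssize_t n;

	if (!link)
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));	/* one fd per traversal */
	if (iter_fd < 0)
		goto out;
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);		/* text built by BPF_SEQ_PRINTF */
	close(iter_fd);
out:
	bpf_link__destroy(link);
}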
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include <linux/bpf.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} secure_exec_task_map SEC(".maps");
SEC("lsm/bprm_creds_for_exec")
int BPF_PROG(secure_exec, struct linux_binprm *bprm)
{
int *secureexec;
secureexec = bpf_task_storage_get(&secure_exec_task_map,
bpf_get_current_task_btf(), 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (secureexec && *secureexec)
bpf_bprm_opts_set(bprm, BPF_F_BPRM_SECUREEXEC);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bprm_opts.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
struct hmap_elem {
int pad; /* unused */
struct bpf_timer timer;
};
struct inner_map {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, int);
__type(value, struct hmap_elem);
} inner_htab SEC(".maps");
#define ARRAY_KEY 1
#define HASH_KEY 1234
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 2);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__array(values, struct inner_map);
} outer_arr SEC(".maps") = {
.values = { [ARRAY_KEY] = &inner_htab },
};
__u64 err;
__u64 ok;
__u64 cnt;
static int timer_cb1(void *map, int *key, struct hmap_elem *val);
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
cnt++;
bpf_timer_set_callback(&val->timer, timer_cb1);
if (bpf_timer_start(&val->timer, 1000, 0))
err |= 1;
ok |= 1;
return 0;
}
/* callback for inner hash map */
static int timer_cb1(void *map, int *key, struct hmap_elem *val)
{
cnt++;
bpf_timer_set_callback(&val->timer, timer_cb2);
if (bpf_timer_start(&val->timer, 1000, 0))
err |= 2;
/* Do a lookup to make sure 'map' and 'key' pointers are correct */
bpf_map_lookup_elem(map, key);
ok |= 2;
return 0;
}
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1, int a)
{
struct hmap_elem init = {};
struct bpf_map *inner_map;
struct hmap_elem *val;
int array_key = ARRAY_KEY;
int hash_key = HASH_KEY;
inner_map = bpf_map_lookup_elem(&outer_arr, &array_key);
if (!inner_map)
return 0;
bpf_map_update_elem(inner_map, &hash_key, &init, 0);
val = bpf_map_lookup_elem(inner_map, &hash_key);
if (!val)
return 0;
bpf_timer_init(&val->timer, inner_map, CLOCK_MONOTONIC);
if (bpf_timer_set_callback(&val->timer, timer_cb1))
err |= 4;
if (bpf_timer_start(&val->timer, 0, 0))
err |= 8;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/timer_mim.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
bool prog1_called = false;
bool prog2_called = false;
bool prog3_called = false;
SEC("raw_tp/sys_enter")
int prog1(const void *ctx)
{
prog1_called = true;
return 0;
}
SEC("raw_tp/sys_exit")
int prog2(const void *ctx)
{
prog2_called = true;
return 0;
}
struct fake_kernel_struct {
int whatever;
} __attribute__((preserve_access_index));
SEC("fentry/unexisting-kprobe-will-fail-if-loaded")
int prog3(const void *ctx)
{
struct fake_kernel_struct *fake = (void *)ctx;
fake->whatever = 123;
prog3_called = true;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_autoload.c |
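prog3 above deliberately targets a non-existent attach point, so loading the whole object can only succeed if that program is excluded. A minimal sketch of doing so via libbpf's per-program autoload flag (the object file name is an assumption):

#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("test_autoload.bpf.o", NULL);
	struct bpf_program *prog;

	if (!obj)
		return 1;

	/* skip the program whose attach target does not exist */
	prog = bpf_object__find_program_by_name(obj, "prog3");
	if (prog)
		bpf_program__set_autoload(prog, false);

	return bpf_object__load(obj) ? 1 : 0;
}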
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cpumask_common.h"
char _license[] SEC("license") = "GPL";
int pid, nr_cpus;
static bool is_test_task(void)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
return pid == cur_pid;
}
static bool create_cpumask_set(struct bpf_cpumask **out1,
struct bpf_cpumask **out2,
struct bpf_cpumask **out3,
struct bpf_cpumask **out4)
{
struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;
mask1 = create_cpumask();
if (!mask1)
return false;
mask2 = create_cpumask();
if (!mask2) {
bpf_cpumask_release(mask1);
err = 3;
return false;
}
mask3 = create_cpumask();
if (!mask3) {
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
err = 4;
return false;
}
mask4 = create_cpumask();
if (!mask4) {
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
bpf_cpumask_release(mask3);
err = 5;
return false;
}
*out1 = mask1;
*out2 = mask2;
*out3 = mask3;
*out4 = mask4;
return true;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
if (!is_test_task())
return 0;
cpumask = create_cpumask();
if (!cpumask)
return 0;
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
if (!is_test_task())
return 0;
cpumask = create_cpumask();
if (!cpumask)
return 0;
bpf_cpumask_set_cpu(0, cpumask);
if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
err = 3;
goto release_exit;
}
bpf_cpumask_clear_cpu(0, cpumask);
if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
err = 4;
goto release_exit;
}
release_exit:
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
if (!is_test_task())
return 0;
cpumask = create_cpumask();
if (!cpumask)
return 0;
bpf_cpumask_setall(cpumask);
if (!bpf_cpumask_full(cast(cpumask))) {
err = 3;
goto release_exit;
}
bpf_cpumask_clear(cpumask);
if (!bpf_cpumask_empty(cast(cpumask))) {
err = 4;
goto release_exit;
}
release_exit:
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
if (!is_test_task())
return 0;
cpumask = create_cpumask();
if (!cpumask)
return 0;
if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
err = 3;
goto release_exit;
}
if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
err = 4;
goto release_exit;
}
bpf_cpumask_set_cpu(0, cpumask);
if (bpf_cpumask_first(cast(cpumask)) != 0) {
err = 5;
goto release_exit;
}
if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
err = 6;
goto release_exit;
}
release_exit:
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2;
u32 first;
if (!is_test_task())
return 0;
mask1 = create_cpumask();
if (!mask1)
return 0;
mask2 = create_cpumask();
if (!mask2)
goto release_exit;
bpf_cpumask_set_cpu(0, mask1);
bpf_cpumask_set_cpu(1, mask2);
first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
if (first <= 1)
err = 3;
release_exit:
if (mask1)
bpf_cpumask_release(mask1);
if (mask2)
bpf_cpumask_release(mask2);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
if (!is_test_task())
return 0;
cpumask = create_cpumask();
if (!cpumask)
return 0;
if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
err = 3;
goto release_exit;
}
if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
err = 4;
goto release_exit;
}
if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
err = 5;
goto release_exit;
}
release_exit:
bpf_cpumask_release(cpumask);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
if (!is_test_task())
return 0;
if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
return 0;
bpf_cpumask_set_cpu(0, mask1);
bpf_cpumask_set_cpu(1, mask2);
if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
err = 6;
goto release_exit;
}
if (!bpf_cpumask_empty(cast(dst1))) {
err = 7;
goto release_exit;
}
bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
err = 8;
goto release_exit;
}
if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
err = 9;
goto release_exit;
}
bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
err = 10;
goto release_exit;
}
release_exit:
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
bpf_cpumask_release(dst1);
bpf_cpumask_release(dst2);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
if (!is_test_task())
return 0;
if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
return 0;
bpf_cpumask_set_cpu(0, mask1);
bpf_cpumask_set_cpu(1, mask2);
if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
err = 6;
goto release_exit;
}
bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
err = 7;
goto release_exit;
}
if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
err = 8;
goto release_exit;
}
if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
err = 9;
goto release_exit;
}
release_exit:
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
bpf_cpumask_release(dst1);
bpf_cpumask_release(dst2);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
u32 cpu;
if (!is_test_task())
return 0;
if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
return 0;
bpf_cpumask_set_cpu(0, mask1);
bpf_cpumask_set_cpu(1, mask2);
bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
cpu = bpf_cpumask_any_distribute(cast(mask1));
if (cpu != 0) {
err = 6;
goto release_exit;
}
cpu = bpf_cpumask_any_distribute(cast(dst2));
if (cpu < nr_cpus) {
err = 7;
goto release_exit;
}
bpf_cpumask_copy(dst2, cast(dst1));
if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
err = 8;
goto release_exit;
}
cpu = bpf_cpumask_any_distribute(cast(dst2));
if (cpu > 1) {
err = 9;
goto release_exit;
}
cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
if (cpu < nr_cpus) {
err = 10;
goto release_exit;
}
release_exit:
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
bpf_cpumask_release(dst1);
bpf_cpumask_release(dst2);
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
cpumask = create_cpumask();
if (!cpumask)
return 0;
if (cpumask_map_insert(cpumask))
err = 3;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
struct __cpumask_map_value *v;
cpumask = create_cpumask();
if (!cpumask)
return 0;
if (cpumask_map_insert(cpumask)) {
err = 3;
return 0;
}
v = cpumask_map_value_lookup();
if (!v) {
err = 4;
return 0;
}
cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
if (cpumask)
bpf_cpumask_release(cpumask);
else
err = 5;
return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local, *prev;
if (!is_test_task())
return 0;
local = create_cpumask();
if (!local)
return 0;
prev = bpf_kptr_xchg(&global_mask, local);
if (prev) {
bpf_cpumask_release(prev);
err = 3;
return 0;
}
bpf_rcu_read_lock();
local = global_mask;
if (!local) {
err = 4;
bpf_rcu_read_unlock();
return 0;
}
bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
bpf_rcu_read_unlock();
return 0;
}
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2;
mask1 = bpf_cpumask_create();
mask2 = bpf_cpumask_create();
if (!mask1 || !mask2)
goto free_masks_return;
bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);
free_masks_return:
if (mask1)
bpf_cpumask_release(mask1);
if (mask2)
bpf_cpumask_release(mask2);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cpumask_success.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
/* This program shows that clang/llvm is able to generate a code pattern
 * like:
* _tcp_send_active_reset:
* 0: bf 16 00 00 00 00 00 00 r6 = r1
* ......
* 335: b7 01 00 00 0f 00 00 00 r1 = 15
* 336: 05 00 48 00 00 00 00 00 goto 72
*
* LBB0_3:
* 337: b7 01 00 00 01 00 00 00 r1 = 1
* 338: 63 1a d0 ff 00 00 00 00 *(u32 *)(r10 - 48) = r1
* 408: b7 01 00 00 03 00 00 00 r1 = 3
*
* LBB0_4:
* 409: 71 a2 fe ff 00 00 00 00 r2 = *(u8 *)(r10 - 2)
* 410: bf a7 00 00 00 00 00 00 r7 = r10
* 411: 07 07 00 00 b8 ff ff ff r7 += -72
* 412: bf 73 00 00 00 00 00 00 r3 = r7
* 413: 0f 13 00 00 00 00 00 00 r3 += r1
* 414: 73 23 2d 00 00 00 00 00 *(u8 *)(r3 + 45) = r2
*
 * As the above snippet shows, the code generated by the compiler is
 * reasonable: "r1" is assigned different values in the basic blocks
 * "_tcp_send_active_reset" and "LBB0_3", and then used in "LBB0_4".
 * The verifier should be able to handle such code patterns.
*/
#include <string.h>
#include <linux/bpf.h>
#include <linux/ipv6.h>
#include <linux/version.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;})
#define TCP_ESTATS_MAGIC 0xBAADBEEF
/* This test case needs the "sock" and "pt_regs" data structures.
 * Recursively, "sock" needs "sock_common" and "inet_sock".
 * However, this is a unit test case intended only for the
 * verifier, without BPF program execution.
* We can safely mock much simpler data structures, basically
* only taking the necessary fields from kernel headers.
*/
typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;
struct sock_common {
unsigned short skc_family;
union {
__addrpair skc_addrpair;
struct {
__be32 skc_daddr;
__be32 skc_rcv_saddr;
};
};
union {
__portpair skc_portpair;
struct {
__be16 skc_dport;
__u16 skc_num;
};
};
struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;
};
struct sock {
struct sock_common __sk_common;
#define sk_family __sk_common.skc_family
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
};
struct inet_sock {
struct sock sk;
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_dport sk.__sk_common.skc_dport
__be32 inet_saddr;
__be16 inet_sport;
};
struct pt_regs {
long di;
};
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
}
/* Define various data structures for state recording.
* Some fields are not used due to test simplification.
*/
enum tcp_estats_addrtype {
TCP_ESTATS_ADDRTYPE_IPV4 = 1,
TCP_ESTATS_ADDRTYPE_IPV6 = 2
};
enum tcp_estats_event_type {
TCP_ESTATS_ESTABLISH,
TCP_ESTATS_PERIODIC,
TCP_ESTATS_TIMEOUT,
TCP_ESTATS_RETRANSMIT_TIMEOUT,
TCP_ESTATS_RETRANSMIT_OTHER,
TCP_ESTATS_SYN_RETRANSMIT,
TCP_ESTATS_SYNACK_RETRANSMIT,
TCP_ESTATS_TERM,
TCP_ESTATS_TX_RESET,
TCP_ESTATS_RX_RESET,
TCP_ESTATS_WRITE_TIMEOUT,
TCP_ESTATS_CONN_TIMEOUT,
TCP_ESTATS_ACK_LATENCY,
TCP_ESTATS_NEVENTS,
};
struct tcp_estats_event {
int pid;
int cpu;
unsigned long ts;
unsigned int magic;
enum tcp_estats_event_type event_type;
};
/* The below data structure is packed in order for
* llvm compiler to generate expected code.
*/
struct tcp_estats_conn_id {
unsigned int localaddressType;
struct {
unsigned char data[16];
} localaddress;
struct {
unsigned char data[16];
} remaddress;
unsigned short localport;
unsigned short remport;
} __attribute__((__packed__));
struct tcp_estats_basic_event {
struct tcp_estats_event event;
struct tcp_estats_conn_id conn_id;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, __u32);
__type(value, struct tcp_estats_basic_event);
} ev_record_map SEC(".maps");
struct dummy_tracepoint_args {
unsigned long long pad;
struct sock *sock;
};
static __always_inline void tcp_estats_ev_init(struct tcp_estats_event *event,
enum tcp_estats_event_type type)
{
event->magic = TCP_ESTATS_MAGIC;
event->ts = bpf_ktime_get_ns();
event->event_type = type;
}
static __always_inline void unaligned_u32_set(unsigned char *to, __u8 *from)
{
to[0] = _(from[0]);
to[1] = _(from[1]);
to[2] = _(from[2]);
to[3] = _(from[3]);
}
static __always_inline void conn_id_ipv4_init(struct tcp_estats_conn_id *conn_id,
__be32 *saddr, __be32 *daddr)
{
conn_id->localaddressType = TCP_ESTATS_ADDRTYPE_IPV4;
unaligned_u32_set(conn_id->localaddress.data, (__u8 *)saddr);
unaligned_u32_set(conn_id->remaddress.data, (__u8 *)daddr);
}
static __always_inline void conn_id_ipv6_init(struct tcp_estats_conn_id *conn_id,
__be32 *saddr, __be32 *daddr)
{
conn_id->localaddressType = TCP_ESTATS_ADDRTYPE_IPV6;
unaligned_u32_set(conn_id->localaddress.data, (__u8 *)saddr);
unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32),
(__u8 *)(saddr + 1));
unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32) * 2,
(__u8 *)(saddr + 2));
unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32) * 3,
(__u8 *)(saddr + 3));
unaligned_u32_set(conn_id->remaddress.data,
(__u8 *)(daddr));
unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32),
(__u8 *)(daddr + 1));
unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32) * 2,
(__u8 *)(daddr + 2));
unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32) * 3,
(__u8 *)(daddr + 3));
}
static __always_inline void tcp_estats_conn_id_init(struct tcp_estats_conn_id *conn_id,
struct sock *sk)
{
conn_id->localport = _(inet_sk(sk)->inet_sport);
conn_id->remport = _(inet_sk(sk)->inet_dport);
if (_(sk->sk_family) == AF_INET6)
conn_id_ipv6_init(conn_id,
sk->sk_v6_rcv_saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
else
conn_id_ipv4_init(conn_id,
&inet_sk(sk)->inet_saddr,
&inet_sk(sk)->inet_daddr);
}
static __always_inline void tcp_estats_init(struct sock *sk,
struct tcp_estats_event *event,
struct tcp_estats_conn_id *conn_id,
enum tcp_estats_event_type type)
{
tcp_estats_ev_init(event, type);
tcp_estats_conn_id_init(conn_id, sk);
}
static __always_inline void send_basic_event(struct sock *sk,
enum tcp_estats_event_type type)
{
struct tcp_estats_basic_event ev;
__u32 key = bpf_get_prandom_u32();
memset(&ev, 0, sizeof(ev));
tcp_estats_init(sk, &ev.event, &ev.conn_id, type);
bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY);
}
SEC("tp/dummy/tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
if (!arg->sock)
return 0;
send_basic_event(arg->sock, TCP_ESTATS_TX_RESET);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tcp_estats.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} enter_id SEC(".maps");
#define MAGIC_VALUE 0xabcd1234
pid_t target_pid = 0;
int mismatch_cnt = 0;
int enter_cnt = 0;
int exit_cnt = 0;
SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
ptr = bpf_task_storage_get(&enter_id, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
__sync_fetch_and_add(&enter_cnt, 1);
*ptr = MAGIC_VALUE + enter_cnt;
return 0;
}
SEC("tp_btf/sys_exit")
int BPF_PROG(on_exit, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
ptr = bpf_task_storage_get(&enter_id, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
__sync_fetch_and_add(&exit_cnt, 1);
if (*ptr != MAGIC_VALUE + exit_cnt)
__sync_fetch_and_add(&mismatch_cnt, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/task_local_storage.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(max_entries, 8);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
} tx_port SEC(".maps");
SEC("redirect_map_0")
int xdp_redirect_map_0(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 0, 0);
}
SEC("redirect_map_1")
int xdp_redirect_map_1(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 1, 0);
}
SEC("redirect_map_2")
int xdp_redirect_map_2(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 2, 0);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_redirect_map.c |
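Before xdp_redirect_map_0 above can redirect anything, userspace has to fill the tx_port devmap with an egress ifindex and attach the program to an ingress interface. A hedged sketch; the interface indexes, object name, and generic-mode flag are placeholders, not part of the selftest:

#include <linux/if_link.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_redirect_map.bpf.o", NULL);
	struct bpf_program *prog;
	int map_fd, key = 0, ifindex_rx = 1, ifindex_tx = 2;	/* placeholder ifindexes */

	if (!obj || bpf_object__load(obj))
		return 1;

	/* devmap slot 0 -> egress interface used by xdp_redirect_map_0 */
	map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
	bpf_map_update_elem(map_fd, &key, &ifindex_tx, BPF_ANY);

	prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_0");
	return bpf_xdp_attach(ifindex_rx, bpf_program__fd(prog),
			      XDP_FLAGS_SKB_MODE, NULL);	/* generic mode for illustration */
}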
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct my_key { long x; };
struct my_value { long x; };
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct my_key);
__type(value, struct my_value);
__uint(max_entries, 16);
} map1 SEC(".maps");
/* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't
* matter.
*/
typedef struct {
__uint(max_entries, 8);
__type(key, int);
__type(value, int);
__uint(type, BPF_MAP_TYPE_ARRAY);
} map2_t;
extern map2_t map2 SEC(".maps");
/* This should be the winning map definition, but we have no way of verifying,
* so we just make sure that it links and works without errors
*/
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 16);
} map_weak __weak SEC(".maps");
int output_first1;
int output_second1;
int output_weak1;
SEC("raw_tp/sys_enter")
int BPF_PROG(handler_enter1)
{
/* update values with key = 1 */
int key = 1, val = 1;
struct my_key key_struct = { .x = 1 };
struct my_value val_struct = { .x = 1000 };
bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
bpf_map_update_elem(&map2, &key, &val, 0);
bpf_map_update_elem(&map_weak, &key, &val, 0);
return 0;
}
SEC("raw_tp/sys_exit")
int BPF_PROG(handler_exit1)
{
/* lookup values with key = 2, set in another file */
int key = 2, *val;
struct my_key key_struct = { .x = 2 };
struct my_value *value_struct;
value_struct = bpf_map_lookup_elem(&map1, &key_struct);
if (value_struct)
output_first1 = value_struct->x;
val = bpf_map_lookup_elem(&map2, &key);
if (val)
output_second1 = *val;
val = bpf_map_lookup_elem(&map_weak, &key);
if (val)
output_weak1 = *val;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/linked_maps1.c |
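The extern map2 declaration above is resolved when linked_maps1.bpf.o and linked_maps2.bpf.o are statically linked into a single object. One way to perform that step is libbpf's bpf_linker API; a sketch under the assumption of those file names:

#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_linker *linker = bpf_linker__new("linked_maps.linked.o", NULL);
	int err;

	if (!linker)
		return 1;

	/* order mirrors the source files; the extern in linked_maps1 is
	 * satisfied by the definition in linked_maps2
	 */
	err = bpf_linker__add_file(linker, "linked_maps1.bpf.o", NULL) ?:
	      bpf_linker__add_file(linker, "linked_maps2.bpf.o", NULL) ?:
	      bpf_linker__finalize(linker);

	bpf_linker__free(linker);
	return err ? 1 : 0;
}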
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Bytedance */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
unsigned long span = 0;
SEC("fentry/load_balance")
int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
struct sched_domain *sd)
{
span = sd->span[0];
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_access_variable_array.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__uint(max_entries, 0);
__type(key, struct bpf_cgroup_storage_key);
__type(value, char[TEST_DATA_LEN]);
} cgroup_storage SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__uint(max_entries, 0);
__type(key, struct bpf_cgroup_storage_key);
__type(value, char[64]);
} percpu_cgroup_storage SEC(".maps");
SEC("cgroup/skb")
__description("valid cgroup storage access")
__success __success_unpriv __retval(0)
__naked void valid_cgroup_storage_access(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void invalid_cgroup_storage_access_1(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[map_hash_8b] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void invalid_cgroup_storage_access_2(void)
{
asm volatile (" \
r2 = 0; \
.8byte %[ld_map_fd]; \
.8byte 0; \
call %[bpf_get_local_storage]; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void invalid_cgroup_storage_access_3(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 256); \
r1 += 1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void invalid_cgroup_storage_access_4(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 - 2); \
r0 = r1; \
r1 += 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void invalid_cgroup_storage_access_5(void)
{
asm volatile (" \
r2 = 7; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void invalid_cgroup_storage_access_6(void)
{
asm volatile (" \
r2 = r1; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("valid per-cpu cgroup storage access")
__success __success_unpriv __retval(0)
__naked void per_cpu_cgroup_storage_access(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 1")
__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage")
__failure_unpriv
__naked void cpu_cgroup_storage_access_1(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[map_hash_8b] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 2")
__failure __msg("fd 1 is not pointing to valid bpf_map")
__failure_unpriv
__naked void cpu_cgroup_storage_access_2(void)
{
asm volatile (" \
r2 = 0; \
.8byte %[ld_map_fd]; \
.8byte 0; \
call %[bpf_get_local_storage]; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 3")
__failure __msg("invalid access to map value, value_size=64 off=256 size=4")
__failure_unpriv
__naked void cpu_cgroup_storage_access_3(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 256); \
r1 += 1; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 4")
__failure __msg("invalid access to map value, value_size=64 off=-2 size=4")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void cpu_cgroup_storage_access_4(void)
{
asm volatile (" \
r2 = 0; \
r1 = %[cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 - 2); \
r0 = r1; \
r1 += 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 5")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__failure_unpriv
__naked void cpu_cgroup_storage_access_5(void)
{
asm volatile (" \
r2 = 7; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid per-cpu cgroup storage access 6")
__failure __msg("get_local_storage() doesn't support non-zero flags")
__msg_unpriv("R2 leaks addr into helper function")
__naked void cpu_cgroup_storage_access_6(void)
{
asm volatile (" \
r2 = r1; \
r1 = %[percpu_cgroup_storage] ll; \
call %[bpf_get_local_storage]; \
r1 = *(u32*)(r0 + 0); \
r0 = r1; \
r0 &= 1; \
exit; \
" :
: __imm(bpf_get_local_storage),
__imm_addr(percpu_cgroup_storage)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
int bpf_sock_destroy(struct sock_common *sk) __ksym;
SEC("tp_btf/tcp_destroy_sock")
__failure __msg("calling kernel function bpf_sock_destroy is not allowed")
int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk)
{
/* should not load */
bpf_sock_destroy((struct sock_common *)sk);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define DUMMY_STORAGE_VALUE 0xdeadbeef
int monitored_pid = 0;
int inode_storage_result = -1;
int sk_storage_result = -1;
int task_storage_result = -1;
struct local_storage {
struct inode *exec_inode;
__u32 value;
};
struct {
__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct local_storage);
} inode_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct local_storage);
} sk_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct local_storage);
} sk_storage_map2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct local_storage);
} task_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct local_storage);
} task_storage_map2 SEC(".maps");
SEC("lsm/inode_unlink")
int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct bpf_local_storage *local_storage;
struct local_storage *storage;
struct task_struct *task;
bool is_self_unlink;
if (pid != monitored_pid)
return 0;
task = bpf_get_current_task_btf();
if (!task)
return 0;
task_storage_result = -1;
storage = bpf_task_storage_get(&task_storage_map, task, 0, 0);
if (!storage)
return 0;
/* Don't let an executable delete itself */
is_self_unlink = storage->exec_inode == victim->d_inode;
storage = bpf_task_storage_get(&task_storage_map2, task, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage || storage->value)
return 0;
if (bpf_task_storage_delete(&task_storage_map, task))
return 0;
/* Ensure that the task_storage_map is disconnected from the storage.
* The storage memory should not be freed back to the
* bpf_mem_alloc.
*/
local_storage = task->bpf_storage;
if (!local_storage || local_storage->smap)
return 0;
task_storage_result = 0;
return is_self_unlink ? -EPERM : 0;
}
SEC("lsm.s/inode_rename")
int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct local_storage *storage;
int err;
/* new_dentry->d_inode can be NULL when the inode is renamed to a file
* that did not exist before. The helper should be able to handle this
* NULL pointer.
*/
bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
0, 0);
if (!storage)
return 0;
if (storage->value != DUMMY_STORAGE_VALUE)
inode_storage_result = -1;
err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
if (!err)
inode_storage_result = err;
return 0;
}
SEC("lsm.s/socket_bind")
int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
int addrlen)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
if (pid != monitored_pid)
return 0;
storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0);
if (!storage)
return 0;
sk_storage_result = -1;
if (storage->value != DUMMY_STORAGE_VALUE)
return 0;
/* This tests that we can associate multiple elements
* with the local storage.
*/
storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk))
return 0;
storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
if (bpf_sk_storage_delete(&sk_storage_map, sock->sk))
return 0;
/* Ensure that the sk_storage_map is disconnected from the storage. */
if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap)
return 0;
sk_storage_result = 0;
return 0;
}
SEC("lsm.s/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
int protocol, int kern)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
if (pid != monitored_pid)
return 0;
storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
storage->value = DUMMY_STORAGE_VALUE;
return 0;
}
/* This uses the local storage to remember the inode of the binary that a
* process was originally executing.
*/
SEC("lsm.s/bprm_committed_creds")
void BPF_PROG(exec, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
if (pid != monitored_pid)
return;
storage = bpf_task_storage_get(&task_storage_map,
bpf_get_current_task_btf(), 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (storage)
storage->exec_inode = bprm->file->f_inode;
storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
0, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return;
storage->value = DUMMY_STORAGE_VALUE;
}
| linux-master | tools/testing/selftests/bpf/progs/local_storage.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 2);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
SEC("tc") \
int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
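/* Tail call out of a BPF-to-BPF subprogram: if the jmp_table slot is empty,
 * the tail call falls through and we return skb->len * 2 instead.
 */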
static __noinline
int subprog_tail(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
return skb->len * 2;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <stdbool.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define STRNCMP_STR_SZ 8
const char target[STRNCMP_STR_SZ] = "EEEEEEE";
char str[STRNCMP_STR_SZ];
int cmp_ret = 0;
int target_pid = 0;
const char no_str_target[STRNCMP_STR_SZ] = "12345678";
char writable_target[STRNCMP_STR_SZ];
unsigned int no_const_str_size = STRNCMP_STR_SZ;
char _license[] SEC("license") = "GPL";
SEC("?tp/syscalls/sys_enter_nanosleep")
int do_strncmp(void *ctx)
{
if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
return 0;
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, target);
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_const_str_size(void *ctx)
{
/* The string size argument is not a constant, so this will fail */
cmp_ret = bpf_strncmp(str, no_const_str_size, target);
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_writable_target(void *ctx)
{
/* The compared target is not read-only, so this will fail */
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, writable_target);
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_null_term_target(void *ctx)
{
/* The compared target is not null-terminated, so this will fail */
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, no_str_target);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/strncmp_test.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
} data = {};
struct core_reloc_arrays_output {
int a2;
char b123;
int c1c;
int d00d;
int f01c;
};
struct core_reloc_arrays_substruct {
int c;
int d;
};
struct core_reloc_arrays {
int a[5];
char b[2][3][4];
struct core_reloc_arrays_substruct c[3];
struct core_reloc_arrays_substruct d[1][2];
struct core_reloc_arrays_substruct f[][2];
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
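/* Read assorted array elements through CO-RE relocations and mirror them
 * into the output struct; any failed read makes the program return 1.
 */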
SEC("raw_tracepoint/sys_enter")
int test_core_arrays(void *ctx)
{
struct core_reloc_arrays *in = (void *)&data.in;
struct core_reloc_arrays_output *out = (void *)&data.out;
if (CORE_READ(&out->a2, &in->a[2]))
return 1;
if (CORE_READ(&out->b123, &in->b[1][2][3]))
return 1;
if (CORE_READ(&out->c1c, &in->c[1].c))
return 1;
if (CORE_READ(&out->d00d, &in->d[0][0].d))
return 1;
if (CORE_READ(&out->f01c, &in->f[0][1].c))
return 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
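/* Count each egress packet in the per-cgroup storage and bump the global
 * invocation counter.
 */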
SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___fixed_arr x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___fixed_arr.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"
struct foo {
struct bpf_spin_lock lock;
int data;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct foo);
__uint(max_entries, 1);
} array_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
__array(values, struct array_map);
} map_of_maps SEC(".maps") = {
.values = {
[0] = &array_map,
},
};
SEC(".data.A") struct bpf_spin_lock lockA;
SEC(".data.B") struct bpf_spin_lock lockB;
SEC("?tc")
int lock_id_kptr_preserve(void *ctx)
{
struct foo *f;
f = bpf_obj_new(typeof(*f));
if (!f)
return 0;
bpf_this_cpu_ptr(f);
return 0;
}
SEC("?tc")
int lock_id_global_zero(void *ctx)
{
bpf_this_cpu_ptr(&lockA);
return 0;
}
SEC("?tc")
int lock_id_mapval_preserve(void *ctx)
{
struct foo *f;
int key = 0;
f = bpf_map_lookup_elem(&array_map, &key);
if (!f)
return 0;
bpf_this_cpu_ptr(f);
return 0;
}
SEC("?tc")
int lock_id_innermapval_preserve(void *ctx)
{
struct foo *f;
int key = 0;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &key);
if (!map)
return 0;
f = bpf_map_lookup_elem(map, &key);
if (!f)
return 0;
bpf_this_cpu_ptr(f);
return 0;
}
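/* Each CHECK() program below takes lock A but releases lock B; the verifier
 * must reject the mismatched lock/unlock pair.
 */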
#define CHECK(test, A, B) \
SEC("?tc") \
int lock_id_mismatch_##test(void *ctx) \
{ \
struct foo *f1, *f2, *v, *iv; \
int key = 0; \
void *map; \
\
map = bpf_map_lookup_elem(&map_of_maps, &key); \
if (!map) \
return 0; \
iv = bpf_map_lookup_elem(map, &key); \
if (!iv) \
return 0; \
v = bpf_map_lookup_elem(&array_map, &key); \
if (!v) \
return 0; \
f1 = bpf_obj_new(typeof(*f1)); \
if (!f1) \
return 0; \
f2 = bpf_obj_new(typeof(*f2)); \
if (!f2) { \
bpf_obj_drop(f1); \
return 0; \
} \
bpf_spin_lock(A); \
bpf_spin_unlock(B); \
return 0; \
}
CHECK(kptr_kptr, &f1->lock, &f2->lock);
CHECK(kptr_global, &f1->lock, &lockA);
CHECK(kptr_mapval, &f1->lock, &v->lock);
CHECK(kptr_innermapval, &f1->lock, &iv->lock);
CHECK(global_global, &lockA, &lockB);
CHECK(global_kptr, &lockA, &f1->lock);
CHECK(global_mapval, &lockA, &v->lock);
CHECK(global_innermapval, &lockA, &iv->lock);
SEC("?tc")
int lock_id_mismatch_mapval_mapval(void *ctx)
{
struct foo *f1, *f2;
int key = 0;
f1 = bpf_map_lookup_elem(&array_map, &key);
if (!f1)
return 0;
f2 = bpf_map_lookup_elem(&array_map, &key);
if (!f2)
return 0;
bpf_spin_lock(&f1->lock);
f1->data = 42;
bpf_spin_unlock(&f2->lock);
return 0;
}
CHECK(mapval_kptr, &v->lock, &f1->lock);
CHECK(mapval_global, &v->lock, &lockB);
CHECK(mapval_innermapval, &v->lock, &iv->lock);
SEC("?tc")
int lock_id_mismatch_innermapval_innermapval1(void *ctx)
{
struct foo *f1, *f2;
int key = 0;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &key);
if (!map)
return 0;
f1 = bpf_map_lookup_elem(map, &key);
if (!f1)
return 0;
f2 = bpf_map_lookup_elem(map, &key);
if (!f2)
return 0;
bpf_spin_lock(&f1->lock);
f1->data = 42;
bpf_spin_unlock(&f2->lock);
return 0;
}
SEC("?tc")
int lock_id_mismatch_innermapval_innermapval2(void *ctx)
{
struct foo *f1, *f2;
int key = 0;
void *map;
map = bpf_map_lookup_elem(&map_of_maps, &key);
if (!map)
return 0;
f1 = bpf_map_lookup_elem(map, &key);
if (!f1)
return 0;
map = bpf_map_lookup_elem(&map_of_maps, &key);
if (!map)
return 0;
f2 = bpf_map_lookup_elem(map, &key);
if (!f2)
return 0;
bpf_spin_lock(&f1->lock);
f1->data = 42;
bpf_spin_unlock(&f2->lock);
return 0;
}
CHECK(innermapval_kptr, &iv->lock, &f1->lock);
CHECK(innermapval_global, &iv->lock, &lockA);
CHECK(innermapval_mapval, &iv->lock, &v->lock);
#undef CHECK
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_spin_lock_fail.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
#define SERV4_PORT 4040
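/* Rewrite the peer address reported to userspace on IPv4 recvmsg() to
 * SERV4_IP:SERV4_PORT, and exercise sk priority get/set on the way.
 */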
SEC("cgroup/recvmsg4")
int recvmsg4_prog(struct bpf_sock_addr *ctx)
{
struct bpf_sock *sk;
sk = ctx->sk;
if (!sk)
return 1;
if (sk->family != AF_INET)
return 1;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 1;
if (!get_set_sk_priority(ctx))
return 1;
ctx->user_ip4 = bpf_htonl(SERV4_IP);
ctx->user_port = bpf_htons(SERV4_PORT);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/recvmsg4_prog.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
const volatile struct {
/* thread to activate trace programs for */
pid_t tgid;
/* return error from __init function */
int inject_error;
/* uffd monitored range start address */
void *fault_addr;
} bpf_mod_race_config = { -1 };
int bpf_blocking = 0;
int res_try_get_module = -1;
static __always_inline bool check_thread_id(void)
{
struct task_struct *task = bpf_get_current_task_btf();
return task->tgid == bpf_mod_race_config.tgid;
}
/* The trace of execution is something like this:
*
* finit_module()
* load_module()
* prepare_coming_module()
* notifier_call(MODULE_STATE_COMING)
* btf_parse_module()
* btf_alloc_id() // Visible to userspace at this point
* list_add(btf_mod->list, &btf_modules)
* do_init_module()
* freeinit = kmalloc()
* ret = mod->init()
* bpf_prog_widen_race()
* bpf_copy_from_user()
* ...<sleep>...
* if (ret < 0)
* ...
* free_module()
* return ret
*
* At this point, module loading thread is blocked, we now load the program:
*
* bpf_check
* add_kfunc_call/check_pseudo_btf_id
* btf_try_get_module
* try_get_module_live == false
* return -ENXIO
*
* Without the fix (try_get_module_live in btf_try_get_module):
*
* bpf_check
* add_kfunc_call/check_pseudo_btf_id
* btf_try_get_module
* try_get_module == true
* <store module reference in btf_kfunc_tab or used_btf array>
* ...
* return fd
*
* Now, if we inject an error in the blocked program, our module will be freed
* (going straight from MODULE_STATE_COMING to MODULE_STATE_GOING).
* Later, when bpf program is freed, it will try to module_put already freed
* module. This is why try_get_module_live returns false if mod->state is not
* MODULE_STATE_LIVE.
*/
SEC("fmod_ret.s/bpf_fentry_test1")
int BPF_PROG(widen_race, int a, int ret)
{
char dst;
if (!check_thread_id())
return 0;
/* Indicate that we will attempt to block */
bpf_blocking = 1;
bpf_copy_from_user(&dst, 1, bpf_mod_race_config.fault_addr);
return bpf_mod_race_config.inject_error;
}
SEC("fexit/do_init_module")
int BPF_PROG(fexit_init_module, struct module *mod, int ret)
{
if (!check_thread_id())
return 0;
/* Indicate that we finished blocking */
bpf_blocking = 2;
return 0;
}
SEC("fexit/btf_try_get_module")
int BPF_PROG(fexit_module_get, const struct btf *btf, struct module *mod)
{
res_try_get_module = !!mod;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bpf_mod_race.c |
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
const char LICENSE[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array SEC(".maps");
__noinline int sub1(int x)
{
int key = 0;
bpf_map_lookup_elem(&array, &key);
return x + 1;
}
static __noinline int sub5(int v);
__noinline int sub2(int y)
{
return sub5(y + 2);
}
static __noinline int sub3(int z)
{
return z + 3 + sub1(4);
}
static __noinline int sub4(int w)
{
int key = 0;
bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}
/* sub5() is an identity function, just to test weirder function layouts and
 * call patterns
*/
static __noinline int sub5(int v)
{
return sub1(v) - 1; /* compensates sub1()'s + 1 */
}
/* Unfortunately the verifier rejects `struct task_struct *t` as an unknown
 * pointer type, so we need to accept the pointer as an integer and then cast
 * it inside the function
*/
__noinline int get_task_tgid(uintptr_t t)
{
/* this ensures that CO-RE relocs work in multi-subprogs .text */
return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);
}
int res1 = 0;
int res2 = 0;
int res3 = 0;
int res4 = 0;
SEC("raw_tp/sys_enter")
int prog1(void *ctx)
{
/* perform some CO-RE relocations to ensure they work with multi-prog
* sections correctly
*/
struct task_struct *t = (void *)bpf_get_current_task();
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
res1 = sub1(1) + sub3(2); /* (1 + 1) + (2 + 3 + (4 + 1)) = 12 */
return 0;
}
SEC("raw_tp/sys_exit")
int prog2(void *ctx)
{
struct task_struct *t = (void *)bpf_get_current_task();
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
res2 = sub2(3) + sub3(4); /* (3 + 2) + (4 + 3 + (4 + 1)) = 17 */
return 0;
}
static int empty_callback(__u32 index, void *data)
{
return 0;
}
/* prog3 has the same section name as prog1 */
SEC("raw_tp/sys_enter")
int prog3(void *ctx)
{
struct task_struct *t = (void *)bpf_get_current_task();
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
/* test that ld_imm64 with BPF_PSEUDO_FUNC doesn't get blinded */
bpf_loop(1, empty_callback, NULL, 0);
res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
return 0;
}
/* prog4 has the same section name as prog2 */
SEC("raw_tp/sys_exit")
int prog4(void *ctx)
{
struct task_struct *t = (void *)bpf_get_current_task();
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
res4 = sub4(7) + sub1(8); /* (7 + (5 + 3 + (4 + 1)) + (6 + 1)) + (8 + 1) = 36 */
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_subprogs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int count, which;
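/* The __builtin_constant_p() checks below assert that 'which' is not a
 * compile-time constant: if the compiler could prove it constant,
 * __bpf_unreachable() would break the program, so the tail call index is
 * guaranteed to stay a runtime value.
 */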
SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
count++;
if (__builtin_constant_p(which))
__bpf_unreachable();
bpf_tail_call(skb, &jmp_table, which);
return 1;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
if (__builtin_constant_p(which))
__bpf_unreachable();
bpf_tail_call(skb, &jmp_table, which);
return 0;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/tailcall6.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
/* WARNING: This implementation is not necessarily the same
 * as tcp_dctcp.c. The purpose is mainly for testing
* the kernel BPF logic.
*/
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
volatile const char fallback[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
int tcp_cdg_res = 0;
int stg_result = 0;
int ebusy_cnt = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
#define DCTCP_MAX_ALPHA 1024U
struct dctcp {
__u32 old_delivered;
__u32 old_delivered_ce;
__u32 prior_rcv_nxt;
__u32 dctcp_alpha;
__u32 next_seq;
__u32 ce_state;
__u32 loss_cwnd;
};
static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
static __always_inline void dctcp_reset(const struct tcp_sock *tp,
struct dctcp *ca)
{
ca->next_seq = tp->snd_nxt;
ca->old_delivered = tp->delivered;
ca->old_delivered_ce = tp->delivered_ce;
}
SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
int *stg;
if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
/* Switch to fallback */
if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)fallback, sizeof(fallback)) == -EBUSY)
ebusy_cnt++;
/* Switch back to myself and the recurred dctcp_init()
* will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
* except the last "cdg" one.
*/
if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
ebusy_cnt++;
/* Switch back to fallback */
if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)fallback, sizeof(fallback)) == -EBUSY)
ebusy_cnt++;
/* Expecting -ENOTSUPP for tcp_cdg_res */
tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)tcp_cdg, sizeof(tcp_cdg));
bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
(void *)cc_res, sizeof(cc_res));
return;
}
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->loss_cwnd = 0;
ca->ce_state = 0;
stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
if (stg) {
stg_result = *stg;
bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
}
dctcp_reset(tp, ca);
}
SEC("struct_ops/dctcp_ssthresh")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
SEC("struct_ops/dctcp_update_alpha")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
__u32 alpha = ca->dctcp_alpha;
/* alpha = (1 - g) * alpha + g * F */
alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
if (delivered_ce) {
__u32 delivered = tp->delivered - ca->old_delivered;
/* If dctcp_shift_g == 1, a 32bit value would overflow
* after 8 M packets.
*/
delivered_ce <<= (10 - dctcp_shift_g);
delivered_ce /= max(1U, delivered);
alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
}
ca->dctcp_alpha = alpha;
dctcp_reset(tp, ca);
}
}
static __always_inline void dctcp_react_to_loss(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}
SEC("struct_ops/dctcp_state")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk);
/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
struct tcp_sock *tp = tcp_sk(sk);
if (ce_state == 1)
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
else
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
/* Minimal DCTCP CE state machine:
*
* S: 0 <- last pkt was non-CE
* 1 <- last pkt was CE
*/
static __always_inline
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
__u32 *prior_rcv_nxt, __u32 *ce_state)
{
__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
if (*ce_state != new_ce_state) {
/* CE state has changed, force an immediate ACK to
* reflect the new CE state. If an ACK was delayed,
* send that first to reflect the prior CE state.
*/
if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
dctcp_ece_ack_cwr(sk, *ce_state);
bpf_tcp_send_ack(sk, *prior_rcv_nxt);
}
inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
*ce_state = new_ce_state;
dctcp_ece_ack_cwr(sk, new_ce_state);
}
SEC("struct_ops/dctcp_cwnd_event")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
struct dctcp *ca = inet_csk_ca(sk);
switch (ev) {
case CA_EVENT_ECN_IS_CE:
case CA_EVENT_ECN_NO_CE:
dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
break;
case CA_EVENT_LOSS:
dctcp_react_to_loss(sk);
break;
default:
/* Don't care for the rest. */
break;
}
}
SEC("struct_ops/dctcp_cwnd_undo")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
SEC("struct_ops/dctcp_reno_cong_avoid")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
.init = (void *)dctcp_init,
.set_state = (void *)dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp_nouse",
};
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
.init = (void *)dctcp_init,
.in_ack_event = (void *)dctcp_update_alpha,
.cwnd_event = (void *)dctcp_cwnd_event,
.ssthresh = (void *)dctcp_ssthresh,
.cong_avoid = (void *)dctcp_cong_avoid,
.undo_cwnd = (void *)dctcp_cwnd_undo,
.set_state = (void *)dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp",
};
| linux-master | tools/testing/selftests/bpf/progs/bpf_dctcp.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define VERDICT_REJECT 0
#define VERDICT_PROCEED 1
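/* Reject TCP connect() attempts to port 60120; let everything else proceed. */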
SEC("cgroup/connect4")
int connect_v4_dropper(struct bpf_sock_addr *ctx)
{
if (ctx->type != SOCK_STREAM)
return VERDICT_PROCEED;
if (ctx->user_port == bpf_htons(60120))
return VERDICT_REJECT;
return VERDICT_PROCEED;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/connect4_dropper.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2022 Sony Group Corporation */
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
int arg1 = 0;
unsigned long arg2 = 0;
unsigned long arg3 = 0;
unsigned long arg4_cx = 0;
unsigned long arg4 = 0;
unsigned long arg5 = 0;
int arg1_core = 0;
unsigned long arg2_core = 0;
unsigned long arg3_core = 0;
unsigned long arg4_core_cx = 0;
unsigned long arg4_core = 0;
unsigned long arg5_core = 0;
int option_syscall = 0;
unsigned long arg2_syscall = 0;
unsigned long arg3_syscall = 0;
unsigned long arg4_syscall = 0;
unsigned long arg5_syscall = 0;
const volatile pid_t filter_pid = 0;
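/* Capture the prctl() syscall arguments via both the PT_REGS_PARM*_SYSCALL
 * macros and their CO-RE variants so userspace can verify that the macros
 * resolve correctly on this architecture.
 */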
SEC("kprobe/" SYS_PREFIX "sys_prctl")
int BPF_KPROBE(handle_sys_prctl)
{
struct pt_regs *real_regs;
pid_t pid = bpf_get_current_pid_tgid() >> 32;
unsigned long tmp = 0;
if (pid != filter_pid)
return 0;
real_regs = PT_REGS_SYSCALL_REGS(ctx);
/* test for PT_REGS_PARM */
#if !defined(bpf_target_arm64) && !defined(bpf_target_s390)
bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs));
#endif
arg1 = tmp;
bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs));
bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs));
bpf_probe_read_kernel(&arg4_cx, sizeof(arg4_cx), &PT_REGS_PARM4(real_regs));
bpf_probe_read_kernel(&arg4, sizeof(arg4), &PT_REGS_PARM4_SYSCALL(real_regs));
bpf_probe_read_kernel(&arg5, sizeof(arg5), &PT_REGS_PARM5_SYSCALL(real_regs));
/* test for the CORE variant of PT_REGS_PARM */
arg1_core = PT_REGS_PARM1_CORE_SYSCALL(real_regs);
arg2_core = PT_REGS_PARM2_CORE_SYSCALL(real_regs);
arg3_core = PT_REGS_PARM3_CORE_SYSCALL(real_regs);
arg4_core_cx = PT_REGS_PARM4_CORE(real_regs);
arg4_core = PT_REGS_PARM4_CORE_SYSCALL(real_regs);
arg5_core = PT_REGS_PARM5_CORE_SYSCALL(real_regs);
return 0;
}
SEC("ksyscall/prctl")
int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
if (pid != filter_pid)
return 0;
option_syscall = option;
arg2_syscall = arg2;
arg3_syscall = arg3;
arg4_syscall = arg4;
arg5_syscall = arg5;
return 0;
}
__u64 splice_fd_in;
__u64 splice_off_in;
__u64 splice_fd_out;
__u64 splice_off_out;
__u64 splice_len;
__u64 splice_flags;
SEC("ksyscall/splice")
int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out,
loff_t *off_out, size_t len, unsigned int flags)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
if (pid != filter_pid)
return 0;
splice_fd_in = fd_in;
splice_off_in = (__u64)off_in;
splice_fd_out = fd_out;
splice_off_out = (__u64)off_out;
splice_len = len;
splice_flags = flags;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bpf_syscall_macro.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
struct a_struct {
int x;
};
struct a_complex_struct {
union {
struct a_struct *a;
void *b;
} x;
volatile long y;
};
union a_union {
int y;
int z;
};
typedef struct a_struct named_struct_typedef;
typedef struct { int x, y, z; } anon_struct_typedef;
typedef struct {
int a, b, c;
} *struct_ptr_typedef;
enum an_enum {
AN_ENUM_VAL1 = 1,
AN_ENUM_VAL2 = 2,
AN_ENUM_VAL3 = 3,
};
typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
typedef char arr_typedef[20];
struct core_reloc_type_based_output {
bool struct_exists;
bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
bool typedef_anon_struct_exists;
bool typedef_struct_ptr_exists;
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
bool struct_matches;
bool complex_struct_matches;
bool union_matches;
bool enum_matches;
bool typedef_named_struct_matches;
bool typedef_anon_struct_matches;
bool typedef_struct_ptr_matches;
bool typedef_int_matches;
bool typedef_enum_matches;
bool typedef_void_ptr_matches;
bool typedef_restrict_ptr_matches;
bool typedef_func_proto_matches;
bool typedef_arr_matches;
int struct_sz;
int union_sz;
int enum_sz;
int typedef_named_struct_sz;
int typedef_anon_struct_sz;
int typedef_struct_ptr_sz;
int typedef_int_sz;
int typedef_enum_sz;
int typedef_void_ptr_sz;
int typedef_func_proto_sz;
int typedef_arr_sz;
};
SEC("raw_tracepoint/sys_enter")
int test_core_type_based(void *ctx)
{
/* Support for the BPF_TYPE_MATCHES argument to the
* __builtin_preserve_type_info builtin was added at some point during
* development of clang 15 and it's what we require for this test. Part of it
* could run with merely __builtin_preserve_type_info (which could be checked
* separately), but we have to find an upper bound.
*/
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct core_reloc_type_based_output *out = (void *)&data.out;
out->struct_exists = bpf_core_type_exists(struct a_struct);
out->complex_struct_exists = bpf_core_type_exists(struct a_complex_struct);
out->union_exists = bpf_core_type_exists(union a_union);
out->enum_exists = bpf_core_type_exists(enum an_enum);
out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
out->typedef_anon_struct_exists = bpf_core_type_exists(anon_struct_typedef);
out->typedef_struct_ptr_exists = bpf_core_type_exists(struct_ptr_typedef);
out->typedef_int_exists = bpf_core_type_exists(int_typedef);
out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
out->typedef_restrict_ptr_exists = bpf_core_type_exists(restrict_ptr_typedef);
out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);
out->struct_matches = bpf_core_type_matches(struct a_struct);
out->complex_struct_matches = bpf_core_type_matches(struct a_complex_struct);
out->union_matches = bpf_core_type_matches(union a_union);
out->enum_matches = bpf_core_type_matches(enum an_enum);
out->typedef_named_struct_matches = bpf_core_type_matches(named_struct_typedef);
out->typedef_anon_struct_matches = bpf_core_type_matches(anon_struct_typedef);
out->typedef_struct_ptr_matches = bpf_core_type_matches(struct_ptr_typedef);
out->typedef_int_matches = bpf_core_type_matches(int_typedef);
out->typedef_enum_matches = bpf_core_type_matches(enum_typedef);
out->typedef_void_ptr_matches = bpf_core_type_matches(void_ptr_typedef);
out->typedef_restrict_ptr_matches = bpf_core_type_matches(restrict_ptr_typedef);
out->typedef_func_proto_matches = bpf_core_type_matches(func_proto_typedef);
out->typedef_arr_matches = bpf_core_type_matches(arr_typedef);
out->struct_sz = bpf_core_type_size(struct a_struct);
out->union_sz = bpf_core_type_size(union a_union);
out->enum_sz = bpf_core_type_size(enum an_enum);
out->typedef_named_struct_sz = bpf_core_type_size(named_struct_typedef);
out->typedef_anon_struct_sz = bpf_core_type_size(anon_struct_typedef);
out->typedef_struct_ptr_sz = bpf_core_type_size(struct_ptr_typedef);
out->typedef_int_sz = bpf_core_type_size(int_typedef);
out->typedef_enum_sz = bpf_core_type_size(enum_typedef);
out->typedef_void_ptr_sz = bpf_core_type_size(void_ptr_typedef);
out->typedef_func_proto_sz = bpf_core_type_size(func_proto_typedef);
out->typedef_arr_sz = bpf_core_type_size(arr_typedef);
#else
data.skip = true;
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
__type(key, int);
__type(value, struct ipv_counts);
} btf_map SEC(".maps");
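/* Exercise a BTF-defined array map through a chain of noinline helpers. */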
__attribute__((noinline))
int test_long_fname_2(void)
{
struct ipv_counts *counts;
int key = 0;
counts = bpf_map_lookup_elem(&btf_map, &key);
if (!counts)
return 0;
counts->v6++;
return 0;
}
__attribute__((noinline))
int test_long_fname_1(void)
{
return test_long_fname_2();
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(void *arg)
{
return test_long_fname_1();
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_btf_newkv.c |
#include "core_reloc_types.h"
void f(struct core_reloc_enum64val x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c |