python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/* In-place tunneling */
#include <stdbool.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/pkt_cls.h>
#include <linux/types.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
static const int cfg_port = 8000;
static const int cfg_udp_src = 20000;
#define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN)
#define UDP_PORT 5555
#define MPLS_OVER_UDP_PORT 6635
#define ETH_OVER_UDP_PORT 7777
#define VXLAN_UDP_PORT 8472
#define EXTPROTO_VXLAN 0x1
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VNI_MASK bpf_htonl((VXLAN_N_VID - 1) << 8)
#define VXLAN_FLAGS 0x8
#define VXLAN_VNI 1
#ifndef NEXTHDR_DEST
#define NEXTHDR_DEST 60
#endif
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
struct vxlanhdr {
__be32 vx_flags;
__be32 vx_vni;
} __attribute__((packed));
struct gre_hdr {
__be16 flags;
__be16 protocol;
} __attribute__((packed));
union l4hdr {
struct udphdr udp;
struct gre_hdr gre;
};
struct v4hdr {
struct iphdr ip;
union l4hdr l4hdr;
__u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
struct v6hdr {
struct ipv6hdr ip;
union l4hdr l4hdr;
__u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
static __always_inline void set_ipv4_csum(struct iphdr *iph)
{
__u16 *iph16 = (__u16 *)iph;
__u32 csum;
int i;
iph->check = 0;
#pragma clang loop unroll(full)
for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
csum += *iph16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
}
static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto, __u16 ext_proto)
{
__u16 udp_dst = UDP_PORT;
struct iphdr iph_inner;
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
__u8 *l2_hdr = NULL;
int tcp_off;
__u64 flags;
/* Most tests encapsulate a packet into a tunnel with the same
* network protocol, and derive the outer header fields from
* the inner header.
*
* The 6in4 case tests different inner and outer protocols. As
* the inner is ipv6, but the outer expects an ipv4 header as
* input, manually build a struct iphdr based on the ipv6hdr.
*/
if (encap_proto == IPPROTO_IPV6) {
const __u32 saddr = (192 << 24) | (168 << 16) | (1 << 8) | 1;
const __u32 daddr = (192 << 24) | (168 << 16) | (1 << 8) | 2;
struct ipv6hdr iph6_inner;
/* Read the IPv6 header */
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph6_inner,
sizeof(iph6_inner)) < 0)
return TC_ACT_OK;
/* Derive the IPv4 header fields from the IPv6 header */
memset(&iph_inner, 0, sizeof(iph_inner));
iph_inner.version = 4;
iph_inner.ihl = 5;
iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
bpf_ntohs(iph6_inner.payload_len));
iph_inner.ttl = iph6_inner.hop_limit - 1;
iph_inner.protocol = iph6_inner.nexthdr;
iph_inner.saddr = __bpf_constant_htonl(saddr);
iph_inner.daddr = __bpf_constant_htonl(daddr);
tcp_off = sizeof(iph6_inner);
} else {
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
sizeof(iph_inner)) < 0)
return TC_ACT_OK;
tcp_off = sizeof(iph_inner);
}
/* filter only packets we want */
if (iph_inner.ihl != 5 || iph_inner.protocol != IPPROTO_TCP)
return TC_ACT_OK;
if (bpf_skb_load_bytes(skb, ETH_HLEN + tcp_off,
&tcph, sizeof(tcph)) < 0)
return TC_ACT_OK;
if (tcph.dest != __bpf_constant_htons(cfg_port))
return TC_ACT_OK;
olen = sizeof(h_outer.ip);
l2_len = 0;
flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
switch (l2_proto) {
case ETH_P_MPLS_UC:
l2_len = sizeof(mpls_label);
udp_dst = MPLS_OVER_UDP_PORT;
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
if (ext_proto & EXTPROTO_VXLAN) {
udp_dst = VXLAN_UDP_PORT;
l2_len += sizeof(struct vxlanhdr);
} else
udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
switch (encap_proto) {
case IPPROTO_GRE:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
olen += sizeof(h_outer.l4hdr.gre);
h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
h_outer.l4hdr.gre.flags = 0;
break;
case IPPROTO_UDP:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
olen += sizeof(h_outer.l4hdr.udp);
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
sizeof(h_outer.l4hdr.udp) +
l2_len);
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
break;
default:
return TC_ACT_OK;
}
/* add L2 encap (if specified) */
l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
*(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
if (ext_proto & EXTPROTO_VXLAN) {
struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
vxlan_hdr->vx_flags = VXLAN_FLAGS;
vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
l2_hdr += sizeof(struct vxlanhdr);
}
if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
break;
}
olen += l2_len;
/* add room between mac and network header */
if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
/* prepare new outer network header */
h_outer.ip = iph_inner;
h_outer.ip.tot_len = bpf_htons(olen +
bpf_ntohs(h_outer.ip.tot_len));
h_outer.ip.protocol = encap_proto;
set_ipv4_csum((void *)&h_outer.ip);
/* store new outer network header */
if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
BPF_F_INVALIDATE_HASH) < 0)
return TC_ACT_SHOT;
/* if changing outer proto type, update eth->h_proto */
if (encap_proto == IPPROTO_IPV6) {
struct ethhdr eth;
if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
return TC_ACT_SHOT;
eth.h_proto = bpf_htons(ETH_P_IP);
if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
return __encap_ipv4(skb, encap_proto, l2_proto, 0);
}
static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto, __u16 ext_proto)
{
__u16 udp_dst = UDP_PORT;
struct ipv6hdr iph_inner;
struct v6hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
__u8 *l2_hdr = NULL;
__u16 tot_len;
__u64 flags;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
sizeof(iph_inner)) < 0)
return TC_ACT_OK;
/* filter only packets we want */
if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(iph_inner),
&tcph, sizeof(tcph)) < 0)
return TC_ACT_OK;
if (tcph.dest != __bpf_constant_htons(cfg_port))
return TC_ACT_OK;
olen = sizeof(h_outer.ip);
l2_len = 0;
flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
switch (l2_proto) {
case ETH_P_MPLS_UC:
l2_len = sizeof(mpls_label);
udp_dst = MPLS_OVER_UDP_PORT;
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
if (ext_proto & EXTPROTO_VXLAN) {
udp_dst = VXLAN_UDP_PORT;
l2_len += sizeof(struct vxlanhdr);
} else
udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
switch (encap_proto) {
case IPPROTO_GRE:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
olen += sizeof(h_outer.l4hdr.gre);
h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
h_outer.l4hdr.gre.flags = 0;
break;
case IPPROTO_UDP:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
olen += sizeof(h_outer.l4hdr.udp);
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
sizeof(h_outer.l4hdr.udp) + l2_len;
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(tot_len);
break;
case IPPROTO_IPV6:
break;
default:
return TC_ACT_OK;
}
/* add L2 encap (if specified) */
l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
*(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
if (ext_proto & EXTPROTO_VXLAN) {
struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
vxlan_hdr->vx_flags = VXLAN_FLAGS;
vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
l2_hdr += sizeof(struct vxlanhdr);
}
if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
break;
}
olen += l2_len;
/* add room between mac and network header */
if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
/* prepare new outer network header */
h_outer.ip = iph_inner;
h_outer.ip.payload_len = bpf_htons(olen +
bpf_ntohs(h_outer.ip.payload_len));
h_outer.ip.nexthdr = encap_proto;
/* store new outer network header */
if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
BPF_F_INVALIDATE_HASH) < 0)
return TC_ACT_SHOT;
return TC_ACT_OK;
}
static int encap_ipv6_ipip6(struct __sk_buff *skb)
{
struct iphdr iph_inner;
struct v6hdr h_outer;
struct tcphdr tcph;
struct ethhdr eth;
__u64 flags;
int olen;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
sizeof(iph_inner)) < 0)
return TC_ACT_OK;
/* filter only packets we want */
if (bpf_skb_load_bytes(skb, ETH_HLEN + (iph_inner.ihl << 2),
&tcph, sizeof(tcph)) < 0)
return TC_ACT_OK;
if (tcph.dest != __bpf_constant_htons(cfg_port))
return TC_ACT_OK;
olen = sizeof(h_outer.ip);
flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
/* add room between mac and network header */
if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
/* prepare new outer network header */
memset(&h_outer.ip, 0, sizeof(h_outer.ip));
h_outer.ip.version = 6;
h_outer.ip.hop_limit = iph_inner.ttl;
h_outer.ip.saddr.s6_addr[1] = 0xfd;
h_outer.ip.saddr.s6_addr[15] = 1;
h_outer.ip.daddr.s6_addr[1] = 0xfd;
h_outer.ip.daddr.s6_addr[15] = 2;
h_outer.ip.payload_len = iph_inner.tot_len;
h_outer.ip.nexthdr = IPPROTO_IPIP;
/* store new outer network header */
if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
BPF_F_INVALIDATE_HASH) < 0)
return TC_ACT_SHOT;
/* update eth->h_proto */
if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
return TC_ACT_SHOT;
eth.h_proto = bpf_htons(ETH_P_IPV6);
if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
return TC_ACT_SHOT;
return TC_ACT_OK;
}
static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
return __encap_ipv6(skb, encap_proto, l2_proto, 0);
}
SEC("encap_ipip_none")
int __encap_ipip_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP);
else
return TC_ACT_OK;
}
SEC("encap_gre_none")
int __encap_gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP);
else
return TC_ACT_OK;
}
SEC("encap_gre_mpls")
int __encap_gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
else
return TC_ACT_OK;
}
SEC("encap_gre_eth")
int __encap_gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_GRE, ETH_P_TEB);
else
return TC_ACT_OK;
}
SEC("encap_udp_none")
int __encap_udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP);
else
return TC_ACT_OK;
}
SEC("encap_udp_mpls")
int __encap_udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
else
return TC_ACT_OK;
}
SEC("encap_udp_eth")
int __encap_udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv4(skb, IPPROTO_UDP, ETH_P_TEB);
else
return TC_ACT_OK;
}
SEC("encap_vxlan_eth")
int __encap_vxlan_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return __encap_ipv4(skb, IPPROTO_UDP,
ETH_P_TEB,
EXTPROTO_VXLAN);
else
return TC_ACT_OK;
}
SEC("encap_sit_none")
int __encap_sit_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv4(skb, IPPROTO_IPV6, ETH_P_IP);
else
return TC_ACT_OK;
}
SEC("encap_ip6tnl_none")
int __encap_ip6tnl_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6);
else
return TC_ACT_OK;
}
SEC("encap_ipip6_none")
int __encap_ipip6_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
return encap_ipv6_ipip6(skb);
else
return TC_ACT_OK;
}
SEC("encap_ip6gre_none")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6);
else
return TC_ACT_OK;
}
SEC("encap_ip6gre_mpls")
int __encap_ip6gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
else
return TC_ACT_OK;
}
SEC("encap_ip6gre_eth")
int __encap_ip6gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_GRE, ETH_P_TEB);
else
return TC_ACT_OK;
}
SEC("encap_ip6udp_none")
int __encap_ip6udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6);
else
return TC_ACT_OK;
}
SEC("encap_ip6udp_mpls")
int __encap_ip6udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
else
return TC_ACT_OK;
}
SEC("encap_ip6udp_eth")
int __encap_ip6udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return encap_ipv6(skb, IPPROTO_UDP, ETH_P_TEB);
else
return TC_ACT_OK;
}
SEC("encap_ip6vxlan_eth")
int __encap_ip6vxlan_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
return __encap_ipv6(skb, IPPROTO_UDP,
ETH_P_TEB,
EXTPROTO_VXLAN);
else
return TC_ACT_OK;
}
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO;
struct ipv6_opt_hdr ip6_opt_hdr;
struct gre_hdr greh;
struct udphdr udph;
int olen = len;
switch (proto) {
case IPPROTO_IPIP:
flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
break;
case IPPROTO_IPV6:
flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
break;
case NEXTHDR_DEST:
if (bpf_skb_load_bytes(skb, off + len, &ip6_opt_hdr,
sizeof(ip6_opt_hdr)) < 0)
return TC_ACT_OK;
switch (ip6_opt_hdr.nexthdr) {
case IPPROTO_IPIP:
flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
break;
case IPPROTO_IPV6:
flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
break;
default:
return TC_ACT_OK;
}
break;
case IPPROTO_GRE:
olen += sizeof(struct gre_hdr);
if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
return TC_ACT_OK;
switch (bpf_ntohs(greh.protocol)) {
case ETH_P_MPLS_UC:
olen += sizeof(mpls_label);
break;
case ETH_P_TEB:
olen += ETH_HLEN;
break;
}
break;
case IPPROTO_UDP:
olen += sizeof(struct udphdr);
if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
return TC_ACT_OK;
switch (bpf_ntohs(udph.dest)) {
case MPLS_OVER_UDP_PORT:
olen += sizeof(mpls_label);
break;
case ETH_OVER_UDP_PORT:
olen += ETH_HLEN;
break;
case VXLAN_UDP_PORT:
olen += ETH_HLEN + sizeof(struct vxlanhdr);
break;
}
break;
default:
return TC_ACT_OK;
}
if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
return TC_ACT_OK;
}
static int decap_ipv4(struct __sk_buff *skb)
{
struct iphdr iph_outer;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
sizeof(iph_outer)) < 0)
return TC_ACT_OK;
if (iph_outer.ihl != 5)
return TC_ACT_OK;
return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
iph_outer.protocol);
}
static int decap_ipv6(struct __sk_buff *skb)
{
struct ipv6hdr iph_outer;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
sizeof(iph_outer)) < 0)
return TC_ACT_OK;
return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
iph_outer.nexthdr);
}
SEC("decap")
int decap_f(struct __sk_buff *skb)
{
switch (skb->protocol) {
case __bpf_constant_htons(ETH_P_IP):
return decap_ipv4(skb);
case __bpf_constant_htons(ETH_P_IPV6):
return decap_ipv6(skb);
default:
/* does not match, ignore */
return TC_ACT_OK;
}
}
char __license[] SEC("license") = "GPL";
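/* A minimal user-space sketch of attaching one of the encap sections above to
 * a clsact egress hook with libbpf's TC APIs. Illustrative only: the selftest
 * harness normally attaches these with tc(8); the object name
 * "test_tc_tunnel.bpf.o" and the interface name are placeholders, and the
 * program is looked up by its C function name ("__encap_ipip_none"), not by
 * section name.
 */
#if 0	/* not compiled as part of this BPF object */
#include <net/if.h>
#include <bpf/libbpf.h>

int attach_encap_egress(const char *ifname)
{
	LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_EGRESS);
	LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
	struct bpf_program *prog;
	struct bpf_object *obj;
	int err;

	hook.ifindex = if_nametoindex(ifname);
	if (!hook.ifindex)
		return -1;
	obj = bpf_object__open_file("test_tc_tunnel.bpf.o", NULL);
	if (!obj)
		return -1;
	err = bpf_object__load(obj);
	if (err)
		goto out;
	prog = bpf_object__find_program_by_name(obj, "__encap_ipip_none");
	if (!prog) {
		err = -1;
		goto out;
	}
	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST)	/* clsact may already exist */
		goto out;
	opts.prog_fd = bpf_program__fd(prog);
	err = bpf_tc_attach(&hook, &opts);
out:
	bpf_object__close(obj);
	return err;
}
#endif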
| linux-master | tools/testing/selftests/bpf/progs/test_tc_tunnel.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/masking.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("masking, test out of bounds 1")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_1(void)
{
asm volatile (" \
w1 = 5; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 5 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 2")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_2(void)
{
asm volatile (" \
w1 = 1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 3")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_3(void)
{
asm volatile (" \
w1 = 0xffffffff; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 4")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_4(void)
{
asm volatile (" \
w1 = 0xffffffff; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 5")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_5(void)
{
asm volatile (" \
w1 = -1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 6")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_6(void)
{
asm volatile (" \
w1 = -1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 7")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_7(void)
{
asm volatile (" \
r1 = 5; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 5 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 8")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_8(void)
{
asm volatile (" \
r1 = 1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 9")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_9(void)
{
asm volatile (" \
r1 = 0xffffffff; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 10")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_10(void)
{
asm volatile (" \
r1 = 0xffffffff; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 11")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_11(void)
{
asm volatile (" \
r1 = -1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test out of bounds 12")
__success __success_unpriv __retval(0)
__naked void test_out_of_bounds_12(void)
{
asm volatile (" \
r1 = -1; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 1")
__success __success_unpriv __retval(4)
__naked void masking_test_in_bounds_1(void)
{
asm volatile (" \
w1 = 4; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 5 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 2")
__success __success_unpriv __retval(0)
__naked void masking_test_in_bounds_2(void)
{
asm volatile (" \
w1 = 0; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 3")
__success __success_unpriv __retval(0xfffffffe)
__naked void masking_test_in_bounds_3(void)
{
asm volatile (" \
w1 = 0xfffffffe; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xffffffff - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 4")
__success __success_unpriv __retval(0xabcde)
__naked void masking_test_in_bounds_4(void)
{
asm volatile (" \
w1 = 0xabcde; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 0xabcdef - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 5")
__success __success_unpriv __retval(0)
__naked void masking_test_in_bounds_5(void)
{
asm volatile (" \
w1 = 0; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 1 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 6")
__success __success_unpriv __retval(46)
__naked void masking_test_in_bounds_6(void)
{
asm volatile (" \
w1 = 46; \
w2 = %[__imm_0]; \
r2 -= r1; \
r2 |= r1; \
r2 = -r2; \
r2 s>>= 63; \
r1 &= r2; \
r0 = r1; \
exit; \
" :
: __imm_const(__imm_0, 47 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 7")
__success __success_unpriv __retval(46)
__naked void masking_test_in_bounds_7(void)
{
asm volatile (" \
r3 = -46; \
r3 *= -1; \
w2 = %[__imm_0]; \
r2 -= r3; \
r2 |= r3; \
r2 = -r2; \
r2 s>>= 63; \
r3 &= r2; \
r0 = r3; \
exit; \
" :
: __imm_const(__imm_0, 47 - 1)
: __clobber_all);
}
SEC("socket")
__description("masking, test in bounds 8")
__success __success_unpriv __retval(0)
__naked void masking_test_in_bounds_8(void)
{
asm volatile (" \
r3 = -47; \
r3 *= -1; \
w2 = %[__imm_0]; \
r2 -= r3; \
r2 |= r3; \
r2 = -r2; \
r2 s>>= 63; \
r3 &= r2; \
r0 = r3; \
exit; \
" :
: __imm_const(__imm_0, 47 - 1)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
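/* The asm sequence repeated in every test above is the branchless index
 * masking pattern: with an index in r1 (or r3) and the bound (size - 1) in w2,
 * it evaluates to the index when it lies in [0, size - 1] and to 0 otherwise.
 * A rough C equivalent is sketched below; it assumes size - 1 < 2^63 and is
 * not part of the verifier tests themselves.
 */
#if 0	/* illustrative C equivalent only */
static unsigned long long mask_index(unsigned long long idx,
				     unsigned long long max /* size - 1 */)
{
	unsigned long long m = (max - idx) | idx; /* MSB set iff idx is out of [0, max] */

	m = -m;					  /* MSB now set iff m was a positive in-range delta */
	m = (unsigned long long)((long long)m >> 63); /* all-ones for a usable non-zero index */
	return idx & m;				  /* idx when in bounds, 0 otherwise (idx == 0 stays 0) */
}
#endif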
| linux-master | tools/testing/selftests/bpf/progs/verifier_masking.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct sample {
int pid;
int seq;
long value;
char comm[16];
};
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
} ringbuf SEC(".maps");
/* inputs */
int pid = 0;
long value = 0;
long flags = 0;
/* outputs */
long total = 0;
long discarded = 0;
long dropped = 0;
long avail_data = 0;
long ring_size = 0;
long cons_pos = 0;
long prod_pos = 0;
/* inner state */
long seq = 0;
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
if (cur_pid != pid)
return 0;
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
return 0;
}
sample->pid = pid;
bpf_get_current_comm(sample->comm, sizeof(sample->comm));
sample->value = value;
sample->seq = seq++;
__sync_fetch_and_add(&total, 1);
if (sample->seq & 1) {
/* copy from reserved sample to a new one... */
bpf_ringbuf_output(&ringbuf, sample, sizeof(*sample), flags);
/* ...and then discard reserved sample */
bpf_ringbuf_discard(sample, flags);
__sync_fetch_and_add(&discarded, 1);
} else {
bpf_ringbuf_submit(sample, flags);
}
avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);
cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS);
prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS);
return 0;
}
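/* Samples reserved and submitted above are drained from user space with
 * libbpf's ring buffer API; a minimal consumer sketch is shown below. It is
 * illustrative only (the real selftest uses a generated skeleton) and the map
 * fd is assumed to come from the loaded object.
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_sample(void *ctx, void *data, size_t size)
{
	const struct sample *s = data;

	printf("pid=%d seq=%d value=%ld comm=%s\n",
	       s->pid, s->seq, s->value, s->comm);
	return 0;
}

int consume_once(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(ringbuf_map_fd, handle_sample, NULL, NULL);
	if (!rb)
		return -1;
	err = ring_buffer__poll(rb, 100 /* timeout, ms */);
	ring_buffer__free(rb);
	return err < 0 ? err : 0;
}
#endif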
| linux-master | tools/testing/selftests/bpf/progs/test_ringbuf.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#ifndef EBUSY
#define EBUSY 16
#endif
extern bool CONFIG_PREEMPT __kconfig __weak;
int nr_get_errs = 0;
int nr_del_errs = 0;
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} task_storage SEC(".maps");
SEC("lsm.s/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
int protocol, int kern)
{
struct task_struct *task;
int ret, zero = 0;
int *value;
if (!CONFIG_PREEMPT)
return 0;
task = bpf_get_current_task_btf();
value = bpf_task_storage_get(&task_storage, task, &zero,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!value)
__sync_fetch_and_add(&nr_get_errs, 1);
ret = bpf_task_storage_delete(&task_storage,
bpf_get_current_task_btf());
if (ret == -EBUSY)
__sync_fetch_and_add(&nr_del_errs, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
struct generic_map_value {
void *data;
};
char _license[] SEC("license") = "GPL";
const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
int err = 0;
int pid = 0;
#define DEFINE_ARRAY_WITH_KPTR(_size) \
struct bin_data_##_size { \
char data[_size - sizeof(void *)]; \
}; \
struct map_value_##_size { \
struct bin_data_##_size __kptr * data; \
/* To emit BTF info for bin_data_xx */ \
struct bin_data_##_size not_used; \
}; \
struct { \
__uint(type, BPF_MAP_TYPE_ARRAY); \
__type(key, int); \
__type(value, struct map_value_##_size); \
__uint(max_entries, 128); \
} array_##_size SEC(".maps");
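/* For reference, DEFINE_ARRAY_WITH_KPTR(8) expands to roughly the following
 * (inner comment omitted): a zero-length payload on 64-bit, a map value
 * holding the kptr, and a 128-entry array map named array_8.
 *
 *	struct bin_data_8 {
 *		char data[8 - sizeof(void *)];
 *	};
 *	struct map_value_8 {
 *		struct bin_data_8 __kptr * data;
 *		struct bin_data_8 not_used;
 *	};
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__type(key, int);
 *		__type(value, struct map_value_8);
 *		__uint(max_entries, 128);
 *	} array_8 SEC(".maps");
 */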
static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch,
unsigned int idx)
{
struct generic_map_value *value;
unsigned int i, key;
void *old, *new;
for (i = 0; i < batch; i++) {
key = i;
value = bpf_map_lookup_elem(map, &key);
if (!value) {
err = 1;
return;
}
new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
if (!new) {
err = 2;
return;
}
old = bpf_kptr_xchg(&value->data, new);
if (old) {
bpf_obj_drop(old);
err = 3;
return;
}
}
for (i = 0; i < batch; i++) {
key = i;
value = bpf_map_lookup_elem(map, &key);
if (!value) {
err = 4;
return;
}
old = bpf_kptr_xchg(&value->data, NULL);
if (!old) {
err = 5;
return;
}
bpf_obj_drop(old);
}
}
#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)
DEFINE_ARRAY_WITH_KPTR(8);
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int test_bpf_mem_alloc_free(void *ctx)
{
if ((u32)bpf_get_current_pid_tgid() != pid)
return 0;
/* Alloc 128 8-bytes objects in batch to trigger refilling,
* then free 128 8-bytes objects in batch to trigger freeing.
*/
CALL_BATCH_ALLOC_FREE(8, 128, 0);
CALL_BATCH_ALLOC_FREE(16, 128, 1);
CALL_BATCH_ALLOC_FREE(32, 128, 2);
CALL_BATCH_ALLOC_FREE(64, 128, 3);
CALL_BATCH_ALLOC_FREE(96, 128, 4);
CALL_BATCH_ALLOC_FREE(128, 128, 5);
CALL_BATCH_ALLOC_FREE(192, 128, 6);
CALL_BATCH_ALLOC_FREE(256, 128, 7);
CALL_BATCH_ALLOC_FREE(512, 64, 8);
CALL_BATCH_ALLOC_FREE(1024, 32, 9);
CALL_BATCH_ALLOC_FREE(2048, 16, 10);
CALL_BATCH_ALLOC_FREE(4096, 8, 11);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_bpf_ma.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("tc")
__description("raw_stack: no skb_load_bytes")
__failure __msg("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 8; \
/* Call to skb_load_bytes() omitted. */ \
r0 = *(u64*)(r6 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = -8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = %[__imm_0]; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__imm_0, ~0)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("invalid zero-sized read")
__naked void skb_load_bytes_zero_len(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 0; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
r3 = 0xcafe; \
*(u64*)(r6 + 0) = r3; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -8; \
*(u64*)(r6 + 0) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 0) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r3 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \
r0 += r3; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -16; \
*(u64*)(r6 - 8) = r1; \
*(u64*)(r6 + 0) = r1; \
*(u64*)(r6 + 8) = r1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 - 8); \
r2 = *(u64*)(r6 + 8); \
r3 = *(u64*)(r6 + 0); \
r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
r0 += r2; \
r0 += r3; \
exit; \
" :
: __imm(bpf_skb_load_bytes),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -513; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -1; \
r3 = r6; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += 0xffffffff; \
r3 = r6; \
r4 = 0xffffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -1; \
r3 = r6; \
r4 = 0x7fffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 0x7fffffff; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 0; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
asm volatile (" \
r2 = 4; \
r6 = r10; \
r6 += -512; \
r3 = r6; \
r4 = 512; \
call %[bpf_skb_load_bytes]; \
r0 = *(u64*)(r6 + 0); \
exit; \
" :
: __imm(bpf_skb_load_bytes)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
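/* Every test above revolves around handing a raw stack slot to
 * bpf_skb_load_bytes(); the passing "no init" case corresponds roughly to the
 * C shape below. Sketch only, with an arbitrary offset of 4; it is not one of
 * the verifier tests.
 */
#if 0	/* illustrative C equivalent only */
SEC("tc")
int skb_load_bytes_into_stack(struct __sk_buff *skb)
{
	__u64 buf;	/* uninitialized stack slot, like r6 in the asm */

	if (bpf_skb_load_bytes(skb, 4 /* offset */, &buf, sizeof(buf)) < 0)
		return 0;
	return buf != 0;
}
#endif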
| linux-master | tools/testing/selftests/bpf/progs/verifier_raw_stack.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 100
#define STROBE_MAX_MAP_ENTRIES 20
/* full unroll by llvm */
#undef NO_UNROLL
#include "strobemeta.h"
| linux-master | tools/testing/selftests/bpf/progs/strobemeta.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define __read_mostly SEC(".data.read_mostly")
struct s {
int a;
long long b;
} __attribute__((packed));
/* .data section */
int in1 = -1;
long long in2 = -1;
/* .bss section */
char in3 = '\0';
long long in4 __attribute__((aligned(64))) = 0;
struct s in5 = {};
/* .rodata section */
const volatile struct {
const int in6;
} in = {};
/* .data section */
int out1 = -1;
long long out2 = -1;
/* .bss section */
char out3 = 0;
long long out4 = 0;
int out6 = 0;
extern bool CONFIG_BPF_SYSCALL __kconfig;
extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
struct s out5 = {};
const volatile int in_dynarr_sz SEC(".rodata.dyn");
const volatile int in_dynarr[4] SEC(".rodata.dyn") = { -1, -2, -3, -4 };
int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 };
int read_mostly_var __read_mostly;
int out_mostly_var;
char huge_arr[16 * 1024 * 1024];
/* non-mmapable custom .data section */
struct my_value { int x, y, z; };
__hidden int zero_key SEC(".data.non_mmapable");
static struct my_value zero_value SEC(".data.non_mmapable");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct my_value);
__uint(max_entries, 1);
} my_map SEC(".maps");
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
int i;
out1 = in1;
out2 = in2;
out3 = in3;
out4 = in4;
out5 = in5;
out6 = in.in6;
bpf_syscall = CONFIG_BPF_SYSCALL;
kern_ver = LINUX_KERNEL_VERSION;
for (i = 0; i < in_dynarr_sz; i++)
out_dynarr[i] = in_dynarr[i];
out_mostly_var = read_mostly_var;
huge_arr[sizeof(huge_arr) - 1] = 123;
/* make sure zero_key and zero_value are not optimized out */
bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
return 0;
}
char _license[] SEC("license") = "GPL";
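/* These globals are meant to be poked from user space through the generated
 * skeleton. A minimal sketch follows; the skeleton header name and the
 * data/bss accessors are produced by "bpftool gen skeleton" and are
 * assumptions, not part of this file.
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include "test_skeleton.skel.h"

int run(void)
{
	struct test_skeleton *skel;
	int err;

	skel = test_skeleton__open();
	if (!skel)
		return -1;
	/* .rodata inputs must be set before load; .data/.bss before the trigger */
	skel->data->in1 = 10;
	skel->data->in2 = 11;
	skel->bss->in3 = 12;
	skel->bss->in4 = 13;
	err = test_skeleton__load(skel);
	if (!err)
		err = test_skeleton__attach(skel);
	/* trigger any syscall, then inspect skel->data->out1, skel->bss->out3, ... */
	test_skeleton__destroy(skel);
	return err;
}
#endif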
| linux-master | tools/testing/selftests/bpf/progs/test_skeleton.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/netdev.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <asm-generic/errno-base.h>
#include "xdp_features.h"
#define ipv6_addr_equal(a, b) ((a).s6_addr32[0] == (b).s6_addr32[0] && \
(a).s6_addr32[1] == (b).s6_addr32[1] && \
(a).s6_addr32[2] == (b).s6_addr32[2] && \
(a).s6_addr32[3] == (b).s6_addr32[3])
struct net_device;
struct bpf_prog;
struct xdp_cpumap_stats {
unsigned int redirect;
unsigned int pass;
unsigned int drop;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 1);
} stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 1);
} dut_stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_cpumap_val));
__uint(max_entries, 1);
} cpu_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_devmap_val));
__uint(max_entries, 1);
} dev_map SEC(".maps");
const volatile struct in6_addr tester_addr;
const volatile struct in6_addr dut_addr;
static __always_inline int
xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eh = data;
struct tlv_hdr *tlv;
struct udphdr *uh;
__be16 port;
if (eh + 1 > (struct ethhdr *)data_end)
return -EINVAL;
if (eh->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *ih = (struct iphdr *)(eh + 1);
__be32 saddr = dut ? tester_addr.s6_addr32[3]
: dut_addr.s6_addr32[3];
__be32 daddr = dut ? dut_addr.s6_addr32[3]
: tester_addr.s6_addr32[3];
ih = (struct iphdr *)(eh + 1);
if (ih + 1 > (struct iphdr *)data_end)
return -EINVAL;
if (saddr != ih->saddr)
return -EINVAL;
if (daddr != ih->daddr)
return -EINVAL;
if (ih->protocol != IPPROTO_UDP)
return -EINVAL;
uh = (struct udphdr *)(ih + 1);
} else if (eh->h_proto == bpf_htons(ETH_P_IPV6)) {
struct in6_addr saddr = dut ? tester_addr : dut_addr;
struct in6_addr daddr = dut ? dut_addr : tester_addr;
struct ipv6hdr *ih6 = (struct ipv6hdr *)(eh + 1);
if (ih6 + 1 > (struct ipv6hdr *)data_end)
return -EINVAL;
if (!ipv6_addr_equal(saddr, ih6->saddr))
return -EINVAL;
if (!ipv6_addr_equal(daddr, ih6->daddr))
return -EINVAL;
if (ih6->nexthdr != IPPROTO_UDP)
return -EINVAL;
uh = (struct udphdr *)(ih6 + 1);
} else {
return -EINVAL;
}
if (uh + 1 > (struct udphdr *)data_end)
return -EINVAL;
port = dut ? uh->dest : uh->source;
if (port != bpf_htons(DUT_ECHO_PORT))
return -EINVAL;
tlv = (struct tlv_hdr *)(uh + 1);
if (tlv + 1 > (struct tlv_hdr *)data_end)
return -EINVAL;
return bpf_htons(tlv->type) == CMD_ECHO ? 0 : -EINVAL;
}
static __always_inline int
xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)
{
__u32 *val, key = 0;
if (xdp_process_echo_packet(xdp, tx))
return -EINVAL;
if (dut)
val = bpf_map_lookup_elem(&dut_stats, &key);
else
val = bpf_map_lookup_elem(&stats, &key);
if (val)
__sync_add_and_fetch(val, 1);
return 0;
}
/* Tester */
SEC("xdp")
int xdp_tester_check_tx(struct xdp_md *xdp)
{
xdp_update_stats(xdp, true, false);
return XDP_PASS;
}
SEC("xdp")
int xdp_tester_check_rx(struct xdp_md *xdp)
{
xdp_update_stats(xdp, false, false);
return XDP_PASS;
}
/* DUT */
SEC("xdp")
int xdp_do_pass(struct xdp_md *xdp)
{
xdp_update_stats(xdp, true, true);
return XDP_PASS;
}
SEC("xdp")
int xdp_do_drop(struct xdp_md *xdp)
{
if (xdp_update_stats(xdp, true, true))
return XDP_PASS;
return XDP_DROP;
}
SEC("xdp")
int xdp_do_aborted(struct xdp_md *xdp)
{
if (xdp_process_echo_packet(xdp, true))
return XDP_PASS;
return XDP_ABORTED;
}
SEC("xdp")
int xdp_do_tx(struct xdp_md *xdp)
{
void *data = (void *)(long)xdp->data;
struct ethhdr *eh = data;
__u8 tmp_mac[ETH_ALEN];
if (xdp_update_stats(xdp, true, true))
return XDP_PASS;
__builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
__builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
__builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
return XDP_TX;
}
SEC("xdp")
int xdp_do_redirect(struct xdp_md *xdp)
{
if (xdp_process_echo_packet(xdp, true))
return XDP_PASS;
return bpf_redirect_map(&cpu_map, 0, 0);
}
SEC("tp_btf/xdp_exception")
int BPF_PROG(xdp_exception, const struct net_device *dev,
const struct bpf_prog *xdp, __u32 act)
{
__u32 *val, key = 0;
val = bpf_map_lookup_elem(&dut_stats, &key);
if (val)
__sync_add_and_fetch(val, 1);
return 0;
}
SEC("tp_btf/xdp_cpumap_kthread")
int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
{
__u32 *val, key = 0;
val = bpf_map_lookup_elem(&dut_stats, &key);
if (val)
__sync_add_and_fetch(val, 1);
return 0;
}
SEC("xdp/cpumap")
int xdp_do_redirect_cpumap(struct xdp_md *xdp)
{
void *data = (void *)(long)xdp->data;
struct ethhdr *eh = data;
__u8 tmp_mac[ETH_ALEN];
if (xdp_process_echo_packet(xdp, true))
return XDP_PASS;
__builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
__builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
__builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
return bpf_redirect_map(&dev_map, 0, 0);
}
char _license[] SEC("license") = "GPL";
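/* Attaching one of the DUT programs from user space can be done with
 * bpf_xdp_attach(); a sketch is shown below. Illustrative only: the
 * accompanying test binary handles the full setup, and the interface name and
 * driver-mode flag here are placeholders.
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int attach_dut_prog(struct bpf_object *obj, const char *ifname)
{
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	prog = bpf_object__find_program_by_name(obj, "xdp_do_pass");
	if (!prog || !ifindex)
		return -1;
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}
#endif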
| linux-master | tools/testing/selftests/bpf/progs/xdp_features.c |
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
const char LICENSE[] SEC("license") = "GPL";
__attribute__((unused)) __noinline int unused1(int x)
{
return x + 1;
}
static __attribute__((unused)) __noinline int unused2(int x)
{
return x + 2;
}
SEC("raw_tp/sys_enter")
int main_prog(void *ctx)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_subprogs_unused.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "../bpf_testmod/bpf_testmod.h"
__u32 raw_tp_read_sz = 0;
SEC("raw_tp/bpf_testmod_test_read")
int BPF_PROG(handle_raw_tp,
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
{
raw_tp_read_sz = BPF_CORE_READ(read_ctx, len);
return 0;
}
__u32 raw_tp_bare_write_sz = 0;
SEC("raw_tp/bpf_testmod_test_write_bare")
int BPF_PROG(handle_raw_tp_bare,
struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
{
raw_tp_bare_write_sz = BPF_CORE_READ(write_ctx, len);
return 0;
}
int raw_tp_writable_bare_in_val = 0;
int raw_tp_writable_bare_early_ret = 0;
int raw_tp_writable_bare_out_val = 0;
SEC("raw_tp.w/bpf_testmod_test_writable_bare")
int BPF_PROG(handle_raw_tp_writable_bare,
struct bpf_testmod_test_writable_ctx *writable)
{
raw_tp_writable_bare_in_val = writable->val;
writable->early_ret = raw_tp_writable_bare_early_ret;
writable->val = raw_tp_writable_bare_out_val;
return 0;
}
__u32 tp_btf_read_sz = 0;
SEC("tp_btf/bpf_testmod_test_read")
int BPF_PROG(handle_tp_btf,
struct task_struct *task, struct bpf_testmod_test_read_ctx *read_ctx)
{
tp_btf_read_sz = read_ctx->len;
return 0;
}
__u32 fentry_read_sz = 0;
SEC("fentry/bpf_testmod_test_read")
int BPF_PROG(handle_fentry,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
fentry_read_sz = len;
return 0;
}
__u32 fentry_manual_read_sz = 0;
SEC("fentry")
int BPF_PROG(handle_fentry_manual,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
fentry_manual_read_sz = len;
return 0;
}
__u32 fexit_read_sz = 0;
int fexit_ret = 0;
SEC("fexit/bpf_testmod_test_read")
int BPF_PROG(handle_fexit,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len,
int ret)
{
fexit_read_sz = len;
fexit_ret = ret;
return 0;
}
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
{
long buf = 0;
bpf_probe_read_kernel(&buf, 8, ret);
bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
*(volatile long long *)ret;
*(volatile int *)&ret->f_mode;
return 0;
}
__u32 fmod_ret_read_sz = 0;
SEC("fmod_ret/bpf_testmod_test_read")
int BPF_PROG(handle_fmod_ret,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
{
fmod_ret_read_sz = len;
return 0; /* don't override the exit code */
}
SEC("kprobe.multi/bpf_testmod_test_read")
int BPF_PROG(kprobe_multi)
{
return 0;
}
char _license[] SEC("license") = "GPL";
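/* handle_fentry_manual above uses a bare SEC("fentry") with no target in the
 * section name, so user space must set the attach target before loading the
 * object. A sketch (assuming bpf_testmod is loaded so its BTF is available):
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include <bpf/libbpf.h>

int set_manual_attach_target(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "handle_fentry_manual");
	if (!prog)
		return -1;
	/* must happen before bpf_object__load() */
	return bpf_program__set_attach_target(prog, 0 /* attach_prog_fd */,
					      "bpf_testmod_test_read");
}
#endif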
| linux-master | tools/testing/selftests/bpf/progs/test_module_attach.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. */
#define KBUILD_MODNAME "foo"
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "xdping.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 256);
__type(key, __u32);
__type(value, struct pinginfo);
} ping_map SEC(".maps");
static __always_inline void swap_src_dst_mac(void *data)
{
unsigned short *p = data;
unsigned short dst[3];
dst[0] = p[0];
dst[1] = p[1];
dst[2] = p[2];
p[0] = p[3];
p[1] = p[4];
p[2] = p[5];
p[3] = dst[0];
p[4] = dst[1];
p[5] = dst[2];
}
static __always_inline __u16 csum_fold_helper(__wsum sum)
{
sum = (sum & 0xffff) + (sum >> 16);
return ~((sum & 0xffff) + (sum >> 16));
}
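/* Worked example of the fold above: a partial sum of 0x00013ffe becomes
 * 0x3ffe + 0x1 = 0x3fff after the first fold, the second fold leaves it
 * unchanged, and ~0x3fff truncated to 16 bits gives the checksum 0xc000.
 */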
static __always_inline __u16 ipv4_csum(void *data_start, int data_size)
{
__wsum sum;
sum = bpf_csum_diff(0, 0, data_start, data_size, 0);
return csum_fold_helper(sum);
}
#define ICMP_ECHO_LEN 64
static __always_inline int icmp_check(struct xdp_md *ctx, int type)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
struct icmphdr *icmph;
struct iphdr *iph;
if (data + sizeof(*eth) + sizeof(*iph) + ICMP_ECHO_LEN > data_end)
return XDP_PASS;
if (eth->h_proto != bpf_htons(ETH_P_IP))
return XDP_PASS;
iph = data + sizeof(*eth);
if (iph->protocol != IPPROTO_ICMP)
return XDP_PASS;
if (bpf_ntohs(iph->tot_len) - sizeof(*iph) != ICMP_ECHO_LEN)
return XDP_PASS;
icmph = data + sizeof(*eth) + sizeof(*iph);
if (icmph->type != type)
return XDP_PASS;
return XDP_TX;
}
SEC("xdp")
int xdping_client(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
struct pinginfo *pinginfo = NULL;
struct ethhdr *eth = data;
struct icmphdr *icmph;
struct iphdr *iph;
__u64 recvtime;
__be32 raddr;
__be16 seq;
int ret;
__u8 i;
ret = icmp_check(ctx, ICMP_ECHOREPLY);
if (ret != XDP_TX)
return ret;
iph = data + sizeof(*eth);
icmph = data + sizeof(*eth) + sizeof(*iph);
raddr = iph->saddr;
/* Record time reply received. */
recvtime = bpf_ktime_get_ns();
pinginfo = bpf_map_lookup_elem(&ping_map, &raddr);
if (!pinginfo || pinginfo->seq != icmph->un.echo.sequence)
return XDP_PASS;
if (pinginfo->start) {
#pragma clang loop unroll(full)
for (i = 0; i < XDPING_MAX_COUNT; i++) {
if (pinginfo->times[i] == 0)
break;
}
/* verifier is fussy here... */
if (i < XDPING_MAX_COUNT) {
pinginfo->times[i] = recvtime -
pinginfo->start;
pinginfo->start = 0;
i++;
}
/* No more space for values? */
if (i == pinginfo->count || i == XDPING_MAX_COUNT)
return XDP_PASS;
}
/* Now convert reply back into echo request. */
swap_src_dst_mac(data);
iph->saddr = iph->daddr;
iph->daddr = raddr;
icmph->type = ICMP_ECHO;
seq = bpf_htons(bpf_ntohs(icmph->un.echo.sequence) + 1);
icmph->un.echo.sequence = seq;
icmph->checksum = 0;
icmph->checksum = ipv4_csum(icmph, ICMP_ECHO_LEN);
pinginfo->seq = seq;
pinginfo->start = bpf_ktime_get_ns();
return XDP_TX;
}
SEC("xdp")
int xdping_server(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
struct icmphdr *icmph;
struct iphdr *iph;
__be32 raddr;
int ret;
ret = icmp_check(ctx, ICMP_ECHO);
if (ret != XDP_TX)
return ret;
iph = data + sizeof(*eth);
icmph = data + sizeof(*eth) + sizeof(*iph);
raddr = iph->saddr;
/* Now convert request into echo reply. */
swap_src_dst_mac(data);
iph->saddr = iph->daddr;
iph->daddr = raddr;
icmph->type = ICMP_ECHOREPLY;
icmph->checksum = 0;
icmph->checksum = ipv4_csum(icmph, ICMP_ECHO_LEN);
return XDP_TX;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdping_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
int pid;
int kern_stack_size;
int user_stack_size;
int user_stack_buildid_size;
__u64 kern_stack[MAX_STACK_RAWTP];
__u64 user_stack[MAX_STACK_RAWTP];
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 2);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(__u32));
} perfmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct stack_trace_t);
} stackdata_map SEC(".maps");
/* Allocate per-cpu space twice the needed size. For the code below
* usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
* if (usize < 0)
* return 0;
* ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
*
* If we have value_size = MAX_STACK_RAWTP * sizeof(__u64),
* verifier will complain that access "raw_data + usize"
* with size "max_len - usize" may be out of bound.
* The maximum "raw_data + usize" is "raw_data + max_len"
* and the maximum "max_len - usize" is "max_len", verifier
* concludes that the maximum buffer access range is
* "raw_data[0...max_len * 2 - 1]" and hence reject the program.
*
* Doubling the to-be-used max buffer size can fix this verifier
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");
SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
int max_len, max_buildid_len, total_size;
struct stack_trace_t *data;
long usize, ksize;
void *raw_data;
__u32 key = 0;
data = bpf_map_lookup_elem(&stackdata_map, &key);
if (!data)
return 0;
max_len = MAX_STACK_RAWTP * sizeof(__u64);
max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
data->pid = bpf_get_current_pid_tgid();
data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
max_len, 0);
data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
BPF_F_USER_STACK);
data->user_stack_buildid_size = bpf_get_stack(
ctx, data->user_stack_buildid, max_buildid_len,
BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));
/* write both kernel and user stacks to the same buffer */
raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
if (!raw_data)
return 0;
usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
if (usize < 0)
return 0;
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
if (ksize < 0)
return 0;
total_size = usize + ksize;
if (total_size > 0 && total_size <= max_len)
bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);
return 0;
}
char _license[] SEC("license") = "GPL";
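/* The records emitted with bpf_perf_event_output() above are drained from
 * user space with a perf buffer. A minimal consumer sketch follows; it is
 * illustrative only, the page count and timeout are arbitrary, and the map fd
 * is assumed to come from the loaded object.
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include <stdio.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	const struct stack_trace_t *e = data;

	if (size >= sizeof(*e))	/* raw_data records are smaller; skip them here */
		printf("pid=%d kern=%d user=%d\n", e->pid,
		       e->kern_stack_size, e->user_stack_size);
}

int drain(int perfmap_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perfmap_fd, 8 /* pages per CPU */, on_sample,
			      NULL /* lost_cb */, NULL, NULL);
	if (!pb)
		return -1;
	err = perf_buffer__poll(pb, 100 /* timeout, ms */);
	perf_buffer__free(pb);
	return err < 0 ? err : 0;
}
#endif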
| linux-master | tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c |
#include "core_reloc_types.h"
void f(struct core_reloc_flavors__err_wrong_name x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_flavors__err_wrong_name.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(volatile struct pt_regs* ctx)
{
int i = 0;
while (true) {
if (PT_REGS_RC(ctx) & 1)
i += 3;
else
i += 7;
if (i > 40)
break;
}
return i;
}
| linux-master | tools/testing/selftests/bpf/progs/loop2.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define BPF_RETVAL_HOOK(name, section, ctx, expected_err) \
__attribute__((__section__("?" section))) \
int name(struct ctx *_ctx) \
{ \
bpf_set_retval(bpf_get_retval()); \
return 1; \
}
#include "cgroup_getset_retval_hooks.h"
#undef BPF_RETVAL_HOOK
| linux-master | tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
*
* Author: Roberto Sassu <[email protected]>
*/
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
/* From include/linux/mm.h. */
#define FMODE_WRITE 0x2
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} data_input SEC(".maps");
char _license[] SEC("license") = "GPL";
SEC("lsm/bpf_map")
int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
{
if (map != (struct bpf_map *)&data_input)
return 0;
if (fmode & FMODE_WRITE)
return -EACCES;
return 0;
}
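/* This hook is exercised from user space by fetching the map by id with
 * different open flags; read-only access passes, write access hits the
 * -EACCES above. The *_opts variant of the fd-by-id API used below is
 * assumed to be available in the libbpf version in use.
 */
#if 0	/* user-space code, not compiled as part of this BPF object */
#include <bpf/bpf.h>

int open_map_readonly(__u32 map_id)
{
	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts, .open_flags = BPF_F_RDONLY);

	return bpf_map_get_fd_by_id_opts(map_id, &opts);
}
#endif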
| linux-master | tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#define BPF_NO_GLOBAL_DATA
#include <bpf/bpf_helpers.h>
char LICENSE[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 1);
} my_pid_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, int);
__uint(max_entries, 1);
} res_map SEC(".maps");
volatile int my_pid_var = 0;
volatile int res_var = 0;
SEC("tp/raw_syscalls/sys_enter")
int handle_legacy(void *ctx)
{
int zero = 0, *my_pid, cur_pid, *my_res;
my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
if (!my_pid)
return 1;
cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid != *my_pid)
return 1;
my_res = bpf_map_lookup_elem(&res_map, &zero);
if (!my_res)
return 1;
if (*my_res == 0)
/* use bpf_printk() in combination with BPF_NO_GLOBAL_DATA to
 * force a .rodata.str1.1 section, which previously caused
 * problems on old kernels because libbpf always tried to
 * create a global data map for it
 */
bpf_printk("Legacy-case bpf_printk test, pid %d\n", cur_pid);
*my_res = 1;
return *my_res;
}
SEC("tp/raw_syscalls/sys_enter")
int handle_modern(void *ctx)
{
int cur_pid;
cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid != my_pid_var)
return 1;
if (res_var == 0)
/* we need bpf_printk() to validate libbpf logic around unused
* global maps and legacy kernels; see comment in handle_legacy()
*/
bpf_printk("Modern-case bpf_printk test, pid %d\n", cur_pid);
res_var = 1;
return res_var;
}
| linux-master | tools/testing/selftests/bpf/progs/test_legacy_printk.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
extern bool CONFIG_IPV6_SUBTREES __kconfig __weak;
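/* Dump IPv6 routes in the same format as /proc/net/ipv6_route so the
 * selftest can compare the iterator output against the procfs file.
 */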
SEC("iter/ipv6_route")
int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct fib6_info *rt = ctx->rt;
const struct net_device *dev;
struct fib6_nh *fib6_nh;
unsigned int flags;
struct nexthop *nh;
if (rt == (void *)0)
return 0;
fib6_nh = &rt->fib6_nh[0];
flags = rt->fib6_flags;
/* FIXME: nexthop_is_multipath is not handled here. */
nh = rt->nh;
if (rt->nh)
fib6_nh = &nh->nh_info->fib6_nh;
BPF_SEQ_PRINTF(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
if (CONFIG_IPV6_SUBTREES)
BPF_SEQ_PRINTF(seq, "%pi6 %02x ", &rt->fib6_src.addr,
rt->fib6_src.plen);
else
BPF_SEQ_PRINTF(seq, "00000000000000000000000000000000 00 ");
if (fib6_nh->fib_nh_gw_family) {
flags |= RTF_GATEWAY;
BPF_SEQ_PRINTF(seq, "%pi6 ", &fib6_nh->fib_nh_gw6);
} else {
BPF_SEQ_PRINTF(seq, "00000000000000000000000000000000 ");
}
dev = fib6_nh->fib_nh_dev;
if (dev)
BPF_SEQ_PRINTF(seq, "%08x %08x %08x %08x %8s\n", rt->fib6_metric,
rt->fib6_ref.refs.counter, 0, flags, dev->name);
else
BPF_SEQ_PRINTF(seq, "%08x %08x %08x %08x\n", rt->fib6_metric,
rt->fib6_ref.refs.counter, 0, flags);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c |
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
#define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem
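/* ing_xdp (run at XDP, before TC ingress) copies the destination MAC into
 * the metadata area; ing_cls then verifies that the metadata matches the
 * packet data and drops the packet on any mismatch.
 */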
SEC("t")
int ing_cls(struct __sk_buff *ctx)
{
__u8 *data, *data_meta, *data_end;
__u32 diff = 0;
data_meta = ctx_ptr(ctx, data_meta);
data_end = ctx_ptr(ctx, data_end);
data = ctx_ptr(ctx, data);
if (data + ETH_ALEN > data_end ||
data_meta + round_up(ETH_ALEN, 4) > data)
return TC_ACT_SHOT;
diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0];
diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2];
return diff ? TC_ACT_SHOT : TC_ACT_OK;
}
SEC("x")
int ing_xdp(struct xdp_md *ctx)
{
__u8 *data, *data_meta, *data_end;
int ret;
ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4));
if (ret < 0)
return XDP_DROP;
data_meta = ctx_ptr(ctx, data_meta);
data_end = ctx_ptr(ctx, data_end);
data = ctx_ptr(ctx, data);
if (data + ETH_ALEN > data_end ||
data_meta + round_up(ETH_ALEN, 4) > data)
return XDP_DROP;
__builtin_memcpy(data_meta, data, ETH_ALEN);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_meta.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
int ifindex;
int ret;
SEC("lwt_xmit")
int redirect_ingress(struct __sk_buff *skb)
{
ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
return 0;
}
SEC("lwt_xmit")
int redirect_egress(struct __sk_buff *skb)
{
ret = bpf_clone_redirect(skb, ifindex, 0);
return 0;
}
SEC("tc")
int tc_redirect_ingress(struct __sk_buff *skb)
{
ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
return 0;
}
SEC("tc")
int tc_redirect_egress(struct __sk_buff *skb)
{
ret = bpf_clone_redirect(skb, ifindex, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/empty_skb.c |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_tcpnotify.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
__type(key, __u32);
__type(value, struct tcpnotify_globals);
} global_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, __u32);
} perf_event_map SEC(".maps");
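/* On every TCP retransmit, update the retransmit counters in global_map
 * and emit a tcp_notifier record to the perf event array for userspace.
 */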
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
int rv = -1;
int op;
op = (int) skops->op;
if (bpf_ntohl(skops->remote_port) != TESTPORT) {
skops->reply = -1;
return 0;
}
switch (op) {
case BPF_SOCK_OPS_TIMEOUT_INIT:
case BPF_SOCK_OPS_RWND_INIT:
case BPF_SOCK_OPS_NEEDS_ECN:
case BPF_SOCK_OPS_BASE_RTT:
case BPF_SOCK_OPS_RTO_CB:
rv = 1;
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
case BPF_SOCK_OPS_TCP_LISTEN_CB:
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
bpf_sock_ops_cb_flags_set(skops, (BPF_SOCK_OPS_RETRANS_CB_FLAG|
BPF_SOCK_OPS_RTO_CB_FLAG));
rv = 1;
break;
case BPF_SOCK_OPS_RETRANS_CB: {
__u32 key = 0;
struct tcpnotify_globals g, *gp;
struct tcp_notifier msg = {
.type = 0xde,
.subtype = 0xad,
.source = 0xbe,
.hash = 0xef,
};
rv = 1;
/* Update results */
gp = bpf_map_lookup_elem(&global_map, &key);
if (!gp)
break;
g = *gp;
g.total_retrans = skops->total_retrans;
g.ncalls++;
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
bpf_perf_event_output(skops, &perf_event_map,
BPF_F_CURRENT_CPU,
&msg, sizeof(msg));
}
break;
default:
rv = -1;
}
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c |
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_msg SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 20);
__type(key, int);
__type(value, int);
} sock_map_break SEC(".maps");
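/* The first payload byte selects the target sockmap (0 = rx, otherwise tx)
 * and the second byte is the key; the first 8 bytes are then overwritten
 * with a recognizable 0xd,0xe,0xa,0xd,... marker before the skb is
 * redirected.
 */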
SEC("sk_skb2")
int bpf_prog2(struct __sk_buff *skb)
{
void *data_end = (void *)(long) skb->data_end;
void *data = (void *)(long) skb->data;
__u32 lport = skb->local_port;
__u32 rport = skb->remote_port;
__u8 *d = data;
__u8 sk, map;
__sink(lport);
__sink(rport);
if (data + 8 > data_end)
return SK_DROP;
map = d[0];
sk = d[1];
d[0] = 0xd;
d[1] = 0xe;
d[2] = 0xa;
d[3] = 0xd;
d[4] = 0xb;
d[5] = 0xe;
d[6] = 0xe;
d[7] = 0xf;
if (!map)
return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c |
#include "core_reloc_types.h"
void f(struct core_reloc_ints___bool x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_ints___bool.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__noinline int foo(unsigned int *v)
{
if (v)
*v = bpf_get_prandom_u32();
return 0;
}
SEC("cgroup_skb/ingress")
__failure __msg("At program exit the register R0 has value")
int global_func15(struct __sk_buff *skb)
{
unsigned int v = 1;
foo(&v);
return v;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func15.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
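/* Dump IPv4 UDP sockets in the same format as /proc/net/udp so the
 * selftest can compare the iterator output against the procfs file.
 */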
SEC("iter/udp")
int dump_udp4(struct bpf_iter__udp *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct udp_sock *udp_sk = ctx->udp_sk;
struct inet_sock *inet;
__u16 srcp, destp;
__be32 dest, src;
__u32 seq_num;
int rqueue;
if (udp_sk == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq,
" sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops\n");
/* filter out udp6 sockets */
inet = &udp_sk->inet;
if (inet->sk.sk_family == AF_INET6)
return 0;
inet = &udp_sk->inet;
dest = inet->inet_daddr;
src = inet->inet_rcv_saddr;
srcp = bpf_ntohs(inet->inet_sport);
destp = bpf_ntohs(inet->inet_dport);
rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ",
ctx->bucket, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
inet->sk.sk_state,
inet->sk.sk_wmem_alloc.refs.counter - 1,
rqueue,
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
inet->sk.sk_drops.counter);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_udp4.c |
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tcp_helpers.h"
#include "test_tcpbpf.h"
struct tcpbpf_globals global = {};
/* SOL_TCP is defined in <netinet/tcp.h>, while TCP_SAVED_SYN is
 * defined in the already-included <linux/tcp.h>.
 */
#ifndef SOL_TCP
#define SOL_TCP 6
#endif
static __always_inline int get_tp_window_clamp(struct bpf_sock_ops *skops)
{
struct bpf_sock *sk;
struct tcp_sock *tp;
sk = skops->sk;
if (!sk)
return -1;
tp = bpf_skc_to_tcp_sock(sk);
if (!tp)
return -1;
return tp->window_clamp;
}
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int window_clamp = 9216;
int save_syn = 1;
int rv = -1;
int v = 0;
int op;
/* Test reading fields in bpf_sock_ops using a single register */
asm volatile (
"%[reuse] = *(u32 *)(%[reuse] +96)"
: [reuse] "+r"(reuse)
:);
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
: [op] "+r"(op)
: [skops] "r"(skops)
:);
asm volatile (
"r9 = %[skops];\n"
"r8 = *(u32 *)(r9 +164);\n"
"*(u32 *)(r9 +164) = r8;\n"
:: [skops] "r"(skops)
: "r9", "r8");
asm volatile (
"r1 = %[skops];\n"
"r1 = *(u64 *)(r1 +184);\n"
"if r1 == 0 goto +1;\n"
"r1 = *(u32 *)(r1 +4);\n"
:: [skops] "r"(skops):"r1");
asm volatile (
"r9 = %[skops];\n"
"r9 = *(u64 *)(r9 +184);\n"
"if r9 == 0 goto +1;\n"
"r9 = *(u32 *)(r9 +4);\n"
:: [skops] "r"(skops):"r9");
asm volatile (
"r1 = %[skops];\n"
"r2 = *(u64 *)(r1 +184);\n"
"if r2 == 0 goto +1;\n"
"r2 = *(u32 *)(r2 +4);\n"
:: [skops] "r"(skops):"r1", "r2");
op = (int) skops->op;
global.event_map |= (1 << op);
switch (op) {
case BPF_SOCK_OPS_TCP_CONNECT_CB:
rv = bpf_setsockopt(skops, SOL_TCP, TCP_WINDOW_CLAMP,
&window_clamp, sizeof(window_clamp));
global.window_clamp_client = get_tp_window_clamp(skops);
break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
/* Test failure to set largest cb flag (assumes not defined) */
global.bad_cb_test_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
/* Set callback */
global.good_cb_test_rv = bpf_sock_ops_cb_flags_set(skops,
BPF_SOCK_OPS_STATE_CB_FLAG);
break;
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
skops->sk_txhash = 0x12345f;
v = 0xff;
rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
sizeof(v));
if (skops->family == AF_INET6) {
v = bpf_getsockopt(skops, IPPROTO_TCP, TCP_SAVED_SYN,
header, (sizeof(struct ipv6hdr) +
sizeof(struct tcphdr)));
if (!v) {
int offset = sizeof(struct ipv6hdr);
thdr = (struct tcphdr *)(header + offset);
v = thdr->syn;
global.tcp_saved_syn = v;
}
}
rv = bpf_setsockopt(skops, SOL_TCP, TCP_WINDOW_CLAMP,
&window_clamp, sizeof(window_clamp));
global.window_clamp_server = get_tp_window_clamp(skops);
break;
case BPF_SOCK_OPS_RTO_CB:
break;
case BPF_SOCK_OPS_RETRANS_CB:
break;
case BPF_SOCK_OPS_STATE_CB:
if (skops->args[1] == BPF_TCP_CLOSE) {
if (skops->args[0] == BPF_TCP_LISTEN) {
global.num_listen++;
} else {
global.total_retrans = skops->total_retrans;
global.data_segs_in = skops->data_segs_in;
global.data_segs_out = skops->data_segs_out;
global.bytes_received = skops->bytes_received;
global.bytes_acked = skops->bytes_acked;
}
global.num_close_events++;
}
break;
case BPF_SOCK_OPS_TCP_LISTEN_CB:
bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
&save_syn, sizeof(save_syn));
/* Update the global map with the result of the setsockopt call */
global.tcp_save_syn = v;
break;
default:
rv = -1;
}
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include "xdp_metadata.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, 256);
__type(key, __u32);
__type(value, __u32);
} xsk SEC(".maps");
__u64 pkts_skip = 0;
__u64 pkts_fail = 0;
__u64 pkts_redir = 0;
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
__u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
enum xdp_rss_hash_type *rss_type) __ksym;
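/* Parse Ethernet/IPv4/IPv6 + UDP and redirect packets destined to UDP port
 * 9091 into the AF_XDP socket for their rx queue, after prepending an
 * xdp_meta block filled with the rx timestamp and rx hash kfunc results.
 */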
SEC("xdp")
int rx(struct xdp_md *ctx)
{
void *data, *data_meta, *data_end;
struct ipv6hdr *ip6h = NULL;
struct ethhdr *eth = NULL;
struct udphdr *udp = NULL;
struct iphdr *iph = NULL;
struct xdp_meta *meta;
int err;
data = (void *)(long)ctx->data;
data_end = (void *)(long)ctx->data_end;
eth = data;
if (eth + 1 < data_end) {
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
iph = (void *)(eth + 1);
if (iph + 1 < data_end && iph->protocol == IPPROTO_UDP)
udp = (void *)(iph + 1);
}
if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
ip6h = (void *)(eth + 1);
if (ip6h + 1 < data_end && ip6h->nexthdr == IPPROTO_UDP)
udp = (void *)(ip6h + 1);
}
if (udp && udp + 1 > data_end)
udp = NULL;
}
if (!udp) {
__sync_add_and_fetch(&pkts_skip, 1);
return XDP_PASS;
}
/* Forwarding UDP:9091 to AF_XDP */
if (udp->dest != bpf_htons(9091)) {
__sync_add_and_fetch(&pkts_skip, 1);
return XDP_PASS;
}
err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
if (err) {
__sync_add_and_fetch(&pkts_fail, 1);
return XDP_PASS;
}
data = (void *)(long)ctx->data;
data_meta = (void *)(long)ctx->data_meta;
meta = data_meta;
if (meta + 1 > data) {
__sync_add_and_fetch(&pkts_fail, 1);
return XDP_PASS;
}
err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
if (!err)
meta->xdp_timestamp = bpf_ktime_get_tai_ns();
else
meta->rx_timestamp = 0; /* Used by AF_XDP as a "not available" signal */
err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
if (err < 0)
meta->rx_hash_err = err; /* Used by AF_XDP as a "no hash" signal */
__sync_add_and_fetch(&pkts_redir, 1);
return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_hw_metadata.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct callback_ctx {
int dummy;
};
#define VM_EXEC 0x00000004
#define DNAME_INLINE_LEN 32
pid_t target_pid = 0;
char d_iname[DNAME_INLINE_LEN] = {0};
__u32 found_vm_exec = 0;
__u64 addr = 0;
int find_zero_ret = -1;
int find_addr_ret = -1;
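/* bpf_find_vma() callback: record the name of the file backing the VMA
 * (if any) and whether the VMA is executable (VM_EXEC).
 */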
static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
struct callback_ctx *data)
{
if (vma->vm_file)
bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
vma->vm_file->f_path.dentry->d_iname);
/* check for VM_EXEC */
if (vma->vm_flags & VM_EXEC)
found_vm_exec = 1;
return 0;
}
SEC("raw_tp/sys_enter")
int handle_getpid(void)
{
struct task_struct *task = bpf_get_current_task_btf();
struct callback_ctx data = {};
if (task->pid != target_pid)
return 0;
find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
/* this should return -ENOENT */
find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
return 0;
}
SEC("perf_event")
int handle_pe(void)
{
struct task_struct *task = bpf_get_current_task_btf();
struct callback_ctx data = {};
if (task->pid != target_pid)
return 0;
find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
/* In NMI, this should return -EBUSY, as the previous call is using
* the irq_work.
*/
find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/find_vma.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct nf_conn;
struct bpf_ct_opts___local {
s32 netns_id;
s32 error;
u8 l4proto;
u8 reserved[3];
} __attribute__((preserve_access_index));
struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
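/* Each program below misuses the conntrack kfunc API in a way the verifier
 * is expected to reject: releasing an allocated-but-not-inserted entry,
 * double insert, insert after lookup, writing a non-allowlisted field, or
 * using the set_/change_ helpers in the wrong entry state.
 */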
SEC("?tc")
int alloc_release(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
bpf_ct_release(ct);
return 0;
}
SEC("?tc")
int insert_insert(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
ct = bpf_ct_insert_entry(ct);
if (!ct)
return 0;
ct = bpf_ct_insert_entry(ct);
return 0;
}
SEC("?tc")
int lookup_insert(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
bpf_ct_insert_entry(ct);
return 0;
}
SEC("?tc")
int write_not_allowlisted_field(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
ct->status = 0xF00;
return 0;
}
SEC("?tc")
int set_timeout_after_insert(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
ct = bpf_ct_insert_entry(ct);
if (!ct)
return 0;
bpf_ct_set_timeout(ct, 0);
return 0;
}
SEC("?tc")
int set_status_after_insert(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
ct = bpf_ct_insert_entry(ct);
if (!ct)
return 0;
bpf_ct_set_status(ct, 0);
return 0;
}
SEC("?tc")
int change_timeout_after_alloc(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
bpf_ct_change_timeout(ct, 0);
return 0;
}
SEC("?tc")
int change_status_after_alloc(struct __sk_buff *ctx)
{
struct bpf_ct_opts___local opts = {};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
if (!ct)
return 0;
bpf_ct_change_status(ct, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
struct bpf_xfrm_info___local {
u32 if_id;
int link;
} __attribute__((preserve_access_index));
__u32 req_if_id;
__u32 resp_if_id;
int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
const struct bpf_xfrm_info___local *from) __ksym;
int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx,
struct bpf_xfrm_info___local *to) __ksym;
SEC("tc")
int set_xfrm_info(struct __sk_buff *skb)
{
struct bpf_xfrm_info___local info = { .if_id = req_if_id };
return bpf_skb_set_xfrm_info(skb, &info) ? TC_ACT_SHOT : TC_ACT_UNSPEC;
}
SEC("tc")
int get_xfrm_info(struct __sk_buff *skb)
{
struct bpf_xfrm_info___local info = {};
if (bpf_skb_get_xfrm_info(skb, &info) < 0)
return TC_ACT_SHOT;
resp_if_id = info.if_id;
return TC_ACT_UNSPEC;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xfrm_info.c |
#include "core_reloc_types.h"
void f(struct core_reloc_nesting___err_too_deep x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_too_deep.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, u64);
__type(value, u64);
} m_hash SEC(".maps");
SEC("?raw_tp")
__failure __msg("R8 invalid mem access 'map_value_or_null")
int jeq_infer_not_null_ptr_to_btfid(void *ctx)
{
struct bpf_map *map = (struct bpf_map *)&m_hash;
struct bpf_map *inner_map = map->inner_map_meta;
u64 key = 0, ret = 0, *val;
val = bpf_map_lookup_elem(map, &key);
/* Do not mark the pointer as non-null if one of the compared
 * registers is PTR_TO_BTF_ID (R9); the program must be rejected
 * because of the invalid access to the map value (R8).
 *
 * We need inline asm here to access R8 directly, since the
 * compiler may pick another register once it figures out that
 * val == inner_map.
 */
asm volatile("r8 = %[val];\n"
"r9 = %[inner_map];\n"
"if r8 != r9 goto +1;\n"
"%[ret] = *(u64 *)(r8 +0);\n"
: [ret] "+r"(ret)
: [inner_map] "r"(inner_map), [val] "r"(val)
: "r8", "r9");
return ret;
}
| linux-master | tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c |
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___just_big_enough x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <time.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct timer {
struct bpf_timer t;
};
struct lock {
struct bpf_spin_lock l;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct timer);
} timers SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct lock);
} locks SEC(".maps");
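/* bpf_timer and bpf_spin_lock helpers are restricted to a small set of
 * program types; every tracing program below (raw_tp, tp, kprobe,
 * perf_event) is expected to fail verification when it calls them.
 */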
static int timer_cb(void *map, int *key, struct timer *timer)
{
return 0;
}
static void timer_work(void)
{
struct timer *timer;
const int key = 0;
timer = bpf_map_lookup_elem(&timers, &key);
if (timer) {
bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
bpf_timer_set_callback(&timer->t, timer_cb);
bpf_timer_start(&timer->t, 10E9, 0);
bpf_timer_cancel(&timer->t);
}
}
static void spin_lock_work(void)
{
const int key = 0;
struct lock *lock;
lock = bpf_map_lookup_elem(&locks, &key);
if (lock) {
bpf_spin_lock(&lock->l);
bpf_spin_unlock(&lock->l);
}
}
SEC("?raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
timer_work();
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
timer_work();
return 0;
}
SEC("?kprobe")
int kprobe_timer(void *ctx)
{
timer_work();
return 0;
}
SEC("?perf_event")
int perf_event_timer(void *ctx)
{
timer_work();
return 0;
}
SEC("?raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
spin_lock_work();
return 0;
}
SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
spin_lock_work();
return 0;
}
SEC("?kprobe")
int kprobe_spin_lock(void *ctx)
{
spin_lock_work();
return 0;
}
SEC("?perf_event")
int perf_event_spin_lock(void *ctx)
{
spin_lock_work();
return 0;
}
const char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_helper_restricted.c |
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(struct ipv_counts));
__uint(max_entries, 4);
} btf_map SEC(".maps");
__attribute__((noinline))
int test_long_fname_2(void)
{
struct ipv_counts *counts;
int key = 0;
counts = bpf_map_lookup_elem(&btf_map, &key);
if (!counts)
return 0;
counts->v6++;
return 0;
}
__attribute__((noinline))
int test_long_fname_1(void)
{
return test_long_fname_2();
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(void *arg)
{
return test_long_fname_1();
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_btf_nokv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
long create_errs = 0;
long create_cnts = 0;
long kmalloc_cnts = 0;
__u32 bench_pid = 0;
struct storage {
__u8 data[64];
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct storage);
} sk_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct storage);
} task_storage_map SEC(".maps");
SEC("raw_tp/kmalloc")
int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
int node)
{
__sync_fetch_and_add(&kmalloc_cnts, 1);
return 0;
}
SEC("tp_btf/sched_process_fork")
int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child)
{
struct storage *stg;
if (parent->tgid != bench_pid)
return 0;
stg = bpf_task_storage_get(&task_storage_map, child, NULL,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (stg)
__sync_fetch_and_add(&create_cnts, 1);
else
__sync_fetch_and_add(&create_errs, 1);
return 0;
}
SEC("lsm.s/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
int protocol, int kern)
{
struct storage *stg;
__u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid != bench_pid)
return 0;
stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (stg)
__sync_fetch_and_add(&create_cnts, 1);
else
__sync_fetch_and_add(&create_errs, 1);
return 0;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bench_local_storage_create.c |
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* Copy-paste of jhash from kernel sources, to make sure LLVM can
 * compile it into a valid sequence of BPF instructions
 */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
#define PCKT_FRAGMENTED 65343
#define IPV4_HDR_LEN_NO_OPT 20
#define IPV4_PLUS_ICMP_HDR 28
#define IPV6_PLUS_ICMP_HDR 48
#define RING_SIZE 2
#define MAX_VIPS 12
#define MAX_REALS 5
#define CTL_MAP_SIZE 16
#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
#define F_IPV6 (1 << 0)
#define F_HASH_NO_SRC_PORT (1 << 0)
#define F_ICMP (1 << 0)
#define F_SYN_SET (1 << 1)
struct packet_description {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct vip_stats {
__u64 bytes;
__u64 pkts;
};
struct eth_hdr {
unsigned char eth_dest[ETH_ALEN];
unsigned char eth_source[ETH_ALEN];
unsigned short eth_proto;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_VIPS);
__type(key, struct vip);
__type(value, struct vip_meta);
} vip_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CH_RINGS_SIZE);
__type(key, __u32);
__type(value, __u32);
} ch_rings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_REALS);
__type(key, __u32);
__type(value, struct real_definition);
} reals SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_VIPS);
__type(key, __u32);
__type(value, struct vip_stats);
} stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CTL_MAP_SIZE);
__type(key, __u32);
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
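/* L4 load balancer data path: hash the flow onto the per-VIP consistent
 * hashing ring to pick a real server, account the packet in the stats map,
 * then encapsulate by setting a tunnel key and redirecting to the
 * configured interface.
 */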
static __always_inline __u32 get_packet_hash(struct packet_description *pckt,
bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
pckt->ports, CH_RINGS_SIZE);
else
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
static __always_inline bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6) % RING_SIZE;
__u32 key = RING_SIZE * vip_info->vip_num + hash;
__u32 *real_pos;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return false;
return true;
}
static __always_inline int parse_icmpv6(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
return TC_ACT_OK;
off += sizeof(struct icmp6hdr);
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
pckt->proto = ip6h->nexthdr;
pckt->flags |= F_ICMP;
memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
return TC_ACT_UNSPEC;
}
static __always_inline int parse_icmp(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->type != ICMP_DEST_UNREACH ||
icmp_hdr->code != ICMP_FRAG_NEEDED)
return TC_ACT_OK;
off += sizeof(struct icmphdr);
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
pckt->proto = iph->protocol;
pckt->flags |= F_ICMP;
pckt->src = iph->daddr;
pckt->dst = iph->saddr;
return TC_ACT_UNSPEC;
}
static __always_inline bool parse_udp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct udphdr *udp;
udp = data + off;
if (udp + 1 > data_end)
return false;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = udp->source;
pckt->port16[1] = udp->dest;
} else {
pckt->port16[0] = udp->dest;
pckt->port16[1] = udp->source;
}
return true;
}
static __always_inline bool parse_tcp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct tcphdr *tcp;
tcp = data + off;
if (tcp + 1 > data_end)
return false;
if (tcp->syn)
pckt->flags |= F_SYN_SET;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = tcp->source;
pckt->port16[1] = tcp->dest;
} else {
pckt->port16[0] = tcp->dest;
pckt->port16[1] = tcp->source;
}
return true;
}
static __always_inline int process_packet(void *data, __u64 off, void *data_end,
bool is_ipv6, struct __sk_buff *skb)
{
void *pkt_start = (void *)(long)skb->data;
struct packet_description pckt = {};
struct eth_hdr *eth = pkt_start;
struct bpf_tunnel_key tkey = {};
struct vip_stats *data_stats;
struct real_definition *dst;
struct vip_meta *vip_info;
struct ctl_value *cval;
__u32 v4_intf_pos = 1;
__u32 v6_intf_pos = 2;
struct ipv6hdr *ip6h;
struct vip vip = {};
struct iphdr *iph;
int tun_flag = 0;
__u16 pkt_bytes;
__u64 iph_len;
__u32 ifindex;
__u8 protocol;
__u32 vip_num;
int action;
tkey.tunnel_ttl = 64;
if (is_ipv6) {
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
iph_len = sizeof(struct ipv6hdr);
protocol = ip6h->nexthdr;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (protocol == IPPROTO_FRAGMENT) {
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_ICMPV6) {
action = parse_icmpv6(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV6_PLUS_ICMP_HDR;
} else {
memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
}
} else {
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
protocol = iph->protocol;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(iph->tot_len);
off += IPV4_HDR_LEN_NO_OPT;
if (iph->frag_off & PCKT_FRAGMENTED)
return TC_ACT_SHOT;
if (protocol == IPPROTO_ICMP) {
action = parse_icmp(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV4_PLUS_ICMP_HDR;
} else {
pckt.src = iph->saddr;
pckt.dst = iph->daddr;
}
}
protocol = pckt.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else {
return TC_ACT_SHOT;
}
if (is_ipv6)
memcpy(vip.daddr.v6, pckt.dstv6, 16);
else
vip.daddr.v4 = pckt.dst;
vip.dport = pckt.port16[1];
vip.protocol = pckt.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.dport = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return TC_ACT_SHOT;
pckt.port16[1] = 0;
}
if (vip_info->flags & F_HASH_NO_SRC_PORT)
pckt.port16[0] = 0;
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
return TC_ACT_SHOT;
if (dst->flags & F_IPV6) {
cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
memcpy(tkey.remote_ipv6, dst->dstv6, 16);
tun_flag = BPF_F_TUNINFO_IPV6;
} else {
cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
tkey.remote_ipv4 = dst->dst;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return TC_ACT_SHOT;
data_stats->pkts++;
data_stats->bytes += pkt_bytes;
bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
*(u32 *)eth->eth_dest = tkey.remote_ipv4;
return bpf_redirect(ifindex, 0);
}
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return TC_ACT_SHOT;
eth_proto = eth->eth_proto;
if (eth_proto == bpf_htons(ETH_P_IP))
return process_packet(data, nh_off, data_end, false, ctx);
else if (eth_proto == bpf_htons(ETH_P_IPV6))
return process_packet(data, nh_off, data_end, true, ctx);
else
return TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_l4lb.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} array_1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} array_2 SEC(".maps");
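/* array_2 is created with BPF_F_PRESERVE_ELEMS, so its perf event elements
 * are kept when the userspace fd of the map is closed; array_1 uses the
 * default behaviour and loses its elements.
 */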
SEC("raw_tp/sched_switch")
int BPF_PROG(read_array_1)
{
struct bpf_perf_event_value val;
return bpf_perf_event_read_value(&array_1, 0, &val, sizeof(val));
}
SEC("raw_tp/task_rename")
int BPF_PROG(read_array_2)
{
struct bpf_perf_event_value val;
return bpf_perf_event_read_value(&array_2, 0, &val, sizeof(val));
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 5);
__type(key, int);
__type(value, int);
} inner_map1 SEC(".maps");
struct outer_map {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 3);
__type(key, int);
__array(values, struct inner_map);
} outer_map1 SEC(".maps") = {
.values = {
[2] = &inner_map1,
},
};
SEC("raw_tp/sys_enter")
int handle__sys_enter(void *ctx)
{
int outer_key = 2, inner_key = 3;
int *val;
void *map;
map = bpf_map_lookup_elem(&outer_map1, &outer_key);
if (!map)
return 1;
val = bpf_map_lookup_elem(map, &inner_key);
if (!val)
return 1;
if (*val == 1)
*val = 2;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/inner_array_lookup.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ret_val.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("socket")
__description("invalid map_fd for function call")
__failure __msg("fd 0 is not pointing to valid bpf_map")
__failure_unpriv
__naked void map_fd_for_function_call(void)
{
asm volatile (" \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r2 = r10; \
r2 += -8; \
.8byte %[ld_map_fd]; \
.8byte 0; \
call %[bpf_map_delete_elem]; \
exit; \
" :
: __imm(bpf_map_delete_elem),
__imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0))
: __clobber_all);
}
SEC("socket")
__description("don't check return value before access")
__failure __msg("R0 invalid mem access 'map_value_or_null'")
__failure_unpriv
__naked void check_return_value_before_access(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r1 = 0; \
*(u64*)(r0 + 0) = r1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("access memory with incorrect alignment")
__failure __msg("misaligned value access")
__failure_unpriv
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void access_memory_with_incorrect_alignment_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r0 + 4) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("sometimes access memory with incorrect alignment")
__failure __msg("R0 invalid mem access")
__msg_unpriv("R0 leaks addr")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void access_memory_with_incorrect_alignment_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r0 + 0) = r1; \
exit; \
l0_%=: r1 = 1; \
*(u64*)(r0 + 0) = r1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_map_ret_val.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
__u64 test1_hits = 0;
__u64 address_low = 0;
__u64 address_high = 0;
int wasted_entries = 0;
long total_entries = 0;
#define ENTRY_CNT 32
struct perf_branch_entry entries[ENTRY_CNT] = {};
static inline bool gbs_in_range(__u64 val)
{
return (val >= address_low) && (val < address_high);
}
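/* address_low/address_high delimit the traced function's text range and are
 * presumably filled in by the userspace part of the test; count LBR entries
 * that fall entirely inside that range, and count leading entries outside
 * it as wasted.
 */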
SEC("fexit/bpf_testmod_loop_test")
int BPF_PROG(test1, int n, int ret)
{
long i;
total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
total_entries /= sizeof(struct perf_branch_entry);
for (i = 0; i < ENTRY_CNT; i++) {
if (i >= total_entries)
break;
if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to))
test1_hits++;
else if (!test1_hits)
wasted_entries++;
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/get_branch_snapshot.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include "netcnt_common.h"
#define MAX_BPS (3 * 1024 * 1024)
#define REFRESH_TIME_NS 100000000
#define NS_PER_SEC 1000000000
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, union percpu_net_cnt);
} percpu_netcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, union net_cnt);
} netcnt SEC(".maps");
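/* Per-cgroup traffic accounting: per-CPU counters are folded into the
 * shared cgroup storage once MAX_PERCPU_PACKETS is exceeded, and the
 * verdict approximates a MAX_BPS rate limit over REFRESH_TIME_NS windows.
 */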
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
union percpu_net_cnt *percpu_cnt;
union net_cnt *cnt;
__u64 ts, dt;
int ret;
cnt = bpf_get_local_storage(&netcnt, 0);
percpu_cnt = bpf_get_local_storage(&percpu_netcnt, 0);
percpu_cnt->packets++;
percpu_cnt->bytes += skb->len;
if (percpu_cnt->packets > MAX_PERCPU_PACKETS) {
__sync_fetch_and_add(&cnt->packets,
percpu_cnt->packets);
percpu_cnt->packets = 0;
__sync_fetch_and_add(&cnt->bytes,
percpu_cnt->bytes);
percpu_cnt->bytes = 0;
}
ts = bpf_ktime_get_ns();
dt = ts - percpu_cnt->prev_ts;
dt *= MAX_BPS;
dt /= NS_PER_SEC;
if (cnt->bytes + percpu_cnt->bytes - percpu_cnt->prev_bytes < dt)
ret = 1;
else
ret = 0;
if (dt > REFRESH_TIME_NS) {
percpu_cnt->prev_ts = ts;
percpu_cnt->prev_packets = cnt->packets;
percpu_cnt->prev_bytes = cnt->bytes;
}
return !!ret;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/netcnt_prog.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
#define STACK_MAX_LEN 600
#define SUBPROGS
#define NO_UNROLL
#define USE_ITER
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf600_iter.c |
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <asm/errno.h>
#define TC_ACT_OK 0
#define TC_ACT_SHOT 2
#define NSEC_PER_SEC 1000000000L
#define ETH_ALEN 6
#define ETH_P_IP 0x0800
#define ETH_P_IPV6 0x86DD
#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
#define IP_DF 0x4000
#define IP_MF 0x2000
#define IP_OFFSET 0x1fff
#define NEXTHDR_TCP 6
#define TCPOPT_NOP 1
#define TCPOPT_EOL 0
#define TCPOPT_MSS 2
#define TCPOPT_WINDOW 3
#define TCPOPT_SACK_PERM 4
#define TCPOPT_TIMESTAMP 8
#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCP_TS_HZ 1000
#define TS_OPT_WSCALE_MASK 0xf
#define TS_OPT_SACK (1 << 4)
#define TS_OPT_ECN (1 << 5)
#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)
#define TCP_MAX_WSCALE 14U
#define IPV4_MAXLEN 60
#define TCP_MAXLEN 60
#define DEFAULT_MSS4 1460
#define DEFAULT_MSS6 1440
#define DEFAULT_WSCALE 7
#define DEFAULT_TTL 64
#define MAX_ALLOWED_PORTS 8
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(ptr); \
__pptr->x; \
})
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(max_entries, 2);
} values SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u16);
__uint(max_entries, MAX_ALLOWED_PORTS);
} allowed_ports SEC(".maps");
/* Some symbols defined in net/netfilter/nf_conntrack_bpf.c are unavailable in
* vmlinux.h if CONFIG_NF_CONNTRACK=m, so they are redefined locally.
*/
struct bpf_ct_opts___local {
s32 netns_id;
s32 error;
u8 l4proto;
u8 dir;
u8 reserved[2];
} __attribute__((preserve_access_index));
#define BPF_F_CURRENT_NETNS (-1)
extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
struct bpf_sock_tuple *bpf_tuple,
__u32 len_tuple,
struct bpf_ct_opts___local *opts,
__u32 len_opts) __ksym;
extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
struct bpf_sock_tuple *bpf_tuple,
u32 len_tuple,
struct bpf_ct_opts___local *opts,
u32 len_opts) __ksym;
extern void bpf_ct_release(struct nf_conn *ct) __ksym;
static __always_inline void swap_eth_addr(__u8 *a, __u8 *b)
{
__u8 tmp[ETH_ALEN];
__builtin_memcpy(tmp, a, ETH_ALEN);
__builtin_memcpy(a, b, ETH_ALEN);
__builtin_memcpy(b, tmp, ETH_ALEN);
}
static __always_inline __u16 csum_fold(__u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
csum = (csum & 0xffff) + (csum >> 16);
return (__u16)~csum;
}
static __always_inline __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__u32 csum)
{
__u64 s = csum;
s += (__u32)saddr;
s += (__u32)daddr;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
s += proto + len;
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
s += (proto + len) << 8;
#else
#error Unknown endian
#endif
s = (s & 0xffffffff) + (s >> 32);
s = (s & 0xffffffff) + (s >> 32);
return csum_fold((__u32)s);
}
static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __u32 csum)
{
__u64 sum = csum;
int i;
#pragma unroll
for (i = 0; i < 4; i++)
sum += (__u32)saddr->in6_u.u6_addr32[i];
#pragma unroll
for (i = 0; i < 4; i++)
sum += (__u32)daddr->in6_u.u6_addr32[i];
/* Don't combine additions to avoid 32-bit overflow. */
sum += bpf_htonl(len);
sum += bpf_htonl(proto);
sum = (sum & 0xffffffff) + (sum >> 32);
sum = (sum & 0xffffffff) + (sum >> 32);
return csum_fold((__u32)sum);
}
static __always_inline __u64 tcp_clock_ns(void)
{
return bpf_ktime_get_ns();
}
static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
{
return ns / (NSEC_PER_SEC / TCP_TS_HZ);
}
static __always_inline __u32 tcp_time_stamp_raw(void)
{
return tcp_ns_to_ts(tcp_clock_ns());
}
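/* Timestamp-cookie handling: the low TSBITS bits of our tsval encode the
 * peer's window scale plus SACK and ECN support, mirroring the kernel's
 * cookie_init_timestamp() encoding.
 */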
struct tcpopt_context {
__u8 *ptr;
__u8 *end;
void *data_end;
__be32 *tsecr;
__u8 wscale;
bool option_timestamp;
bool option_sack;
};
static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
{
__u8 opcode, opsize;
if (ctx->ptr >= ctx->end)
return 1;
if (ctx->ptr >= ctx->data_end)
return 1;
opcode = ctx->ptr[0];
if (opcode == TCPOPT_EOL)
return 1;
if (opcode == TCPOPT_NOP) {
++ctx->ptr;
return 0;
}
if (ctx->ptr + 1 >= ctx->end)
return 1;
if (ctx->ptr + 1 >= ctx->data_end)
return 1;
opsize = ctx->ptr[1];
if (opsize < 2)
return 1;
if (ctx->ptr + opsize > ctx->end)
return 1;
switch (opcode) {
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
break;
case TCPOPT_TIMESTAMP:
if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
ctx->option_timestamp = true;
/* Client's tsval becomes our tsecr. */
*ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
}
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM)
ctx->option_sack = true;
break;
}
ctx->ptr += opsize;
return 0;
}
static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
{
int i;
for (i = 0; i < 7; i++)
if (tscookie_tcpopt_parse(context))
return 1;
return 0;
}
static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
__u16 tcp_len, __be32 *tsval,
__be32 *tsecr, void *data_end)
{
struct tcpopt_context loop_ctx = {
.ptr = (__u8 *)(tcp_header + 1),
.end = (__u8 *)tcp_header + tcp_len,
.data_end = data_end,
.tsecr = tsecr,
.wscale = TS_OPT_WSCALE_MASK,
.option_timestamp = false,
.option_sack = false,
};
u32 cookie;
bpf_loop(6, tscookie_tcpopt_parse_batch, &loop_ctx, 0);
if (!loop_ctx.option_timestamp)
return false;
cookie = tcp_time_stamp_raw() & ~TSMASK;
cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
if (loop_ctx.option_sack)
cookie |= TS_OPT_SACK;
if (tcp_header->ece && tcp_header->cwr)
cookie |= TS_OPT_ECN;
*tsval = bpf_htonl(cookie);
return true;
}
static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
__u8 *ttl, bool ipv6)
{
__u32 key = 0;
__u64 *value;
value = bpf_map_lookup_elem(&values, &key);
if (value && *value != 0) {
if (ipv6)
*mss = (*value >> 32) & 0xffff;
else
*mss = *value & 0xffff;
*wscale = (*value >> 16) & 0xf;
*ttl = (*value >> 24) & 0xff;
return;
}
*mss = ipv6 ? DEFAULT_MSS6 : DEFAULT_MSS4;
*wscale = DEFAULT_WSCALE;
*ttl = DEFAULT_TTL;
}
static __always_inline void values_inc_synacks(void)
{
__u32 key = 1;
__u64 *value;
value = bpf_map_lookup_elem(&values, &key);
if (value)
__sync_fetch_and_add(value, 1);
}
static __always_inline bool check_port_allowed(__u16 port)
{
__u32 i;
for (i = 0; i < MAX_ALLOWED_PORTS; i++) {
__u32 key = i;
__u16 *value;
value = bpf_map_lookup_elem(&allowed_ports, &key);
if (!value)
break;
/* 0 is a terminator value. Check it first to avoid matching on
* a forbidden port == 0 and returning true.
*/
if (*value == 0)
break;
if (*value == port)
return true;
}
return false;
}
struct header_pointers {
struct ethhdr *eth;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
__u16 tcp_len;
};
static __always_inline int tcp_dissect(void *data, void *data_end,
struct header_pointers *hdr)
{
hdr->eth = data;
if (hdr->eth + 1 > data_end)
return XDP_DROP;
switch (bpf_ntohs(hdr->eth->h_proto)) {
case ETH_P_IP:
hdr->ipv6 = NULL;
hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
if (hdr->ipv4 + 1 > data_end)
return XDP_DROP;
if (hdr->ipv4->ihl * 4 < sizeof(*hdr->ipv4))
return XDP_DROP;
if (hdr->ipv4->version != 4)
return XDP_DROP;
if (hdr->ipv4->protocol != IPPROTO_TCP)
return XDP_PASS;
hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
break;
case ETH_P_IPV6:
hdr->ipv4 = NULL;
hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
if (hdr->ipv6 + 1 > data_end)
return XDP_DROP;
if (hdr->ipv6->version != 6)
return XDP_DROP;
/* XXX: Extension headers are not supported and could circumvent
* XDP SYN flood protection.
*/
if (hdr->ipv6->nexthdr != NEXTHDR_TCP)
return XDP_PASS;
hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
break;
default:
/* XXX: VLANs will circumvent XDP SYN flood protection. */
return XDP_PASS;
}
if (hdr->tcp + 1 > data_end)
return XDP_DROP;
hdr->tcp_len = hdr->tcp->doff * 4;
if (hdr->tcp_len < sizeof(*hdr->tcp))
return XDP_DROP;
return XDP_TX;
}
static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bool xdp)
{
struct bpf_ct_opts___local ct_lookup_opts = {
.netns_id = BPF_F_CURRENT_NETNS,
.l4proto = IPPROTO_TCP,
};
struct bpf_sock_tuple tup = {};
struct nf_conn *ct;
__u32 tup_size;
if (hdr->ipv4) {
/* TCP doesn't normally use fragments, and XDP can't reassemble
* them.
*/
if ((hdr->ipv4->frag_off & bpf_htons(IP_DF | IP_MF | IP_OFFSET)) != bpf_htons(IP_DF))
return XDP_DROP;
tup.ipv4.saddr = hdr->ipv4->saddr;
tup.ipv4.daddr = hdr->ipv4->daddr;
tup.ipv4.sport = hdr->tcp->source;
tup.ipv4.dport = hdr->tcp->dest;
tup_size = sizeof(tup.ipv4);
} else if (hdr->ipv6) {
__builtin_memcpy(tup.ipv6.saddr, &hdr->ipv6->saddr, sizeof(tup.ipv6.saddr));
__builtin_memcpy(tup.ipv6.daddr, &hdr->ipv6->daddr, sizeof(tup.ipv6.daddr));
tup.ipv6.sport = hdr->tcp->source;
tup.ipv6.dport = hdr->tcp->dest;
tup_size = sizeof(tup.ipv6);
} else {
/* The verifier can't track that either ipv4 or ipv6 is not
* NULL.
*/
return XDP_ABORTED;
}
if (xdp)
ct = bpf_xdp_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
else
ct = bpf_skb_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
if (ct) {
unsigned long status = ct->status;
bpf_ct_release(ct);
if (status & IPS_CONFIRMED_BIT)
return XDP_PASS;
} else if (ct_lookup_opts.error != -ENOENT) {
return XDP_ABORTED;
}
/* error == -ENOENT || !(status & IPS_CONFIRMED_BIT) */
return XDP_TX;
}
static __always_inline __u8 tcp_mkoptions(__be32 *buf, __be32 *tsopt, __u16 mss,
__u8 wscale)
{
__be32 *start = buf;
*buf++ = bpf_htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
if (!tsopt)
return buf - start;
if (tsopt[0] & bpf_htonl(1 << 4))
*buf++ = bpf_htonl((TCPOPT_SACK_PERM << 24) |
(TCPOLEN_SACK_PERM << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
else
*buf++ = bpf_htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
*buf++ = tsopt[0];
*buf++ = tsopt[1];
if ((tsopt[0] & bpf_htonl(0xf)) != bpf_htonl(0xf))
*buf++ = bpf_htonl((TCPOPT_NOP << 24) |
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
wscale);
return buf - start;
}
static __always_inline void tcp_gen_synack(struct tcphdr *tcp_header,
__u32 cookie, __be32 *tsopt,
__u16 mss, __u8 wscale)
{
void *tcp_options;
tcp_flag_word(tcp_header) = TCP_FLAG_SYN | TCP_FLAG_ACK;
if (tsopt && (tsopt[0] & bpf_htonl(1 << 5)))
tcp_flag_word(tcp_header) |= TCP_FLAG_ECE;
tcp_header->doff = 5; /* doff is part of tcp_flag_word. */
swap(tcp_header->source, tcp_header->dest);
tcp_header->ack_seq = bpf_htonl(bpf_ntohl(tcp_header->seq) + 1);
tcp_header->seq = bpf_htonl(cookie);
tcp_header->window = 0;
tcp_header->urg_ptr = 0;
tcp_header->check = 0; /* Calculate checksum later. */
tcp_options = (void *)(tcp_header + 1);
tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale);
}
static __always_inline void tcpv4_gen_synack(struct header_pointers *hdr,
__u32 cookie, __be32 *tsopt)
{
__u8 wscale;
__u16 mss;
__u8 ttl;
values_get_tcpipopts(&mss, &wscale, &ttl, false);
swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
swap(hdr->ipv4->saddr, hdr->ipv4->daddr);
hdr->ipv4->check = 0; /* Calculate checksum later. */
hdr->ipv4->tos = 0;
hdr->ipv4->id = 0;
hdr->ipv4->ttl = ttl;
tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
hdr->tcp_len = hdr->tcp->doff * 4;
hdr->ipv4->tot_len = bpf_htons(sizeof(*hdr->ipv4) + hdr->tcp_len);
}
static __always_inline void tcpv6_gen_synack(struct header_pointers *hdr,
__u32 cookie, __be32 *tsopt)
{
__u8 wscale;
__u16 mss;
__u8 ttl;
values_get_tcpipopts(&mss, &wscale, &ttl, true);
swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
swap(hdr->ipv6->saddr, hdr->ipv6->daddr);
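	/* First 4 bytes of the IPv6 header: version 6, traffic class 0, flow label 0. */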
*(__be32 *)hdr->ipv6 = bpf_htonl(0x60000000);
hdr->ipv6->hop_limit = ttl;
tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
hdr->tcp_len = hdr->tcp->doff * 4;
hdr->ipv6->payload_len = bpf_htons(hdr->tcp_len);
}
static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
void *ctx,
void *data, void *data_end,
bool xdp)
{
__u32 old_pkt_size, new_pkt_size;
/* Unlike clang 10, clang 11 and 12 generate code that doesn't pass the
* BPF verifier if tsopt is not volatile. Volatile forces it to store
* the pointer value and use it directly, otherwise tcp_mkoptions is
* (mis)compiled like this:
* if (!tsopt)
* return buf - start;
* reg = stored_return_value_of_tscookie_init;
* if (reg)
* tsopt = tsopt_buf;
* else
* tsopt = NULL;
* ...
* *buf++ = tsopt[1];
* It creates a dead branch where tsopt is assigned NULL, but the
* verifier can't prove it's dead and blocks the program.
*/
__be32 * volatile tsopt = NULL;
__be32 tsopt_buf[2] = {};
__u16 ip_len;
__u32 cookie;
__s64 value;
/* Checksum is not yet verified, but both checksum failure and TCP
* header checks return XDP_DROP, so the order doesn't matter.
*/
if (hdr->tcp->fin || hdr->tcp->rst)
return XDP_DROP;
/* Issue SYN cookies on allowed ports, drop SYN packets on blocked
* ports.
*/
if (!check_port_allowed(bpf_ntohs(hdr->tcp->dest)))
return XDP_DROP;
if (hdr->ipv4) {
/* Check the IPv4 and TCP checksums before creating a SYNACK. */
value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, hdr->ipv4->ihl * 4, 0);
if (value < 0)
return XDP_ABORTED;
if (csum_fold(value) != 0)
return XDP_DROP; /* Bad IPv4 checksum. */
value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
if (value < 0)
return XDP_ABORTED;
if (csum_tcpudp_magic(hdr->ipv4->saddr, hdr->ipv4->daddr,
hdr->tcp_len, IPPROTO_TCP, value) != 0)
return XDP_DROP; /* Bad TCP checksum. */
ip_len = sizeof(*hdr->ipv4);
value = bpf_tcp_raw_gen_syncookie_ipv4(hdr->ipv4, hdr->tcp,
hdr->tcp_len);
} else if (hdr->ipv6) {
/* Check the TCP checksum before creating a SYNACK. */
value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
if (value < 0)
return XDP_ABORTED;
if (csum_ipv6_magic(&hdr->ipv6->saddr, &hdr->ipv6->daddr,
hdr->tcp_len, IPPROTO_TCP, value) != 0)
return XDP_DROP; /* Bad TCP checksum. */
ip_len = sizeof(*hdr->ipv6);
value = bpf_tcp_raw_gen_syncookie_ipv6(hdr->ipv6, hdr->tcp,
hdr->tcp_len);
} else {
return XDP_ABORTED;
}
if (value < 0)
return XDP_ABORTED;
cookie = (__u32)value;
if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
&tsopt_buf[0], &tsopt_buf[1], data_end))
tsopt = tsopt_buf;
/* Check that there is enough space for a SYNACK. It also covers
* the check that the destination of the __builtin_memmove below
* doesn't overflow.
*/
if (data + sizeof(*hdr->eth) + ip_len + TCP_MAXLEN > data_end)
return XDP_ABORTED;
if (hdr->ipv4) {
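		/* If the SYN carried IPv4 options, do not echo them: move the
		 * TCP header up against a bare 20-byte IPv4 header.
		 */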
if (hdr->ipv4->ihl * 4 > sizeof(*hdr->ipv4)) {
struct tcphdr *new_tcp_header;
new_tcp_header = data + sizeof(*hdr->eth) + sizeof(*hdr->ipv4);
__builtin_memmove(new_tcp_header, hdr->tcp, sizeof(*hdr->tcp));
hdr->tcp = new_tcp_header;
hdr->ipv4->ihl = sizeof(*hdr->ipv4) / 4;
}
tcpv4_gen_synack(hdr, cookie, tsopt);
} else if (hdr->ipv6) {
tcpv6_gen_synack(hdr, cookie, tsopt);
} else {
return XDP_ABORTED;
}
/* Recalculate checksums. */
hdr->tcp->check = 0;
value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
if (value < 0)
return XDP_ABORTED;
if (hdr->ipv4) {
hdr->tcp->check = csum_tcpudp_magic(hdr->ipv4->saddr,
hdr->ipv4->daddr,
hdr->tcp_len,
IPPROTO_TCP,
value);
hdr->ipv4->check = 0;
value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, sizeof(*hdr->ipv4), 0);
if (value < 0)
return XDP_ABORTED;
hdr->ipv4->check = csum_fold(value);
} else if (hdr->ipv6) {
hdr->tcp->check = csum_ipv6_magic(&hdr->ipv6->saddr,
&hdr->ipv6->daddr,
hdr->tcp_len,
IPPROTO_TCP,
value);
} else {
return XDP_ABORTED;
}
/* Set the new packet size. */
old_pkt_size = data_end - data;
new_pkt_size = sizeof(*hdr->eth) + ip_len + hdr->tcp->doff * 4;
if (xdp) {
if (bpf_xdp_adjust_tail(ctx, new_pkt_size - old_pkt_size))
return XDP_ABORTED;
} else {
if (bpf_skb_change_tail(ctx, new_pkt_size, 0))
return XDP_ABORTED;
}
values_inc_synacks();
return XDP_TX;
}
static __always_inline int syncookie_handle_ack(struct header_pointers *hdr)
{
int err;
if (hdr->tcp->rst)
return XDP_DROP;
if (hdr->ipv4)
err = bpf_tcp_raw_check_syncookie_ipv4(hdr->ipv4, hdr->tcp);
else if (hdr->ipv6)
err = bpf_tcp_raw_check_syncookie_ipv6(hdr->ipv6, hdr->tcp);
else
return XDP_ABORTED;
if (err)
return XDP_DROP;
return XDP_PASS;
}
static __always_inline int syncookie_part1(void *ctx, void *data, void *data_end,
struct header_pointers *hdr, bool xdp)
{
int ret;
ret = tcp_dissect(data, data_end, hdr);
if (ret != XDP_TX)
return ret;
ret = tcp_lookup(ctx, hdr, xdp);
if (ret != XDP_TX)
return ret;
/* Packet is TCP and doesn't belong to an established connection. */
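	/* Only a pure SYN (new handshake) or a pure ACK (cookie reply) may continue. */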
if ((hdr->tcp->syn ^ hdr->tcp->ack) != 1)
return XDP_DROP;
/* Grow the TCP header to TCP_MAXLEN to be able to pass any hdr->tcp_len
* to bpf_tcp_raw_gen_syncookie_ipv{4,6} and pass the verifier.
*/
if (xdp) {
if (bpf_xdp_adjust_tail(ctx, TCP_MAXLEN - hdr->tcp_len))
return XDP_ABORTED;
} else {
/* Without volatile the verifier throws this error:
* R9 32-bit pointer arithmetic prohibited
*/
volatile u64 old_len = data_end - data;
if (bpf_skb_change_tail(ctx, old_len + TCP_MAXLEN - hdr->tcp_len, 0))
return XDP_ABORTED;
}
return XDP_TX;
}
static __always_inline int syncookie_part2(void *ctx, void *data, void *data_end,
struct header_pointers *hdr, bool xdp)
{
if (hdr->ipv4) {
hdr->eth = data;
hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
/* IPV4_MAXLEN is needed when calculating checksum.
* At least sizeof(struct iphdr) is needed here to access ihl.
*/
if ((void *)hdr->ipv4 + IPV4_MAXLEN > data_end)
return XDP_ABORTED;
hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
} else if (hdr->ipv6) {
hdr->eth = data;
hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
} else {
return XDP_ABORTED;
}
if ((void *)hdr->tcp + TCP_MAXLEN > data_end)
return XDP_ABORTED;
/* We run out of registers, tcp_len gets spilled to the stack, and the
* verifier forgets its min and max values checked above in tcp_dissect.
*/
hdr->tcp_len = hdr->tcp->doff * 4;
if (hdr->tcp_len < sizeof(*hdr->tcp))
return XDP_ABORTED;
return hdr->tcp->syn ? syncookie_handle_syn(hdr, ctx, data, data_end, xdp) :
syncookie_handle_ack(hdr);
}
SEC("xdp")
int syncookie_xdp(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct header_pointers hdr;
int ret;
ret = syncookie_part1(ctx, data, data_end, &hdr, true);
if (ret != XDP_TX)
return ret;
data_end = (void *)(long)ctx->data_end;
data = (void *)(long)ctx->data;
return syncookie_part2(ctx, data, data_end, &hdr, true);
}
SEC("tc")
int syncookie_tc(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct header_pointers hdr;
int ret;
ret = syncookie_part1(skb, data, data_end, &hdr, false);
if (ret != XDP_TX)
return ret == XDP_PASS ? TC_ACT_OK : TC_ACT_SHOT;
data_end = (void *)(long)skb->data_end;
data = (void *)(long)skb->data;
ret = syncookie_part2(skb, data, data_end, &hdr, false);
switch (ret) {
case XDP_PASS:
return TC_ACT_OK;
case XDP_TX:
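		/* Send the generated SYNACK back out through the ingress interface. */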
return bpf_redirect(skb->ifindex, 0);
default:
return TC_ACT_SHOT;
}
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
SEC("lwt_in")
__description("variable-offset ctx access")
__failure __msg("variable ctx access var_off=(0x0; 0x4)")
__naked void variable_offset_ctx_access(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
/* add it to skb. We now have either &skb->len or\
* &skb->pkt_type, but we don't know which \
*/ \
r1 += r2; \
/* dereference it */ \
r0 = *(u32*)(r1 + 0); \
exit; \
" ::: __clobber_all);
}
SEC("cgroup/skb")
__description("variable-offset stack read, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_read_priv_vs_unpriv(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r0 = 0; \
*(u64*)(r10 - 8) = r0; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it for a stack read */ \
r0 = *(u32*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("lwt_in")
__description("variable-offset stack read, uninitialized")
__failure __msg("invalid variable-offset read from stack R2")
__naked void variable_offset_stack_read_uninitialized(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it for a stack read */ \
r0 = *(u32*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("variable-offset stack write, priv vs unpriv")
__success __failure_unpriv
/* Variable stack access is rejected for unprivileged programs.
*/
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_priv_vs_unpriv(void)
{
asm volatile (" \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 8-byte aligned */ \
r2 &= 8; \
r2 -= 16; \
/* Add it to fp. We now have either fp-8 or fp-16, but\
* we don't know which \
*/ \
r2 += r10; \
/* Dereference it for a stack write */ \
r0 = 0; \
*(u64*)(r2 + 0) = r0; \
/* Now read from the address we just wrote. This shows\
	 * that, after a variable-offset write, a privileged\
* program can read the slots that were in the range of\
* that write (even if the verifier doesn't actually know\
	 * if the slot being read was really written to or not).\
*/ \
r3 = *(u64*)(r2 + 0); \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
/* In the privileged case, dereferencing a spilled-and-then-filled
* register is rejected because the previous variable offset stack
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
*/
__msg("R2 invalid mem access 'scalar'")
__failure_unpriv
/* The unprivileged case is not too interesting; variable
* stack access is rejected.
*/
__msg_unpriv("R2 variable stack access prohibited for !root")
__naked void stack_write_clobbers_spilled_regs(void)
{
asm volatile (" \
/* Dummy instruction; needed because we need to patch the next one\
* and we can't patch the first instruction. \
*/ \
r6 = 0; \
/* Make R0 a map ptr */ \
r0 = %[map_hash_8b] ll; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 8-byte aligned */ \
r2 &= 8; \
r2 -= 16; \
/* Add it to fp. We now have either fp-8 or fp-16, but\
* we don't know which. \
*/ \
r2 += r10; \
/* Spill R0(map ptr) into stack */ \
*(u64*)(r10 - 8) = r0; \
/* Dereference the unknown value for a stack write */\
r0 = 0; \
*(u64*)(r2 + 0) = r0; \
/* Fill the register back into R2 */ \
r2 = *(u64*)(r10 - 8); \
/* Try to dereference R2 for a memory load */ \
r0 = *(u64*)(r2 + 8); \
exit; \
" :
: __imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("sockops")
__description("indirect variable-offset stack access, unbounded")
__failure __msg("invalid unbounded variable-offset indirect access to stack R4")
__naked void variable_offset_stack_access_unbounded(void)
{
asm volatile (" \
r2 = 6; \
r3 = 28; \
/* Fill the top 16 bytes of the stack. */ \
r4 = 0; \
*(u64*)(r10 - 16) = r4; \
r4 = 0; \
*(u64*)(r10 - 8) = r4; \
/* Get an unknown value. */ \
r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\
/* Check the lower bound but don't check the upper one. */\
if r4 s< 0 goto l0_%=; \
/* Point the lower bound to initialized stack. Offset is now in range\
* from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\
*/ \
r4 -= 16; \
r4 += r10; \
r5 = 8; \
/* Dereference it indirectly. */ \
call %[bpf_getsockopt]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_getsockopt),
__imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received))
: __clobber_all);
}
SEC("lwt_in")
__description("indirect variable-offset stack access, max out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_max_out_of_bound(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 8; \
/* add it to fp. We now have either fp-4 or fp-8, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it indirectly */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("lwt_in")
__description("indirect variable-offset stack access, min out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_min_out_of_bound(void)
{
asm volatile (" \
/* Fill the top 8 bytes of the stack */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned */ \
r2 &= 4; \
r2 -= 516; \
/* add it to fp. We now have either fp-516 or fp-512, but\
* we don't know which \
*/ \
r2 += r10; \
/* dereference it indirectly */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("lwt_in")
__description("indirect variable-offset stack access, min_off < min_initialized")
__failure __msg("invalid indirect read from stack R2 var_off")
__naked void access_min_off_min_initialized(void)
{
asm volatile (" \
/* Fill only the top 8 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, but we don't know\
	 * which. An 8-byte access at fp-16 would read uninitialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("indirect variable-offset stack access, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_access_priv_vs_unpriv(void)
{
asm volatile (" \
/* Fill the top 16 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 16) = r2; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value. */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
* which, but either way it points to initialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("lwt_in")
__description("indirect variable-offset stack access, ok")
__success __retval(0)
__naked void variable_offset_stack_access_ok(void)
{
asm volatile (" \
/* Fill the top 16 bytes of the stack. */ \
r2 = 0; \
*(u64*)(r10 - 16) = r2; \
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
/* Get an unknown value. */ \
r2 = *(u32*)(r1 + 0); \
/* Make it small and 4-byte aligned. */ \
r2 &= 4; \
r2 -= 16; \
/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
* which, but either way it points to initialized stack.\
*/ \
r2 += r10; \
/* Dereference it indirectly. */ \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_var_off.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
uint32_t tid = 0;
int num_unknown_tid = 0;
int num_known_tid = 0;
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
static char info[] = " === END ===";
if (task == (void *)0) {
BPF_SEQ_PRINTF(seq, "%s\n", info);
return 0;
}
if (task->pid != tid)
num_unknown_tid++;
else
num_known_tid++;
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, " tgid gid\n");
BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
return 0;
}
int num_expected_failure_copy_from_user_task = 0;
int num_success_copy_from_user_task = 0;
SEC("iter.s/task")
int dump_task_sleepable(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
static const char info[] = " === END ===";
struct pt_regs *regs;
void *ptr;
uint32_t user_data = 0;
int ret;
if (task == (void *)0) {
BPF_SEQ_PRINTF(seq, "%s\n", info);
return 0;
}
/* Read an invalid pointer and ensure we get an error */
ptr = NULL;
ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
if (ret) {
++num_expected_failure_copy_from_user_task;
} else {
BPF_SEQ_PRINTF(seq, "%s\n", info);
return 0;
}
/* Try to read the contents of the task's instruction pointer from the
* remote task's address space.
*/
regs = (struct pt_regs *)bpf_task_pt_regs(task);
if (regs == (void *)0) {
BPF_SEQ_PRINTF(seq, "%s\n", info);
return 0;
}
ptr = (void *)PT_REGS_IP(regs);
ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
if (ret) {
BPF_SEQ_PRINTF(seq, "%s\n", info);
return 0;
}
++num_success_copy_from_user_task;
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(seq, " tgid gid data\n");
BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, user_data);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_task.c |
// SPDX-License-Identifier: GPL-2.0
#define KBUILD_MODNAME "xdp_dummy"
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_dummy.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
/* Copied from mm.h */
#define VM_READ 0x00000001
#define VM_WRITE 0x00000002
#define VM_EXEC 0x00000004
#define VM_MAYSHARE 0x00000080
/* Copied from kdev_t.h */
#define MINORBITS 20
#define MINORMASK ((1U << MINORBITS) - 1)
#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
#define D_PATH_BUF_SIZE 1024
char d_path_buf[D_PATH_BUF_SIZE] = {};
__u32 pid = 0;
__u32 one_task = 0;
__u32 one_task_error = 0;
SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
{
struct vm_area_struct *vma = ctx->vma;
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
struct file *file;
char perm_str[] = "----";
if (task == (void *)0 || vma == (void *)0)
return 0;
file = vma->vm_file;
if (task->tgid != pid) {
if (one_task)
one_task_error = 1;
return 0;
}
perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
perm_str[3] = (vma->vm_flags & VM_MAYSHARE) ? 's' : 'p';
BPF_SEQ_PRINTF(seq, "%08llx-%08llx %s ", vma->vm_start, vma->vm_end, perm_str);
if (file) {
__u32 dev = file->f_inode->i_sb->s_dev;
bpf_d_path(&file->f_path, d_path_buf, D_PATH_BUF_SIZE);
BPF_SEQ_PRINTF(seq, "%08llx ", vma->vm_pgoff << 12);
BPF_SEQ_PRINTF(seq, "%02x:%02x %u", MAJOR(dev), MINOR(dev),
file->f_inode->i_ino);
BPF_SEQ_PRINTF(seq, "\t%s\n", d_path_buf);
} else {
BPF_SEQ_PRINTF(seq, "%08llx 00:00 0\n", 0ULL);
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
const volatile int my_pid;
bool abc1_called;
bool abc2_called;
bool custom1_called;
bool custom2_called;
bool kprobe1_called;
bool xyz_called;
SEC("abc")
int abc1(void *ctx)
{
abc1_called = true;
return 0;
}
SEC("abc/whatever")
int abc2(void *ctx)
{
abc2_called = true;
return 0;
}
SEC("custom")
int custom1(void *ctx)
{
custom1_called = true;
return 0;
}
SEC("custom/something")
int custom2(void *ctx)
{
custom2_called = true;
return 0;
}
SEC("kprobe")
int kprobe1(void *ctx)
{
kprobe1_called = true;
return 0;
}
SEC("xyz/blah")
int xyz(void *ctx)
{
int whatever;
/* use sleepable helper, custom handler should set sleepable flag */
bpf_copy_from_user(&whatever, sizeof(whatever), NULL);
xyz_called = true;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
char LICENSE[] SEC("license") = "GPL";
int pid = 0;
int fentry_cnt = 0;
int fexit_cnt = 0;
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int nanosleep_fentry(void *ctx)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
fentry_cnt++;
return 0;
}
SEC("fexit/" SYS_PREFIX "sys_nanosleep")
int nanosleep_fexit(void *ctx)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0;
fexit_cnt++;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/fexit_sleep.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/stddef.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* the maximum delay we are willing to add (drop packets beyond that) */
#define TIME_HORIZON_NS (2000 * 1000 * 1000)
#define NS_PER_SEC 1000000000
#define ECN_HORIZON_NS 5000000
#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
/* flow_key => last_tstamp timestamp used */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, uint32_t);
__type(value, uint64_t);
__uint(max_entries, 1);
} flow_map SEC(".maps");
static inline int throttle_flow(struct __sk_buff *skb)
{
int key = 0;
uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
THROTTLE_RATE_BPS;
uint64_t now = bpf_ktime_get_ns();
uint64_t tstamp, next_tstamp = 0;
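	/* Earliest-departure-time pacing: the next packet may leave delay_ns
	 * after the previous departure recorded in flow_map.
	 */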
if (last_tstamp)
next_tstamp = *last_tstamp + delay_ns;
tstamp = skb->tstamp;
if (tstamp < now)
tstamp = now;
/* should we throttle? */
if (next_tstamp <= tstamp) {
if (bpf_map_update_elem(&flow_map, &key, &tstamp, BPF_ANY))
return TC_ACT_SHOT;
return TC_ACT_OK;
}
/* do not queue past the time horizon */
if (next_tstamp - now >= TIME_HORIZON_NS)
return TC_ACT_SHOT;
/* set ecn bit, if needed */
if (next_tstamp - now >= ECN_HORIZON_NS)
bpf_skb_ecn_set_ce(skb);
if (bpf_map_update_elem(&flow_map, &key, &next_tstamp, BPF_EXIST))
return TC_ACT_SHOT;
skb->tstamp = next_tstamp;
return TC_ACT_OK;
}
static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
{
void *data_end = (void *)(long)skb->data_end;
/* drop malformed packets */
if ((void *)(tcp + 1) > data_end)
return TC_ACT_SHOT;
if (tcp->dest == bpf_htons(9000))
return throttle_flow(skb);
return TC_ACT_OK;
}
static inline int handle_ipv4(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct iphdr *iph;
uint32_t ihl;
/* drop malformed packets */
if (data + sizeof(struct ethhdr) > data_end)
return TC_ACT_SHOT;
iph = (struct iphdr *)(data + sizeof(struct ethhdr));
if ((void *)(iph + 1) > data_end)
return TC_ACT_SHOT;
ihl = iph->ihl * 4;
if (((void *)iph) + ihl > data_end)
return TC_ACT_SHOT;
if (iph->protocol == IPPROTO_TCP)
return handle_tcp(skb, (struct tcphdr *)(((void *)iph) + ihl));
return TC_ACT_OK;
}
SEC("cls_test") int tc_prog(struct __sk_buff *skb)
{
if (skb->protocol == bpf_htons(ETH_P_IP))
return handle_ipv4(skb);
return TC_ACT_OK;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_tc_edt.c |
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("sk_msg1")
int bpf_prog1(struct sk_msg_md *msg)
{
return SK_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "X";
void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
{
}
SEC(".struct_ops")
struct tcp_congestion_ops bpf_nogpltcp = {
.init = (void *)nogpltcp_init,
.name = "bpf_nogpltcp",
};
| linux-master | tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int a[4];
const volatile int off = 4000;
SEC("raw_tp/sys_enter")
int good_prog(const void *ctx)
{
a[0] = (int)(long)ctx;
return a[1];
}
SEC("raw_tp/sys_enter")
int bad_prog(const void *ctx)
{
/* out of bounds access */
return a[off];
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_log_buf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
long tmp;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
return (tmp >> 32) + tmp;
}
SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
}
SEC("tc")
int kfunc_call_test1(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
__u64 a = 1ULL << 32;
__u32 ret;
if (!sk)
return -1;
sk = bpf_sk_fullsock(sk);
if (!sk)
return -1;
a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
ret = a >> 32; /* ret should be 2 */
ret += (__u32)a; /* ret should be 12 */
return ret;
}
SEC("tc")
int kfunc_call_test_ref_btf_id(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
if (pt->a != 42 || pt->b != 108)
ret = -1;
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("tc")
int kfunc_call_test_pass(struct __sk_buff *skb)
{
struct prog_test_pass1 p1 = {};
struct prog_test_pass2 p2 = {};
short a = 0;
__u64 b = 0;
long c = 0;
char d = 0;
int e = 0;
bpf_kfunc_call_test_pass_ctx(skb);
bpf_kfunc_call_test_pass1(&p1);
bpf_kfunc_call_test_pass2(&p2);
bpf_kfunc_call_test_mem_len_pass1(&a, sizeof(a));
bpf_kfunc_call_test_mem_len_pass1(&b, sizeof(b));
bpf_kfunc_call_test_mem_len_pass1(&c, sizeof(c));
bpf_kfunc_call_test_mem_len_pass1(&d, sizeof(d));
bpf_kfunc_call_test_mem_len_pass1(&e, sizeof(e));
bpf_kfunc_call_test_mem_len_fail2(&b, -1);
return 0;
}
struct syscall_test_args {
__u8 data[16];
size_t size;
};
SEC("syscall")
int kfunc_syscall_test(struct syscall_test_args *args)
{
const long size = args->size;
if (size > sizeof(args->data))
return -7; /* -E2BIG */
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
return 0;
}
SEC("syscall")
int kfunc_syscall_test_null(struct syscall_test_args *args)
{
	/* Must be called with args as a NULL pointer.
	 * We deliberately do not check it for NULL, so that the verifier
	 * still considers the pointer possibly non-NULL and allows the load.
	 *
	 * So the following cannot be added:
*
* if (args)
* return -22;
*/
bpf_kfunc_call_test_mem_len_pass1(args, 0);
return 0;
}
SEC("tc")
int kfunc_call_test_get_mem(struct __sk_buff *skb)
{
struct prog_test_ref_kfunc *pt;
unsigned long s = 0;
int *p = NULL;
int ret = 0;
pt = bpf_kfunc_call_test_acquire(&s);
if (pt) {
p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
if (p) {
p[0] = 42;
ret = p[1]; /* 108 */
} else {
ret = -1;
}
if (ret >= 0) {
p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
if (p)
ret = p[0]; /* 42 */
else
ret = -1;
}
bpf_kfunc_call_test_release(pt);
}
return ret;
}
SEC("tc")
int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
{
u32 expected = 5, actual;
actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
return actual != expected ? -1 : 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/kfunc_call_test.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___diff_sz x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_tracing_net.h"
#define NF_DROP 0
#define NF_ACCEPT 1
#define ETH_P_IP 0x0800
#define ETH_P_IPV6 0x86DD
#define IP_MF 0x2000
#define IP_OFFSET 0x1FFF
#define NEXTHDR_FRAGMENT 44
extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
void *buffer, uint32_t buffer__sz) __ksym;
volatile int shootdowns = 0;
static bool is_frag_v4(struct iphdr *iph)
{
int offset;
int flags;
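	/* A packet is a fragment if the More Fragments flag is set or the
	 * fragment offset is non-zero.
	 */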
offset = bpf_ntohs(iph->frag_off);
flags = offset & ~IP_OFFSET;
offset &= IP_OFFSET;
offset <<= 3;
return (flags & IP_MF) || offset;
}
static bool is_frag_v6(struct ipv6hdr *ip6h)
{
/* Simplifying assumption that there are no extension headers
* between fixed header and fragmentation header. This assumption
* is only valid in this test case. It saves us the hassle of
* searching all potential extension headers.
*/
return ip6h->nexthdr == NEXTHDR_FRAGMENT;
}
static int handle_v4(struct sk_buff *skb)
{
struct bpf_dynptr ptr;
u8 iph_buf[20] = {};
struct iphdr *iph;
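	/* bpf_dynptr_slice() returns a pointer to the requested bytes, copying
	 * them into iph_buf when the skb data is not contiguous.
	 */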
if (bpf_dynptr_from_skb(skb, 0, &ptr))
return NF_DROP;
iph = bpf_dynptr_slice(&ptr, 0, iph_buf, sizeof(iph_buf));
if (!iph)
return NF_DROP;
/* Shootdown any frags */
if (is_frag_v4(iph)) {
shootdowns++;
return NF_DROP;
}
return NF_ACCEPT;
}
static int handle_v6(struct sk_buff *skb)
{
struct bpf_dynptr ptr;
struct ipv6hdr *ip6h;
u8 ip6h_buf[40] = {};
if (bpf_dynptr_from_skb(skb, 0, &ptr))
return NF_DROP;
ip6h = bpf_dynptr_slice(&ptr, 0, ip6h_buf, sizeof(ip6h_buf));
if (!ip6h)
return NF_DROP;
/* Shootdown any frags */
if (is_frag_v6(ip6h)) {
shootdowns++;
return NF_DROP;
}
return NF_ACCEPT;
}
SEC("netfilter")
int defrag(struct bpf_nf_ctx *ctx)
{
struct sk_buff *skb = ctx->skb;
switch (bpf_ntohs(skb->protocol)) {
case ETH_P_IP:
return handle_v4(skb);
case ETH_P_IPV6:
return handle_v6(skb);
default:
return NF_ACCEPT;
}
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/ip_check_defrag.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define STACK_MAX_LEN 50
#define GLOBAL_FUNC
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf_global.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 value_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
void *value = ctx->value;
if (value == (void *)0)
return 0;
/* negative offset, verifier failure. */
value_sum += *(__u32 *)(value - 4);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define UDP_TEST_PORT 7777
void *bpf_cast_to_kern_ctx(void *) __ksym;
bool init_csum_partial = false;
bool final_csum_none = false;
bool broken_csum_start = false;
static unsigned int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}
static unsigned int skb_headroom(const struct sk_buff *skb)
{
return skb->data - skb->head;
}
static int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
}
SEC("tc")
int decap_sanity(struct __sk_buff *skb)
{
struct sk_buff *kskb;
struct ipv6hdr ip6h;
struct udphdr udph;
int err;
if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
return TC_ACT_SHOT;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
return TC_ACT_SHOT;
if (ip6h.nexthdr != IPPROTO_UDP)
return TC_ACT_SHOT;
if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
return TC_ACT_SHOT;
if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
return TC_ACT_SHOT;
kskb = bpf_cast_to_kern_ctx(skb);
init_csum_partial = (kskb->ip_summed == CHECKSUM_PARTIAL);
err = bpf_skb_adjust_room(skb, -(s32)(ETH_HLEN + sizeof(ip6h) + sizeof(udph)),
1, BPF_F_ADJ_ROOM_FIXED_GSO);
if (err)
return TC_ACT_SHOT;
final_csum_none = (kskb->ip_summed == CHECKSUM_NONE);
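	/* If the skb stayed CHECKSUM_PARTIAL but csum_start now points beyond
	 * the linear data, the decap left the checksum offsets inconsistent.
	 */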
if (kskb->ip_summed == CHECKSUM_PARTIAL &&
(unsigned int)skb_checksum_start_offset(kskb) >= skb_headlen(kskb))
broken_csum_start = true;
return TC_ACT_SHOT;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/decap_sanity.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("perf_event")
__description("unpriv: spill/fill of different pointers ldx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_ldx(void)
{
asm volatile (" \
r6 = r10; \
r6 += -8; \
if r1 == 0 goto l0_%=; \
r2 = r10; \
r2 += %[__imm_0]; \
*(u64*)(r6 + 0) = r2; \
l0_%=: if r1 != 0 goto l1_%=; \
*(u64*)(r6 + 0) = r1; \
l1_%=: r1 = *(u64*)(r6 + 0); \
r1 = *(u64*)(r1 + %[sample_period]); \
r0 = 0; \
exit; \
" :
: __imm_const(__imm_0,
-(__s32) offsetof(struct bpf_perf_event_data, sample_period) - 8),
__imm_const(sample_period,
offsetof(struct bpf_perf_event_data, sample_period))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
#include <linux/capability.h>
typedef struct { unsigned long long val; } kernel_cap_t;
struct cred {
kernel_cap_t cap_effective;
} __attribute__((preserve_access_index));
char _license[] SEC("license") = "GPL";
SEC("lsm.s/userns_create")
int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
{
kernel_cap_t caps = cred->cap_effective;
__u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
if (ret)
return 0;
ret = -EPERM;
if (caps.val & cap_mask)
return 0;
return -EPERM;
}
| linux-master | tools/testing/selftests/bpf/progs/test_deny_namespace.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define IN16 0x1234
#define IN32 0x12345678U
#define IN64 0x123456789abcdef0ULL
__u16 in16 = 0;
__u32 in32 = 0;
__u64 in64 = 0;
__u16 out16 = 0;
__u32 out32 = 0;
__u64 out64 = 0;
__u16 const16 = 0;
__u32 const32 = 0;
__u64 const64 = 0;
SEC("raw_tp/sys_enter")
int sys_enter(const void *ctx)
{
out16 = __builtin_bswap16(in16);
out32 = __builtin_bswap32(in32);
out64 = __builtin_bswap64(in64);
const16 = ___bpf_swab16(IN16);
const32 = ___bpf_swab32(IN32);
const64 = ___bpf_swab64(IN64);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_endian.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !(h->pprev);
}
static int timer_pending(const struct timer_list * timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}
extern unsigned CONFIG_HZ __kconfig;
#define USER_HZ 100
#define NSEC_PER_SEC 1000000000ULL
static clock_t jiffies_to_clock_t(unsigned long x)
{
	/* The implementation here is tailored to a particular
* setting of USER_HZ.
*/
u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
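	/* If the tick length is an exact multiple of the USER_HZ tick period,
	 * scale by the integer HZ ratio; otherwise convert via nanoseconds.
	 */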
if ((tick_nsec % user_hz_nsec) == 0) {
if (CONFIG_HZ < USER_HZ)
return x * (USER_HZ / CONFIG_HZ);
else
return x / (CONFIG_HZ / USER_HZ);
}
return x * tick_nsec/user_hz_nsec;
}
static clock_t jiffies_delta_to_clock_t(long delta)
{
if (delta <= 0)
return 0;
return jiffies_to_clock_t(delta);
}
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
static bool
inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
{
return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
{
return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
uid_t uid, __u32 seq_num)
{
const struct inet_connection_sock *icsk;
const struct fastopen_queue *fastopenq;
const struct inet_sock *inet;
unsigned long timer_expires;
const struct sock *sp;
__u16 destp, srcp;
__be32 dest, src;
int timer_active;
int rx_queue;
int state;
icsk = &tp->inet_conn;
inet = &icsk->icsk_inet;
sp = &inet->sk;
fastopenq = &icsk->icsk_accept_queue.fastopenq;
dest = inet->inet_daddr;
src = inet->inet_rcv_saddr;
destp = bpf_ntohs(inet->inet_dport);
srcp = bpf_ntohs(inet->inet_sport);
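	/* timer_active follows the /proc/net/tcp convention: 1 retransmit,
	 * 4 zero-window probe, 2 keepalive (sk_timer), 0 none.
	 */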
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
}
state = sp->sk_state;
if (state == TCP_LISTEN) {
rx_queue = sp->sk_ack_backlog;
} else {
rx_queue = tp->rcv_nxt - tp->copied_seq;
if (rx_queue < 0)
rx_queue = 0;
}
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->write_seq - tp->snd_una, rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
icsk->icsk_retransmits, uid,
icsk->icsk_probes_out,
sock_i_ino(sp),
sp->sk_refcnt.refs.counter);
BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
tp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
tp->snd_cwnd,
state == TCP_LISTEN ? fastopenq->max_qlen
: (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
);
return 0;
}
static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
uid_t uid, __u32 seq_num)
{
struct inet_timewait_sock *tw = &ttw->tw_sk;
__u16 destp, srcp;
__be32 dest, src;
long delta;
delta = tw->tw_timer.expires - bpf_jiffies64();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
destp = bpf_ntohs(tw->tw_dport);
srcp = bpf_ntohs(tw->tw_sport);
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
tw->tw_refcnt.refs.counter, tw);
return 0;
}
static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
uid_t uid, __u32 seq_num)
{
struct inet_request_sock *irsk = &treq->req;
struct request_sock *req = &irsk->req;
long ttd;
ttd = req->rsk_timer.expires - bpf_jiffies64();
if (ttd < 0)
ttd = 0;
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, irsk->ir_loc_addr,
irsk->ir_num, irsk->ir_rmt_addr,
bpf_ntohs(irsk->ir_rmt_port));
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
req->num_timeout, uid, 0, 0, 0, req);
return 0;
}
SEC("iter/tcp")
int dump_tcp4(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
struct seq_file *seq = ctx->meta->seq;
struct tcp_timewait_sock *tw;
struct tcp_request_sock *req;
struct tcp_sock *tp;
uid_t uid = ctx->uid;
__u32 seq_num;
if (sk_common == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, " sl "
"local_address "
"rem_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode\n");
if (sk_common->skc_family != AF_INET)
return 0;
tp = bpf_skc_to_tcp_sock(sk_common);
if (tp)
return dump_tcp_sock(seq, tp, uid, seq_num);
tw = bpf_skc_to_tcp_timewait_sock(sk_common);
if (tw)
return dump_tw_sock(seq, tw, uid, seq_num);
req = bpf_skc_to_tcp_request_sock(sk_common);
if (req)
return dump_req_sock(seq, req, uid, seq_num);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
#define SRC_REWRITE_IP6_0 0
#define SRC_REWRITE_IP6_1 0
#define SRC_REWRITE_IP6_2 0
#define SRC_REWRITE_IP6_3 6
#define DST_REWRITE_IP6_0 0
#define DST_REWRITE_IP6_1 0
#define DST_REWRITE_IP6_2 0
#define DST_REWRITE_IP6_3 1
#define DST_REWRITE_PORT6 6666
SEC("cgroup/sendmsg6")
int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
{
if (ctx->type != SOCK_DGRAM)
return 0;
if (!get_set_sk_priority(ctx))
return 0;
/* Rewrite source. */
if (ctx->msg_src_ip6[3] == bpf_htonl(1) ||
ctx->msg_src_ip6[3] == bpf_htonl(0)) {
ctx->msg_src_ip6[0] = bpf_htonl(SRC_REWRITE_IP6_0);
ctx->msg_src_ip6[1] = bpf_htonl(SRC_REWRITE_IP6_1);
ctx->msg_src_ip6[2] = bpf_htonl(SRC_REWRITE_IP6_2);
ctx->msg_src_ip6[3] = bpf_htonl(SRC_REWRITE_IP6_3);
} else {
/* Unexpected source. Reject sendmsg. */
return 0;
}
/* Rewrite destination. */
if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_3);
ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
} else {
/* Unexpected destination. Reject sendmsg. */
return 0;
}
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sendmsg6_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define START_CHAR 'A'
#include "bpf_iter_test_kern_common.h"
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define vm_flags vm_start
char _license[] SEC("license") = "GPL";
struct callback_ctx {
int dummy;
};
static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
struct callback_ctx *data)
{
/* writing to vma, which is illegal */
vma->vm_start = 0xffffffffff600000;
return 0;
}
SEC("raw_tp/sys_enter")
int handle_getpid(void)
{
struct task_struct *task = bpf_get_current_task_btf();
struct callback_ctx data = {};
bpf_find_vma(task, 0, write_vma, &data, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/find_vma_fail1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
__u16 last_addr16_n = __bpf_htons(1);
__u16 active_lport_n = 0;
__u16 active_lport_h = 0;
__u16 passive_lport_n = 0;
__u16 passive_lport_h = 0;
/* options received at passive side */
unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
unsigned int nr_hwtstamp = 0;
/* Check the header received from the active side */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
{
union {
struct tcphdr th;
struct ipv6hdr ip6;
struct tcp_exprm_opt exprm_opt;
struct tcp_opt reg_opt;
__u8 data[100]; /* IPv6 (40) + Max TCP hdr (60) */
} hdr = {};
__u64 load_flags = check_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
struct tcphdr *pth;
int ret;
hdr.reg_opt.kind = 0xB9;
/* The option is 4 bytes long instead of 2 bytes */
ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, 2, load_flags);
if (ret != -ENOSPC)
RET_CG_ERR(ret);
/* Test searching magic with regular kind */
hdr.reg_opt.len = 4;
ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
load_flags);
if (ret != -EINVAL)
RET_CG_ERR(ret);
hdr.reg_opt.len = 0;
ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
load_flags);
if (ret != 4 || hdr.reg_opt.len != 4 || hdr.reg_opt.kind != 0xB9 ||
hdr.reg_opt.data[0] != 0xfa || hdr.reg_opt.data[1] != 0xce)
RET_CG_ERR(ret);
/* Test searching experimental option with invalid kind length */
hdr.exprm_opt.kind = TCPOPT_EXP;
hdr.exprm_opt.len = 5;
hdr.exprm_opt.magic = 0;
ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
load_flags);
if (ret != -EINVAL)
RET_CG_ERR(ret);
/* Test searching experimental option with 0 magic value */
hdr.exprm_opt.len = 4;
ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
load_flags);
if (ret != -ENOMSG)
RET_CG_ERR(ret);
hdr.exprm_opt.magic = __bpf_htons(0xeB9F);
ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
load_flags);
if (ret != 4 || hdr.exprm_opt.len != 4 ||
hdr.exprm_opt.kind != TCPOPT_EXP ||
hdr.exprm_opt.magic != __bpf_htons(0xeB9F))
RET_CG_ERR(ret);
if (!check_syn)
return CG_OK;
/* Test loading from skops->syn_skb if sk_state == TCP_NEW_SYN_RECV
*
* Test loading from tp->saved_syn for other sk_state.
*/
ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr.ip6,
sizeof(hdr.ip6));
if (ret != -ENOSPC)
RET_CG_ERR(ret);
if (hdr.ip6.saddr.s6_addr16[7] != last_addr16_n ||
hdr.ip6.daddr.s6_addr16[7] != last_addr16_n)
RET_CG_ERR(0);
ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr, sizeof(hdr));
if (ret < 0)
RET_CG_ERR(ret);
pth = (struct tcphdr *)(&hdr.ip6 + 1);
if (pth->dest != passive_lport_n || pth->source != active_lport_n)
RET_CG_ERR(0);
ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN, &hdr, sizeof(hdr));
if (ret < 0)
RET_CG_ERR(ret);
if (hdr.th.dest != passive_lport_n || hdr.th.source != active_lport_n)
RET_CG_ERR(0);
return CG_OK;
}
static int check_active_syn_in(struct bpf_sock_ops *skops)
{
return __check_active_hdr_in(skops, true);
}
static int check_active_hdr_in(struct bpf_sock_ops *skops)
{
struct tcphdr *th;
if (__check_active_hdr_in(skops, false) == CG_ERR)
return CG_ERR;
th = skops->skb_data;
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (tcp_hdrlen(th) < skops->skb_len)
nr_data++;
if (th->fin)
nr_fin++;
if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
nr_pure_ack++;
if (skops->skb_hwtstamp)
nr_hwtstamp++;
return CG_OK;
}
static int active_opt_len(struct bpf_sock_ops *skops)
{
int err;
/* Reserve more than enough to allow the -EEXIST test in
* the write_active_opt().
*/
err = bpf_reserve_hdr_opt(skops, 12, 0);
if (err)
RET_CG_ERR(err);
return CG_OK;
}
static int write_active_opt(struct bpf_sock_ops *skops)
{
struct tcp_exprm_opt exprm_opt = {};
struct tcp_opt win_scale_opt = {};
struct tcp_opt reg_opt = {};
struct tcphdr *th;
int err, ret;
exprm_opt.kind = TCPOPT_EXP;
exprm_opt.len = 4;
exprm_opt.magic = __bpf_htons(0xeB9F);
reg_opt.kind = 0xB9;
reg_opt.len = 4;
reg_opt.data[0] = 0xfa;
reg_opt.data[1] = 0xce;
win_scale_opt.kind = TCPOPT_WINDOW;
err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
if (err)
RET_CG_ERR(err);
/* Store the same exprm option */
err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
if (err != -EEXIST)
RET_CG_ERR(err);
	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
if (err)
RET_CG_ERR(err);
	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
if (err != -EEXIST)
RET_CG_ERR(err);
/* Check the option has been written and can be searched */
ret = bpf_load_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
if (ret != 4 || exprm_opt.len != 4 || exprm_opt.kind != TCPOPT_EXP ||
exprm_opt.magic != __bpf_htons(0xeB9F))
RET_CG_ERR(ret);
reg_opt.len = 0;
	ret = bpf_load_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
if (ret != 4 || reg_opt.len != 4 || reg_opt.kind != 0xB9 ||
reg_opt.data[0] != 0xfa || reg_opt.data[1] != 0xce)
RET_CG_ERR(ret);
th = skops->skb_data;
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (th->syn) {
active_lport_h = skops->local_port;
active_lport_n = th->source;
		/* Search the win scale option written by the kernel
* in the SYN packet.
*/
ret = bpf_load_hdr_opt(skops, &win_scale_opt,
sizeof(win_scale_opt), 0);
if (ret != 3 || win_scale_opt.len != 3 ||
win_scale_opt.kind != TCPOPT_WINDOW)
RET_CG_ERR(ret);
		/* Write the win scale option that the kernel
* has already written.
*/
err = bpf_store_hdr_opt(skops, &win_scale_opt,
sizeof(win_scale_opt), 0);
if (err != -EEXIST)
RET_CG_ERR(err);
}
return CG_OK;
}
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
__u8 tcp_flags = skops_tcp_flags(skops);
if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
/* Check the SYN from bpf_sock_ops_kern->syn_skb */
return check_active_syn_in(skops);
/* Passive side should have cleared the write hdr cb by now */
if (skops->local_port == passive_lport_h)
RET_CG_ERR(0);
return active_opt_len(skops);
}
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
if (skops->local_port == passive_lport_h)
RET_CG_ERR(0);
return write_active_opt(skops);
}
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
/* Passive side is not writing any non-standard/unknown
* option, so the active side should never be called.
*/
if (skops->local_port == active_lport_h)
RET_CG_ERR(0);
return check_active_hdr_in(skops);
}
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
int err;
/* No more write hdr cb */
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags &
~BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
/* Recheck the SYN but check the tp->saved_syn this time */
err = check_active_syn_in(skops);
if (err == CG_ERR)
return err;
nr_syn++;
/* The ack has header option written by the active side also */
return check_active_hdr_in(skops);
}
SEC("sockops")
int misc_estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
switch (skops->op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
passive_lport_h = skops->local_port;
passive_lport_n = __bpf_htons(passive_lport_h);
bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
&true_val, sizeof(true_val));
set_hdr_cb_flags(skops, 0);
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
set_hdr_cb_flags(skops, 0);
break;
case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
return handle_parse_hdr(skops);
case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
return handle_hdr_opt_len(skops);
case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
return handle_write_hdr_opt(skops);
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
return handle_passive_estab(skops);
}
return CG_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_bad_zero_sz_arr x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_zero_sz_arr.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf_sockopt_helpers.h>
char _license[] SEC("license") = "GPL";
struct svc_addr {
__be32 addr[4];
__be16 port;
};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct svc_addr);
} service_mapping SEC(".maps");
SEC("cgroup/connect6")
int connect6(struct bpf_sock_addr *ctx)
{
struct sockaddr_in6 sa = {};
struct svc_addr *orig;
/* Force local address to [::1]:22223. */
sa.sin6_family = AF_INET6;
sa.sin6_port = bpf_htons(22223);
sa.sin6_addr.s6_addr32[3] = bpf_htonl(1);
if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
return 0;
/* Rewire service [fc00::1]:60000 to backend [::1]:60124. */
if (ctx->user_port == bpf_htons(60000)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!orig)
return 0;
orig->addr[0] = ctx->user_ip6[0];
orig->addr[1] = ctx->user_ip6[1];
orig->addr[2] = ctx->user_ip6[2];
orig->addr[3] = ctx->user_ip6[3];
orig->port = ctx->user_port;
ctx->user_ip6[0] = 0;
ctx->user_ip6[1] = 0;
ctx->user_ip6[2] = 0;
ctx->user_ip6[3] = bpf_htonl(1);
ctx->user_port = bpf_htons(60124);
}
return 1;
}
SEC("cgroup/getsockname6")
int getsockname6(struct bpf_sock_addr *ctx)
{
if (!get_set_sk_priority(ctx))
return 1;
/* Expose local server as [fc00::1]:60000 to client. */
if (ctx->user_port == bpf_htons(60124)) {
ctx->user_ip6[0] = bpf_htonl(0xfc000000);
ctx->user_ip6[1] = 0;
ctx->user_ip6[2] = 0;
ctx->user_ip6[3] = bpf_htonl(1);
ctx->user_port = bpf_htons(60000);
}
return 1;
}
SEC("cgroup/getpeername6")
int getpeername6(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
if (!get_set_sk_priority(ctx))
return 1;
/* Expose service [fc00::1]:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60124)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
if (orig) {
ctx->user_ip6[0] = orig->addr[0];
ctx->user_ip6[1] = orig->addr[1];
ctx->user_ip6[2] = orig->addr[2];
ctx->user_ip6[3] = orig->addr[3];
ctx->user_port = orig->port;
}
}
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/connect_force_port6.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_b SEC(".maps");
#define MAGIC_VALUE 0xabcd1234
pid_t target_pid = 0;
int mismatch_cnt = 0;
int enter_cnt = 0;
int exit_cnt = 0;
SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
int err;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
/* populate value 0 */
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
/* delete value 0 */
err = bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
if (err)
return 0;
/* value is not available */
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, 0);
if (ptr)
return 0;
/* re-populate the value */
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
__sync_fetch_and_add(&enter_cnt, 1);
*ptr = MAGIC_VALUE + enter_cnt;
return 0;
}
SEC("tp_btf/sys_exit")
int BPF_PROG(on_exit, struct pt_regs *regs, long id)
{
struct task_struct *task;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return 0;
__sync_fetch_and_add(&exit_cnt, 1);
if (*ptr != MAGIC_VALUE + exit_cnt)
__sync_fetch_and_add(&mismatch_cnt, 1);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct other_val {
long long foo;
long long bar;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct other_val);
} map_hash_16b SEC(".maps");
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
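/* All tests below follow the same pattern: build a zeroed 8-byte key on the
* stack, look it up in one of the hash maps above, then hand the value pointer
* to a helper (bpf_probe_read_kernel or bpf_trace_printk) or to another map
* helper with a particular size or offset, so the verifier's bounds checks on
* map value pointers are exercised for both good and bad ranges.
*/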
SEC("tracepoint")
__description("helper access to map: full range")
__success
__naked void access_to_map_full_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[sizeof_test_val]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(sizeof_test_val, sizeof(struct test_val))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: partial range")
__success
__naked void access_to_map_partial_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: empty range")
__failure __msg("invalid access to map value, value_size=48 off=0 size=0")
__naked void access_to_map_empty_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 0; \
call %[bpf_trace_printk]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_trace_printk),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=0 size=56")
__naked void map_out_of_bound_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) + 8)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: negative range")
__failure __msg("R2 min value is negative")
__naked void access_to_map_negative_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = -8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): full range")
__success
__naked void via_const_imm_full_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): partial range")
__success
__naked void via_const_imm_partial_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = 8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): empty range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=0")
__naked void via_const_imm_empty_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = 0; \
call %[bpf_trace_printk]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_trace_printk),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
__naked void imm_out_of_bound_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_imm_negative_range_adjustment_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = -8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_imm_negative_range_adjustment_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r1 += %[test_val_foo]; \
r2 = -1; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): full range")
__success
__naked void via_const_reg_full_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = %[test_val_foo]; \
r1 += r3; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): partial range")
__success
__naked void via_const_reg_partial_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = %[test_val_foo]; \
r1 += r3; \
r2 = 8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): empty range")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void via_const_reg_empty_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = 0; \
r1 += r3; \
r2 = 0; \
call %[bpf_trace_printk]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_trace_printk),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
__naked void reg_out_of_bound_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = %[test_val_foo]; \
r1 += r3; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_reg_negative_range_adjustment_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = %[test_val_foo]; \
r1 += r3; \
r2 = -8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_reg_negative_range_adjustment_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = %[test_val_foo]; \
r1 += r3; \
r2 = -1; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via variable): full range")
__success
__naked void map_via_variable_full_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[test_val_foo] goto l0_%=; \
r1 += r3; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via variable): partial range")
__success
__naked void map_via_variable_partial_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[test_val_foo] goto l0_%=; \
r1 += r3; \
r2 = 8; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via variable): empty range")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_via_variable_empty_range(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[test_val_foo] goto l0_%=; \
r1 += r3; \
r2 = 0; \
call %[bpf_trace_printk]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_trace_printk),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via variable): no max check")
__failure __msg("R1 unbounded memory access")
__naked void via_variable_no_max_check_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
r1 += r3; \
r2 = 1; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=48 off=4 size=45")
__naked void via_variable_wrong_max_check_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[test_val_foo] goto l0_%=; \
r1 += r3; \
r2 = %[__imm_0]; \
r3 = 0; \
call %[bpf_probe_read_kernel]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_probe_read_kernel),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using <, good access")
__success
__naked void bounds_check_using_good_access_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 < 32 goto l1_%=; \
r0 = 0; \
l0_%=: exit; \
l1_%=: r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using <, bad access")
__failure __msg("R1 unbounded memory access")
__naked void bounds_check_using_bad_access_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 < 32 goto l1_%=; \
r1 += r3; \
l0_%=: r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using <=, good access")
__success
__naked void bounds_check_using_good_access_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 <= 32 goto l1_%=; \
r0 = 0; \
l0_%=: exit; \
l1_%=: r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using <=, bad access")
__failure __msg("R1 unbounded memory access")
__naked void bounds_check_using_bad_access_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 <= 32 goto l1_%=; \
r1 += r3; \
l0_%=: r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access")
__success
__naked void check_using_s_good_access_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 s< 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s< 0 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access 2")
__success
__naked void using_s_good_access_2_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 s< 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s< -3 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<, bad access")
__failure __msg("R1 min value is negative")
__naked void check_using_s_bad_access_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u64*)(r0 + 0); \
if r3 s< 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s< -3 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access")
__success
__naked void check_using_s_good_access_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 s<= 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s<= 0 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access 2")
__success
__naked void using_s_good_access_2_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 s<= 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s<= -3 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, bad access")
__failure __msg("R1 min value is negative")
__naked void check_using_s_bad_access_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r3 = *(u64*)(r0 + 0); \
if r3 s<= 32 goto l1_%=; \
l2_%=: r0 = 0; \
l0_%=: exit; \
l1_%=: if r3 s<= -3 goto l2_%=; \
r1 += r3; \
r0 = 0; \
*(u8*)(r1 + 0) = r0; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b)
: __clobber_all);
}
SEC("tracepoint")
__description("map lookup helper access to map")
__success
__naked void lookup_helper_access_to_map(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("tracepoint")
__description("map update helper access to map")
__success
__naked void update_helper_access_to_map(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r4 = 0; \
r3 = r0; \
r2 = r0; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_update_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_map_update_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("tracepoint")
__description("map update helper access to map: wrong size")
__failure __msg("invalid access to map value, value_size=8 off=0 size=16")
__naked void access_to_map_wrong_size(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r4 = 0; \
r3 = r0; \
r2 = r0; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_update_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_map_update_elem),
__imm_addr(map_hash_16b),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm)")
__success
__naked void adjusted_map_via_const_imm(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r2 += %[other_val_bar]; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(other_val_bar, offsetof(struct other_val, bar))
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
__naked void imm_out_of_bound_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r2 += %[__imm_0]; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(__imm_0, sizeof(struct other_val) - 4)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void imm_out_of_bound_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r2 += -4; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg)")
__success
__naked void adjusted_map_via_const_reg(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = %[other_val_bar]; \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(other_val_bar, offsetof(struct other_val, bar))
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
__naked void reg_out_of_bound_1(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = %[__imm_0]; \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(__imm_0, sizeof(struct other_val) - 4)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void reg_out_of_bound_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = -4; \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via variable)")
__success
__naked void to_adjusted_map_via_variable(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[other_val_bar] goto l0_%=; \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(other_val_bar, offsetof(struct other_val, bar))
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via variable): no max check")
__failure
__msg("R2 unbounded memory access, make sure to bounds check any such access")
__naked void via_variable_no_max_check_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = *(u32*)(r0 + 0); \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b)
: __clobber_all);
}
SEC("tracepoint")
__description("map helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=16 off=9 size=8")
__naked void via_variable_wrong_max_check_2(void)
{
asm volatile (" \
r2 = r10; \
r2 += -8; \
r1 = 0; \
*(u64*)(r2 + 0) = r1; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r2 = r0; \
r3 = *(u32*)(r0 + 0); \
if r3 > %[__imm_0] goto l0_%=; \
r2 += r3; \
r1 = %[map_hash_16b] ll; \
call %[bpf_map_lookup_elem]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_16b),
__imm_const(__imm_0, offsetof(struct other_val, bar) + 1)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_helper_value_access.c |
#include <stddef.h>
#include <inttypes.h>
#include <errno.h>
#include <linux/seg6_local.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
#define SR6_FLAG_ALERT (1 << 4)
#define BPF_PACKET_HEADER __attribute__((packed))
struct ip6_t {
unsigned int ver:4;
unsigned int priority:8;
unsigned int flow_label:20;
unsigned short payload_len;
unsigned char next_header;
unsigned char hop_limit;
unsigned long long src_hi;
unsigned long long src_lo;
unsigned long long dst_hi;
unsigned long long dst_lo;
} BPF_PACKET_HEADER;
struct ip6_addr_t {
unsigned long long hi;
unsigned long long lo;
} BPF_PACKET_HEADER;
struct ip6_srh_t {
unsigned char nexthdr;
unsigned char hdrlen;
unsigned char type;
unsigned char segments_left;
unsigned char first_segment;
unsigned char flags;
unsigned short tag;
struct ip6_addr_t segments[0];
} BPF_PACKET_HEADER;
struct sr6_tlv_t {
unsigned char type;
unsigned char len;
unsigned char value[0];
} BPF_PACKET_HEADER;
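// get_srh() walks the packet linearly: it checks the IP version nibble,
// requires next_header == 43 (IPv6 Routing header) with routing type 4
// (Segment Routing header), and returns a direct pointer into the packet,
// or NULL if any bounds or field check fails.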
static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
struct ip6_t *ip;
uint8_t *ipver;
data_end = (void *)(long)skb->data_end;
cursor = (void *)(long)skb->data;
ipver = (uint8_t *)cursor;
if ((void *)ipver + sizeof(*ipver) > data_end)
return NULL;
if ((*ipver >> 4) != 6)
return NULL;
ip = cursor_advance(cursor, sizeof(*ip));
if ((void *)ip + sizeof(*ip) > data_end)
return NULL;
if (ip->next_header != 43)
return NULL;
srh = cursor_advance(cursor, sizeof(*srh));
if ((void *)srh + sizeof(*srh) > data_end)
return NULL;
if (srh->type != 4)
return NULL;
return srh;
}
static __always_inline
int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
uint32_t old_pad, uint32_t pad_off)
{
int err;
if (new_pad != old_pad) {
err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
(int) new_pad - (int) old_pad);
if (err)
return err;
}
if (new_pad > 0) {
char pad_tlv_buf[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0};
struct sr6_tlv_t *pad_tlv = (struct sr6_tlv_t *) pad_tlv_buf;
pad_tlv->type = SR6_TLV_PADDING;
pad_tlv->len = new_pad - 2;
err = bpf_lwt_seg6_store_bytes(skb, pad_off,
(void *)pad_tlv_buf, new_pad);
if (err)
return err;
}
return 0;
}
static __always_inline
int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t *tlv_off, uint32_t *pad_size,
uint32_t *pad_off)
{
uint32_t srh_off, cur_off;
int offset_valid = 0;
int err;
srh_off = (char *)srh - (char *)(long)skb->data;
// cur_off = end of segments, start of possible TLVs
cur_off = srh_off + sizeof(*srh) +
sizeof(struct ip6_addr_t) * (srh->first_segment + 1);
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
#pragma clang loop unroll(full)
for (int i = 0; i < 10; i++) {
struct sr6_tlv_t tlv;
if (cur_off == *tlv_off)
offset_valid = 1;
if (cur_off >= srh_off + ((srh->hdrlen + 1) << 3))
break;
err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
if (err)
return err;
if (tlv.type == SR6_TLV_PADDING) {
*pad_size = tlv.len + sizeof(tlv);
*pad_off = cur_off;
if (*tlv_off == srh_off) {
*tlv_off = cur_off;
offset_valid = 1;
}
break;
} else if (tlv.type == SR6_TLV_HMAC) {
break;
}
cur_off += sizeof(tlv) + tlv.len;
} // we reached the padding or HMAC TLVs, or the end of the SRH
if (*pad_off == 0)
*pad_off = cur_off;
if (*tlv_off == -1)
*tlv_off = cur_off;
else if (!offset_valid)
return -EINVAL;
return 0;
}
static __always_inline
int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
uint8_t len_remaining, new_pad;
uint32_t pad_off = 0;
uint32_t pad_size = 0;
uint32_t partial_srh_len;
int err;
if (tlv_off != -1)
tlv_off += srh_off;
if (itlv->type == SR6_TLV_PADDING || itlv->type == SR6_TLV_HMAC)
return -EINVAL;
err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
if (err)
return err;
err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
if (err)
return err;
err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
if (err)
return err;
// the following can't be moved inside update_tlv_pad because the
// bpf verifier has some issues with it
pad_off += sizeof(*itlv) + itlv->len;
partial_srh_len = pad_off - srh_off;
len_remaining = partial_srh_len % 8;
new_pad = 8 - len_remaining;
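// Example: with pad_off - srh_off == 44, len_remaining is 4 and new_pad is 4,
// i.e. a Pad TLV with a 2-byte header and 2 value bytes. The two cases below
// handle the corners: a single leftover byte cannot hold the 2-byte TLV
// header (so pad with 9 bytes instead), and an already 8-byte-aligned SRH
// needs no padding at all.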
if (new_pad == 1) // cannot pad for 1 byte only
new_pad = 9;
else if (new_pad == 8)
new_pad = 0;
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
static __always_inline
int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t tlv_off)
{
uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
uint8_t len_remaining, new_pad;
uint32_t partial_srh_len;
uint32_t pad_off = 0;
uint32_t pad_size = 0;
struct sr6_tlv_t tlv;
int err;
tlv_off += srh_off;
err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
if (err)
return err;
err = bpf_skb_load_bytes(skb, tlv_off, &tlv, sizeof(tlv));
if (err)
return err;
err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, -(sizeof(tlv) + tlv.len));
if (err)
return err;
pad_off -= sizeof(tlv) + tlv.len;
partial_srh_len = pad_off - srh_off;
len_remaining = partial_srh_len % 8;
new_pad = 8 - len_remaining;
if (new_pad == 1) // cannot pad for 1 byte only
new_pad = 9;
else if (new_pad == 8)
new_pad = 0;
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
static __always_inline
int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
{
int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
((srh->first_segment + 1) << 4);
struct sr6_tlv_t tlv;
if (bpf_skb_load_bytes(skb, tlv_offset, &tlv, sizeof(struct sr6_tlv_t)))
return 0;
if (tlv.type == SR6_TLV_EGRESS && tlv.len == 18) {
struct ip6_addr_t egr_addr;
if (bpf_skb_load_bytes(skb, tlv_offset + 4, &egr_addr, 16))
return 0;
// check if egress TLV value is correct
if (bpf_be64_to_cpu(egr_addr.hi) == 0xfd00000000000000 &&
bpf_be64_to_cpu(egr_addr.lo) == 0x4)
return 1;
}
return 0;
}
// This function will push a SRH with segments fd00::1, fd00::2, fd00::3,
// fd00::4
SEC("encap_srh")
int __encap_srh(struct __sk_buff *skb)
{
unsigned long long hi = 0xfd00000000000000;
struct ip6_addr_t *seg;
struct ip6_srh_t *srh;
char srh_buf[72]; // room for 4 segments
int err;
srh = (struct ip6_srh_t *)srh_buf;
srh->nexthdr = 0;
srh->hdrlen = 8;
srh->type = 4;
srh->segments_left = 3;
srh->first_segment = 3;
srh->flags = 0;
srh->tag = 0;
seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
#pragma clang loop unroll(full)
for (unsigned long long lo = 0; lo < 4; lo++) {
seg->lo = bpf_cpu_to_be64(4 - lo);
seg->hi = bpf_cpu_to_be64(hi);
seg = (struct ip6_addr_t *)((char *)seg + sizeof(*seg));
}
err = bpf_lwt_push_encap(skb, 0, (void *)srh, sizeof(srh_buf));
if (err)
return BPF_DROP;
return BPF_REDIRECT;
}
// Add an Egress TLV with value fd00::4 (the address checked by has_egr_tlv()),
// set the Alert flag, and apply an End.X action towards fc42::1
SEC("add_egr_x")
int __add_egr_x(struct __sk_buff *skb)
{
unsigned long long hi = 0xfc42000000000000;
unsigned long long lo = 0x1;
struct ip6_srh_t *srh = get_srh(skb);
uint8_t new_flags = SR6_FLAG_ALERT;
struct ip6_addr_t addr;
int err, offset;
if (srh == NULL)
return BPF_DROP;
uint8_t tlv[20] = {2, 18, 0, 0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4};
err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
(struct sr6_tlv_t *)&tlv, 20);
if (err)
return BPF_DROP;
offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags);
err = bpf_lwt_seg6_store_bytes(skb, offset,
(void *)&new_flags, sizeof(new_flags));
if (err)
return BPF_DROP;
addr.lo = bpf_cpu_to_be64(lo);
addr.hi = bpf_cpu_to_be64(hi);
err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
(void *)&addr, sizeof(addr));
if (err)
return BPF_DROP;
return BPF_REDIRECT;
}
// Pop the Egress TLV, reset the flags, change the tag to 2442 and finally do a
// simple End action
SEC("pop_egr")
int __pop_egr(struct __sk_buff *skb)
{
struct ip6_srh_t *srh = get_srh(skb);
uint16_t new_tag = bpf_htons(2442);
uint8_t new_flags = 0;
int err, offset;
if (srh == NULL)
return BPF_DROP;
if (srh->flags != SR6_FLAG_ALERT)
return BPF_DROP;
if (srh->hdrlen != 11) // 4 segments + Egress TLV + Padding TLV
return BPF_DROP;
if (!has_egr_tlv(skb, srh))
return BPF_DROP;
err = delete_tlv(skb, srh, 8 + (srh->first_segment + 1) * 16);
if (err)
return BPF_DROP;
offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags);
if (bpf_lwt_seg6_store_bytes(skb, offset, (void *)&new_flags,
sizeof(new_flags)))
return BPF_DROP;
offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, tag);
if (bpf_lwt_seg6_store_bytes(skb, offset, (void *)&new_tag,
sizeof(new_tag)))
return BPF_DROP;
return BPF_OK;
}
// Check that the Egress TLV and the Alert flag have been removed and that the
// tag is correct, then apply an End.T action to reach the last segment
SEC("inspect_t")
int __inspect_t(struct __sk_buff *skb)
{
struct ip6_srh_t *srh = get_srh(skb);
int table = 117;
int err;
if (srh == NULL)
return BPF_DROP;
if (srh->flags != 0)
return BPF_DROP;
if (srh->tag != bpf_htons(2442))
return BPF_DROP;
if (srh->hdrlen != 8) // 4 segments
return BPF_DROP;
err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_T,
(void *)&table, sizeof(table));
if (err)
return BPF_DROP;
return BPF_REDIRECT;
}
char __license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_lwt_seg6local.c |
#include "core_reloc_types.h"
void f1(struct core_reloc_nesting___err_dup_incompat_types__1 x) {}
void f2(struct core_reloc_nesting___err_dup_incompat_types__2 x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_nesting___err_dup_incompat_types.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <limits.h>
#define AUTOBIND_LEN 6
char sun_path[AUTOBIND_LEN];
#define NR_CASES 5
int sndbuf_setsockopt[NR_CASES] = {-1, 0, 8192, INT_MAX / 2, INT_MAX};
int sndbuf_getsockopt[NR_CASES] = {-1, -1, -1, -1, -1};
int sndbuf_getsockopt_expected[NR_CASES];
static inline int cmpname(struct unix_sock *unix_sk)
{
int i;
for (i = 0; i < AUTOBIND_LEN; i++) {
if (unix_sk->addr->name->sun_path[i] != sun_path[i])
return -1;
}
return 0;
}
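/* change_sndbuf runs as a unix socket iterator: it matches the autobound
* (abstract, sun_path[0] == 0) socket whose name userspace copied into the
* sun_path global, then walks the sndbuf_setsockopt[] cases, applying each
* with bpf_setsockopt(SO_SNDBUF) and recording what bpf_getsockopt() reports
* so userspace can compare against sndbuf_getsockopt_expected[].
*/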
SEC("iter/unix")
int change_sndbuf(struct bpf_iter__unix *ctx)
{
struct unix_sock *unix_sk = ctx->unix_sk;
int i, err;
if (!unix_sk || !unix_sk->addr)
return 0;
if (unix_sk->addr->name->sun_path[0])
return 0;
if (cmpname(unix_sk))
return 0;
for (i = 0; i < NR_CASES; i++) {
err = bpf_setsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
&sndbuf_setsockopt[i],
sizeof(sndbuf_setsockopt[i]));
if (err)
break;
err = bpf_getsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
&sndbuf_getsockopt[i],
sizeof(sndbuf_getsockopt[i]));
if (err)
break;
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for bitfield.
*
* Copyright (c) 2019 Facebook
*/
#include <stdbool.h>
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfields_only_mixed_types {
* int a: 3;
* long b: 2;
* _Bool c: 1;
* enum {
* A = 0,
* B = 1,
* } d: 1;
* short e: 5;
* int: 20;
* unsigned int f: 30;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfields_only_mixed_types {
int a: 3;
long b: 2;
bool c: 1; /* it's really a _Bool type */
enum {
A, /* A = 0, dumper is very explicit */
B, /* B = 1, same */
} d: 1;
short e: 5;
/* 20-bit padding here */
unsigned f: 30; /* this gets aligned on 4-byte boundary */
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfield_mixed_with_others {
* char: 4;
* int a: 4;
* short b;
* long c;
* long d: 8;
* int e;
* int f;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfield_mixed_with_others {
char: 4; /* char is enough as a backing field */
int a: 4;
/* 8-bit implicit padding */
short b; /* combined with previous bitfield */
/* 4 more bytes of implicit padding */
long c;
long d: 8;
/* 24 bits implicit padding */
int e; /* combined with previous bitfield */
int f;
/* 4 bytes of padding */
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfield_flushed {
* int a: 4;
* long: 60;
* long b: 16;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfield_flushed {
int a: 4;
long: 0; /* flush until next natural alignment boundary */
long b: 16;
};
int f(struct {
struct bitfields_only_mixed_types _1;
struct bitfield_mixed_with_others _2;
struct bitfield_flushed _3;
} *_)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + val;
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
return f2(var, skb) + val;
}
__attribute__ ((noinline))
int f4(struct __sk_buff *skb)
{
return f3(1, skb, 2);
}
__attribute__ ((noinline))
int f5(struct __sk_buff *skb)
{
return f4(skb);
}
__attribute__ ((noinline))
int f6(struct __sk_buff *skb)
{
return f5(skb);
}
__attribute__ ((noinline))
int f7(struct __sk_buff *skb)
{
return f6(skb);
}
SEC("tc")
__success
int global_func4(struct __sk_buff *skb)
{
return f7(skb);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func4.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_map SEC(".maps");
SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
return SK_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr *ptr;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 16);
} array_map SEC(".maps");
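/* The programs below exercise the verifier's reference tracking across
* bpf_for_each_map_elem() callbacks: a reference released or acquired inside
* a callback must not be credited to (or leaked from) the caller's frame.
* The "?" prefix in SEC() disables auto-loading so the test can load each
* program individually.
*/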
static __noinline int cb1(void *map, void *key, void *value, void *ctx)
{
void *p = *(void **)ctx;
bpf_kfunc_call_test_release(p);
/* Without the fix this would cause underflow */
return 0;
}
SEC("?tc")
int underflow_prog(void *ctx)
{
struct prog_test_ref_kfunc *p;
unsigned long sl = 0;
p = bpf_kfunc_call_test_acquire(&sl);
if (!p)
return 0;
bpf_for_each_map_elem(&array_map, cb1, &p, 0);
return 0;
}
static __always_inline int cb2(void *map, void *key, void *value, void *ctx)
{
unsigned long sl = 0;
*(void **)ctx = bpf_kfunc_call_test_acquire(&sl);
/* Without the fix this would leak memory */
return 0;
}
SEC("?tc")
int leak_prog(void *ctx)
{
struct prog_test_ref_kfunc *p;
struct map_value *v;
v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v)
return 0;
p = NULL;
bpf_for_each_map_elem(&array_map, cb2, &p, 0);
p = bpf_kptr_xchg(&v->ptr, p);
if (p)
bpf_kfunc_call_test_release(p);
return 0;
}
static __always_inline int cb(void *map, void *key, void *value, void *ctx)
{
return 0;
}
static __always_inline int cb3(void *map, void *key, void *value, void *ctx)
{
unsigned long sl = 0;
void *p;
bpf_kfunc_call_test_acquire(&sl);
bpf_for_each_map_elem(&array_map, cb, &p, 0);
/* It should only complain here, not in cb. This is why we need
* callback_ref to be set to frameno.
*/
return 0;
}
SEC("?tc")
int nested_cb(void *ctx)
{
struct prog_test_ref_kfunc *p;
unsigned long sl = 0;
int sp = 0;
p = bpf_kfunc_call_test_acquire(&sl);
if (!p)
return 0;
bpf_for_each_map_elem(&array_map, cb3, &sp, 0);
bpf_kfunc_call_test_release(p);
return 0;
}
SEC("?tc")
int non_cb_transfer_ref(void *ctx)
{
struct prog_test_ref_kfunc *p;
unsigned long sl = 0;
p = bpf_kfunc_call_test_acquire(&sl);
if (!p)
return 0;
cb1(NULL, NULL, NULL, &p);
bpf_kfunc_call_test_acquire(&sl);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/cb_refs.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
struct task_ls_map {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} task_ls_map SEC(".maps");
long gp_seq;
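/* Deleting the task-local storage frees it via call_rcu_tasks_trace(), so the
* syscall program below triggers an RCU-tasks-trace grace period, while the
* kprobe on rcu_tasks_trace_postgp counts completed grace periods in gp_seq
* for userspace to observe.
*/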
SEC("syscall")
int do_call_rcu_tasks_trace(void *ctx)
{
struct task_struct *current;
int *v;
current = bpf_get_current_task_btf();
v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!v)
return 1;
/* Invoke call_rcu_tasks_trace */
return bpf_task_storage_delete(&task_ls_map, current);
}
SEC("kprobe/rcu_tasks_trace_postgp")
int rcu_tasks_trace_postgp(void *ctx)
{
__sync_add_and_fetch(&gp_seq, 1);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define barrier_var(var) /**/
/* undef #define UNROLL */
#define INLINE /**/
#include "profiler.inc.h"
| linux-master | tools/testing/selftests/bpf/progs/profiler2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, long);
} map_a SEC(".maps");
__u32 user_data, key_serial, target_pid;
__u64 flags, task_storage_val, cgroup_id;
struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
void bpf_key_put(struct bpf_key *key) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
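/* Each program below covers one locking scenario; the "?" prefix in SEC()
* disables auto-loading so the test can load them one at a time, expecting
* the ones that misuse the RCU read-side critical section (missing or
* unbalanced lock/unlock, use of an RCU pointer outside the region, or
* sleepable calls inside it) to be rejected by the verifier.
*/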
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int get_cgroup_id(void *ctx)
{
struct task_struct *task;
struct css_set *cgroups;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
/* simulate bpf_get_current_cgroup_id() helper */
bpf_rcu_read_lock();
cgroups = task->cgroups;
if (!cgroups)
goto unlock;
cgroup_id = cgroups->dfl_cgrp->kn->id;
unlock:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_succ(void *ctx)
{
struct task_struct *task, *real_parent;
long init_val = 2;
long *ptr;
task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;
bpf_rcu_read_lock();
/* region including helper using rcu ptr real_parent */
real_parent = task->real_parent;
if (!real_parent)
goto out;
ptr = bpf_task_storage_get(&map_a, real_parent, &init_val,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
goto out;
ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0);
if (!ptr)
goto out;
task_storage_val = *ptr;
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int no_lock(void *ctx)
{
struct task_struct *task, *real_parent;
/* old style ptr_to_btf_id is not allowed in sleepable */
task = bpf_get_current_task_btf();
real_parent = task->real_parent;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int two_regions(void *ctx)
{
struct task_struct *task, *real_parent;
/* two regions */
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_1(void *ctx)
{
struct task_struct *task, *real_parent;
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_2(void *ctx)
{
struct task_struct *task, *real_parent;
bpf_rcu_read_lock();
task = bpf_get_current_task_btf();
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int task_acquire(void *ctx)
{
struct task_struct *task, *real_parent, *gparent;
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
/* rcu_ptr->rcu_field */
gparent = real_parent->real_parent;
if (!gparent)
goto out;
/* acquire a reference which can be used outside rcu read lock region */
gparent = bpf_task_acquire(gparent);
if (!gparent)
goto out;
(void)bpf_task_storage_get(&map_a, gparent, 0, 0);
bpf_task_release(gparent);
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_lock(void *ctx)
{
struct task_struct *task;
/* missing bpf_rcu_read_lock() */
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
(void)bpf_task_storage_get(&map_a, task, 0, 0);
bpf_rcu_read_unlock();
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_unlock(void *ctx)
{
struct task_struct *task;
/* missing bpf_rcu_read_unlock() */
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
(void)bpf_task_storage_get(&map_a, task, 0, 0);
return 0;
}
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_rcu_mismatch(void *ctx)
{
struct task_struct *task, *real_parent;
task = bpf_get_current_task_btf();
/* non-sleepable: missing bpf_rcu_read_unlock() in one path */
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
if (real_parent)
bpf_rcu_read_unlock();
out:
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int inproper_sleepable_helper(void *ctx)
{
struct task_struct *task, *real_parent;
struct pt_regs *regs;
__u32 value = 0;
void *ptr;
task = bpf_get_current_task_btf();
/* sleepable helper in rcu read lock region */
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
regs = (struct pt_regs *)bpf_task_pt_regs(real_parent);
if (!regs)
goto out;
ptr = (void *)PT_REGS_IP(regs);
(void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
user_data = value;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
out:
bpf_rcu_read_unlock();
return 0;
}
SEC("?lsm.s/bpf")
int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size)
{
struct bpf_key *bkey;
/* sleepable kfunc in rcu read lock region */
bpf_rcu_read_lock();
bkey = bpf_lookup_user_key(key_serial, flags);
bpf_rcu_read_unlock();
if (!bkey)
return -1;
bpf_key_put(bkey);
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int nested_rcu_region(void *ctx)
{
struct task_struct *task, *real_parent;
/* nested rcu read lock regions */
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
bpf_rcu_read_lock();
real_parent = task->real_parent;
if (!real_parent)
goto out;
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
out:
bpf_rcu_read_unlock();
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_trusted_non_rcuptr(void *ctx)
{
struct task_struct *task, *group_leader;
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
/* the pointer group_leader is explicitly marked as trusted */
group_leader = task->real_parent->group_leader;
(void)bpf_task_storage_get(&map_a, group_leader, 0, 0);
bpf_rcu_read_unlock();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_untrusted_rcuptr(void *ctx)
{
struct task_struct *task, *real_parent;
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
real_parent = task->real_parent;
bpf_rcu_read_unlock();
/* helper use of rcu ptr outside the rcu read lock region */
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int cross_rcu_region(void *ctx)
{
struct task_struct *task, *real_parent;
/* rcu ptr define/use in different regions */
task = bpf_get_current_task_btf();
bpf_rcu_read_lock();
real_parent = task->real_parent;
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
bpf_rcu_read_unlock();
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/rcu_read_lock.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char LICENSE[] SEC("license") = "GPL";
SEC("xdp")
int xdp_handler(struct xdp_md *xdp)
{
return 0;
}
SEC("tc")
int tc_handler(struct __sk_buff *skb)
{
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_link.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <string.h>
#include <bpf/bpf_helpers.h>
#define NUM_CGROUP_LEVELS 4
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(max_entries, NUM_CGROUP_LEVELS);
} cgroup_ids SEC(".maps");
static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
{
__u64 id;
/* [1] &level is passed to an external function that may change it, which
* is incompatible with loop unrolling.
*/
id = bpf_skb_ancestor_cgroup_id(skb, level);
bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
}
SEC("cgroup_id_logger")
int log_cgroup_id(struct __sk_buff *skb)
{
/* Loop unrolling can't be used here due to [1], so unroll manually.
* Number of calls should be in sync with NUM_CGROUP_LEVELS.
*/
log_nth_level(skb, 0);
log_nth_level(skb, 1);
log_nth_level(skb, 2);
log_nth_level(skb, 3);
return TC_ACT_OK;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
__u32 perfbuf_val = 0;
__u32 ringbuf_val = 0;
int test_pid;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} percpu_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} percpu_hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, __u32);
__type(value, __u32);
} perfbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} prog_array SEC(".maps");
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int sys_nanosleep_enter(void *ctx)
{
int cur_pid;
cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid != test_pid)
return 0;
bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU, &perfbuf_val, sizeof(perfbuf_val));
bpf_ringbuf_output(&ringbuf, &ringbuf_val, sizeof(ringbuf_val), 0);
return 0;
}
SEC("perf_event")
int handle_perf_event(void *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2019, 2020 Cloudflare
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_cls_redirect.h"
#include "bpf_kfuncs.h"
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#define IP_OFFSET_MASK (0x1FFF)
#define IP_MF (0x2000)
char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
volatile const __be16 ENCAPSULATION_PORT;
volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
uint64_t l3_protocol_packets_total_ipv4;
uint64_t l3_protocol_packets_total_ipv6;
uint64_t l4_protocol_packets_total_tcp;
uint64_t l4_protocol_packets_total_udp;
uint64_t accepted_packets_total_syn;
uint64_t accepted_packets_total_syn_cookies;
uint64_t accepted_packets_total_last_hop;
uint64_t accepted_packets_total_icmp_echo_request;
uint64_t accepted_packets_total_established;
uint64_t forwarded_packets_total_gue;
uint64_t forwarded_packets_total_gre;
uint64_t errors_total_unknown_l3_proto;
uint64_t errors_total_unknown_l4_proto;
uint64_t errors_total_malformed_ip;
uint64_t errors_total_fragmented_ip;
uint64_t errors_total_malformed_icmp;
uint64_t errors_total_unwanted_icmp;
uint64_t errors_total_malformed_icmp_pkt_too_big;
uint64_t errors_total_malformed_tcp;
uint64_t errors_total_malformed_udp;
uint64_t errors_total_icmp_echo_replies;
uint64_t errors_total_malformed_encapsulation;
uint64_t errors_total_encap_adjust_failed;
uint64_t errors_total_encap_buffer_too_small;
uint64_t errors_total_redirect_loop;
uint64_t errors_total_encap_mtu_violate;
} metrics_t;
typedef enum {
INVALID = 0,
UNKNOWN,
ECHO_REQUEST,
SYN,
SYN_COOKIE,
ESTABLISHED,
} verdict_t;
typedef struct {
uint16_t src, dst;
} flow_ports_t;
_Static_assert(
sizeof(flow_ports_t) !=
offsetofend(struct bpf_sock_tuple, ipv4.dport) -
offsetof(struct bpf_sock_tuple, ipv4.sport) - 1,
"flow_ports_t must match sport and dport in struct bpf_sock_tuple");
_Static_assert(
sizeof(flow_ports_t) !=
offsetofend(struct bpf_sock_tuple, ipv6.dport) -
offsetof(struct bpf_sock_tuple, ipv6.sport) - 1,
"flow_ports_t must match sport and dport in struct bpf_sock_tuple");
struct iphdr_info {
void *hdr;
__u64 len;
};
typedef int ret_t;
/* This is a bit of a hack. We need a return value which allows us to
* indicate that the regular flow of the program should continue,
* while allowing functions to use XDP_PASS and XDP_DROP, etc.
*/
static const ret_t CONTINUE_PROCESSING = -1;
/* Convenience macro to call functions which return ret_t.
*/
#define MAYBE_RETURN(x) \
do { \
ret_t __ret = x; \
if (__ret != CONTINUE_PROCESSING) \
return __ret; \
} while (0)
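/* Illustrative use (as in get_next_hop() below): a helper returning ret_t
 * can short-circuit its caller, e.g.
 *
 *	MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop));
 *
 * Any value other than CONTINUE_PROCESSING (e.g. TC_ACT_SHOT) is returned
 * immediately; CONTINUE_PROCESSING falls through to the next statement.
 */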
static bool ipv4_is_fragment(const struct iphdr *ip)
{
uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
}
static int pkt_parse_ipv4(struct bpf_dynptr *dynptr, __u64 *offset, struct iphdr *iphdr)
{
if (bpf_dynptr_read(iphdr, sizeof(*iphdr), dynptr, *offset, 0))
return -1;
*offset += sizeof(*iphdr);
if (iphdr->ihl < 5)
return -1;
/* skip ipv4 options */
*offset += (iphdr->ihl - 5) * 4;
return 0;
}
/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
static bool pkt_parse_icmp_l4_ports(struct bpf_dynptr *dynptr, __u64 *offset, flow_ports_t *ports)
{
if (bpf_dynptr_read(ports, sizeof(*ports), dynptr, *offset, 0))
return false;
*offset += sizeof(*ports);
/* Ports in the L4 headers are reversed, since we are parsing an ICMP
* payload which is going towards the eyeball.
*/
uint16_t dst = ports->src;
ports->src = ports->dst;
ports->dst = dst;
return true;
}
static uint16_t pkt_checksum_fold(uint32_t csum)
{
/* The highest reasonable value for an IPv4 header
* checksum requires two folds, so we just do that always.
*/
csum = (csum & 0xffff) + (csum >> 16);
csum = (csum & 0xffff) + (csum >> 16);
return (uint16_t)~csum;
}
static void pkt_ipv4_checksum(struct iphdr *iph)
{
iph->check = 0;
/* An IP header without options is 20 bytes. Two of those
* are the checksum, which we always set to zero. Hence,
* the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7,
* which fits in 32 bit.
*/
_Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes");
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++)
acc += ipw[i];
iph->check = pkt_checksum_fold(acc);
}
static bool pkt_skip_ipv6_extension_headers(struct bpf_dynptr *dynptr, __u64 *offset,
const struct ipv6hdr *ipv6, uint8_t *upper_proto,
bool *is_fragment)
{
/* We understand five extension headers.
* https://tools.ietf.org/html/rfc8200#section-4.1 states that all
* headers should occur once, except Destination Options, which may
* occur twice. Hence we give up after 6 headers.
*/
struct {
uint8_t next;
uint8_t len;
} exthdr = {
.next = ipv6->nexthdr,
};
*is_fragment = false;
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
*is_fragment = true;
/* NB: We don't check that hdrlen == 0 as per spec. */
/* fallthrough; */
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
case IPPROTO_MH:
if (bpf_dynptr_read(&exthdr, sizeof(exthdr), dynptr, *offset, 0))
return false;
/* hdrlen is in 8-octet units, and excludes the first 8 octets. */
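/* e.g. exthdr.len == 1 advances past a 16-byte extension header. */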
*offset += (exthdr.len + 1) * 8;
/* Decode next header */
break;
default:
/* The next header is not one of the known extension
* headers, treat it as the upper layer header.
*
* This handles IPPROTO_NONE.
*
* Encapsulating Security Payload (50) and Authentication
* Header (51) also end up here (and will trigger an
* unknown proto error later). They have a custom header
* format and seem too esoteric to care about.
*/
*upper_proto = exthdr.next;
return true;
}
}
/* We never found an upper layer header. */
return false;
}
static int pkt_parse_ipv6(struct bpf_dynptr *dynptr, __u64 *offset, struct ipv6hdr *ipv6,
uint8_t *proto, bool *is_fragment)
{
if (bpf_dynptr_read(ipv6, sizeof(*ipv6), dynptr, *offset, 0))
return -1;
*offset += sizeof(*ipv6);
if (!pkt_skip_ipv6_extension_headers(dynptr, offset, ipv6, proto, is_fragment))
return -1;
return 0;
}
/* Global metrics, per CPU
*/
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, unsigned int);
__type(value, metrics_t);
} metrics_map SEC(".maps");
static metrics_t *get_global_metrics(void)
{
uint64_t key = 0;
return bpf_map_lookup_elem(&metrics_map, &key);
}
static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
{
const int payload_off =
sizeof(*encap) +
sizeof(struct in_addr) * encap->unigue.hop_count;
int32_t encap_overhead = payload_off - sizeof(struct ethhdr);
/* Changing the ethertype if the encapsulated packet is ipv6 */
if (encap->gue.proto_ctype == IPPROTO_IPV6)
encap->eth.h_proto = bpf_htons(ETH_P_IPV6);
if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
return TC_ACT_SHOT;
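/* Re-inject the decapsulated packet on the ingress path of the same
 * device, as if it had arrived without the encapsulation headers.
 */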
return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
}
static ret_t forward_with_gre(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
encap_headers_t *encap, struct in_addr *next_hop,
metrics_t *metrics)
{
const int payload_off =
sizeof(*encap) +
sizeof(struct in_addr) * encap->unigue.hop_count;
int32_t encap_overhead =
payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
__u8 encap_buffer[sizeof(encap_gre_t)] = {};
uint16_t proto = ETH_P_IP;
uint32_t mtu_len = 0;
encap_gre_t *encap_gre;
metrics->forwarded_packets_total_gre++;
/* Loop protection: the inner packet's TTL is decremented as a safeguard
* against any forwarding loop. As the only interesting field is the TTL
* (hop limit for IPv6), it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes
* as they handle the split packets if needed (no need for the data to be
* in the linear section).
*/
if (encap->gue.proto_ctype == IPPROTO_IPV6) {
proto = ETH_P_IPV6;
uint8_t ttl;
int rc;
rc = bpf_skb_load_bytes(
skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
&ttl, 1);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (ttl == 0) {
metrics->errors_total_redirect_loop++;
return TC_ACT_SHOT;
}
ttl--;
rc = bpf_skb_store_bytes(
skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
&ttl, 1, 0);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
} else {
uint8_t ttl;
int rc;
rc = bpf_skb_load_bytes(
skb, payload_off + offsetof(struct iphdr, ttl), &ttl,
1);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (ttl == 0) {
metrics->errors_total_redirect_loop++;
return TC_ACT_SHOT;
}
/* IPv4 also has a checksum to patch. While the TTL is only one byte,
* this helper only works with 2- and 4-byte arguments (the result is
* the same).
*/
rc = bpf_l3_csum_replace(
skb, payload_off + offsetof(struct iphdr, check), ttl,
ttl - 1, 2);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
ttl--;
rc = bpf_skb_store_bytes(
skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1,
0);
if (rc != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
}
if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
metrics->errors_total_encap_mtu_violate++;
return TC_ACT_SHOT;
}
if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) {
metrics->errors_total_encap_adjust_failed++;
return TC_ACT_SHOT;
}
if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) {
metrics->errors_total_encap_buffer_too_small++;
return TC_ACT_SHOT;
}
encap_gre = bpf_dynptr_slice_rdwr(dynptr, 0, encap_buffer, sizeof(encap_buffer));
if (!encap_gre) {
metrics->errors_total_encap_buffer_too_small++;
return TC_ACT_SHOT;
}
encap_gre->ip.protocol = IPPROTO_GRE;
encap_gre->ip.daddr = next_hop->s_addr;
encap_gre->ip.saddr = ENCAPSULATION_IP;
encap_gre->ip.tot_len =
bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta);
encap_gre->gre.flags = 0;
encap_gre->gre.protocol = bpf_htons(proto);
pkt_ipv4_checksum((void *)&encap_gre->ip);
if (encap_gre == encap_buffer)
bpf_dynptr_write(dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
return bpf_redirect(skb->ifindex, 0);
}
static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
encap_headers_t *encap, struct in_addr *next_hop,
metrics_t *metrics)
{
/* swap L2 addresses */
/* This assumes that packets are received from a router.
* So just swapping the MAC addresses here will make the packet go back to
* the router, which will send it to the appropriate machine.
*/
unsigned char temp[ETH_ALEN];
memcpy(temp, encap->eth.h_dest, sizeof(temp));
memcpy(encap->eth.h_dest, encap->eth.h_source,
sizeof(encap->eth.h_dest));
memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source));
if (encap->unigue.next_hop == encap->unigue.hop_count - 1 &&
encap->unigue.last_hop_gre) {
return forward_with_gre(skb, dynptr, encap, next_hop, metrics);
}
metrics->forwarded_packets_total_gue++;
uint32_t old_saddr = encap->ip.saddr;
encap->ip.saddr = encap->ip.daddr;
encap->ip.daddr = next_hop->s_addr;
if (encap->unigue.next_hop < encap->unigue.hop_count) {
encap->unigue.next_hop++;
}
/* Remove ip->saddr, add next_hop->s_addr */
const uint64_t off = offsetof(typeof(*encap), ip.check);
int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4);
if (ret < 0) {
return TC_ACT_SHOT;
}
return bpf_redirect(skb->ifindex, 0);
}
static ret_t skip_next_hops(__u64 *offset, int n)
{
switch (n) {
case 1:
*offset += sizeof(struct in_addr);
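/* fallthrough */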
case 0:
return CONTINUE_PROCESSING;
default:
return TC_ACT_SHOT;
}
}
/* Get the next hop from the GLB header.
*
* Sets next_hop->s_addr to 0 if there are no more hops left.
* pkt is positioned just after the variable length GLB header
* iff the call is successful.
*/
static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_headers_t *encap,
struct in_addr *next_hop)
{
if (encap->unigue.next_hop > encap->unigue.hop_count)
return TC_ACT_SHOT;
/* Skip "used" next hops. */
MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop));
if (encap->unigue.next_hop == encap->unigue.hop_count) {
/* No more next hops, we are at the end of the GLB header. */
next_hop->s_addr = 0;
return CONTINUE_PROCESSING;
}
if (bpf_dynptr_read(next_hop, sizeof(*next_hop), dynptr, *offset, 0))
return TC_ACT_SHOT;
*offset += sizeof(*next_hop);
/* Skip the remaining next hops (may be zero). */
return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
}
/* Fill a bpf_sock_tuple to be used with the socket lookup functions.
* This is a kludge that lets us work around verifier limitations:
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
* clang will substitute a constant for sizeof, which allows the verifier
* to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
uint64_t iphlen, uint16_t sport, uint16_t dport)
{
switch (iphlen) {
case sizeof(struct iphdr): {
struct iphdr *ipv4 = (struct iphdr *)iph;
tuple->ipv4.daddr = ipv4->daddr;
tuple->ipv4.saddr = ipv4->saddr;
tuple->ipv4.sport = sport;
tuple->ipv4.dport = dport;
return sizeof(tuple->ipv4);
}
case sizeof(struct ipv6hdr): {
struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph;
memcpy(&tuple->ipv6.daddr, &ipv6->daddr,
sizeof(tuple->ipv6.daddr));
memcpy(&tuple->ipv6.saddr, &ipv6->saddr,
sizeof(tuple->ipv6.saddr));
tuple->ipv6.sport = sport;
tuple->ipv6.dport = dport;
return sizeof(tuple->ipv6);
}
default:
return 0;
}
}
static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple,
uint64_t tuplen, void *iph, struct tcphdr *tcp)
{
struct bpf_sock *sk =
bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
if (sk == NULL)
return UNKNOWN;
if (sk->state != BPF_TCP_LISTEN) {
bpf_sk_release(sk);
return ESTABLISHED;
}
if (iph != NULL && tcp != NULL) {
/* Kludge: we've run out of arguments, but need the length of the ip header. */
uint64_t iphlen = sizeof(struct iphdr);
if (tuplen == sizeof(tuple->ipv6))
iphlen = sizeof(struct ipv6hdr);
if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp,
sizeof(*tcp)) == 0) {
bpf_sk_release(sk);
return SYN_COOKIE;
}
}
bpf_sk_release(sk);
return UNKNOWN;
}
static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen)
{
struct bpf_sock *sk =
bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
if (sk == NULL)
return UNKNOWN;
if (sk->state == BPF_TCP_ESTABLISHED) {
bpf_sk_release(sk);
return ESTABLISHED;
}
bpf_sk_release(sk);
return UNKNOWN;
}
static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple,
uint64_t tuplen, metrics_t *metrics)
{
switch (proto) {
case IPPROTO_TCP:
return classify_tcp(skb, tuple, tuplen, NULL, NULL);
case IPPROTO_UDP:
return classify_udp(skb, tuple, tuplen);
default:
metrics->errors_total_malformed_icmp++;
return INVALID;
}
}
static verdict_t process_icmpv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, __u64 *offset,
metrics_t *metrics)
{
struct icmphdr icmp;
struct iphdr ipv4;
if (bpf_dynptr_read(&icmp, sizeof(icmp), dynptr, *offset, 0)) {
metrics->errors_total_malformed_icmp++;
return INVALID;
}
*offset += sizeof(icmp);
/* We should never receive encapsulated echo replies. */
if (icmp.type == ICMP_ECHOREPLY) {
metrics->errors_total_icmp_echo_replies++;
return INVALID;
}
if (icmp.type == ICMP_ECHO)
return ECHO_REQUEST;
if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) {
metrics->errors_total_unwanted_icmp++;
return INVALID;
}
if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
/* The source address in the outer IP header is from the entity that
* originated the ICMP message. Use the original IP header to restore
* the correct flow tuple.
*/
struct bpf_sock_tuple tuple;
tuple.ipv4.saddr = ipv4.daddr;
tuple.ipv4.daddr = ipv4.saddr;
if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv4.sport)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
return classify_icmp(skb, ipv4.protocol, &tuple,
sizeof(tuple.ipv4), metrics);
}
static verdict_t process_icmpv6(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
metrics_t *metrics)
{
struct bpf_sock_tuple tuple;
struct ipv6hdr ipv6;
struct icmp6hdr icmp6;
bool is_fragment;
uint8_t l4_proto;
if (bpf_dynptr_read(&icmp6, sizeof(icmp6), dynptr, *offset, 0)) {
metrics->errors_total_malformed_icmp++;
return INVALID;
}
/* We should never receive encapsulated echo replies. */
if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) {
metrics->errors_total_icmp_echo_replies++;
return INVALID;
}
if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) {
return ECHO_REQUEST;
}
if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) {
metrics->errors_total_unwanted_icmp++;
return INVALID;
}
if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
if (is_fragment) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
/* Swap source and dest addresses. */
memcpy(&tuple.ipv6.saddr, &ipv6.daddr, sizeof(tuple.ipv6.saddr));
memcpy(&tuple.ipv6.daddr, &ipv6.saddr, sizeof(tuple.ipv6.daddr));
if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv6.sport)) {
metrics->errors_total_malformed_icmp_pkt_too_big++;
return INVALID;
}
return classify_icmp(skb, l4_proto, &tuple, sizeof(tuple.ipv6),
metrics);
}
static verdict_t process_tcp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
struct iphdr_info *info, metrics_t *metrics)
{
struct bpf_sock_tuple tuple;
struct tcphdr tcp;
uint64_t tuplen;
metrics->l4_protocol_packets_total_tcp++;
if (bpf_dynptr_read(&tcp, sizeof(tcp), dynptr, *offset, 0)) {
metrics->errors_total_malformed_tcp++;
return INVALID;
}
*offset += sizeof(tcp);
if (tcp.syn)
return SYN;
tuplen = fill_tuple(&tuple, info->hdr, info->len, tcp.source, tcp.dest);
return classify_tcp(skb, &tuple, tuplen, info->hdr, &tcp);
}
static verdict_t process_udp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
struct iphdr_info *info, metrics_t *metrics)
{
struct bpf_sock_tuple tuple;
struct udphdr udph;
uint64_t tuplen;
metrics->l4_protocol_packets_total_udp++;
if (bpf_dynptr_read(&udph, sizeof(udph), dynptr, *offset, 0)) {
metrics->errors_total_malformed_udp++;
return INVALID;
}
*offset += sizeof(udph);
tuplen = fill_tuple(&tuple, info->hdr, info->len, udph.source, udph.dest);
return classify_udp(skb, &tuple, tuplen);
}
static verdict_t process_ipv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
__u64 *offset, metrics_t *metrics)
{
struct iphdr ipv4;
struct iphdr_info info = {
.hdr = &ipv4,
.len = sizeof(ipv4),
};
metrics->l3_protocol_packets_total_ipv4++;
if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv4.version != 4) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv4_is_fragment(&ipv4)) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
switch (ipv4.protocol) {
case IPPROTO_ICMP:
return process_icmpv4(skb, dynptr, offset, metrics);
case IPPROTO_TCP:
return process_tcp(dynptr, offset, skb, &info, metrics);
case IPPROTO_UDP:
return process_udp(dynptr, offset, skb, &info, metrics);
default:
metrics->errors_total_unknown_l4_proto++;
return INVALID;
}
}
static verdict_t process_ipv6(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
__u64 *offset, metrics_t *metrics)
{
struct ipv6hdr ipv6;
struct iphdr_info info = {
.hdr = &ipv6,
.len = sizeof(ipv6),
};
uint8_t l4_proto;
bool is_fragment;
metrics->l3_protocol_packets_total_ipv6++;
if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (ipv6.version != 6) {
metrics->errors_total_malformed_ip++;
return INVALID;
}
if (is_fragment) {
metrics->errors_total_fragmented_ip++;
return INVALID;
}
switch (l4_proto) {
case IPPROTO_ICMPV6:
return process_icmpv6(dynptr, offset, skb, metrics);
case IPPROTO_TCP:
return process_tcp(dynptr, offset, skb, &info, metrics);
case IPPROTO_UDP:
return process_udp(dynptr, offset, skb, &info, metrics);
default:
metrics->errors_total_unknown_l4_proto++;
return INVALID;
}
}
SEC("tc")
int cls_redirect(struct __sk_buff *skb)
{
__u8 encap_buffer[sizeof(encap_headers_t)] = {};
struct bpf_dynptr dynptr;
struct in_addr next_hop;
/* Tracks offset of the dynptr. This will be unnecessary once
* bpf_dynptr_advance() is available.
*/
__u64 off = 0;
ret_t ret;
bpf_dynptr_from_skb(skb, 0, &dynptr);
metrics_t *metrics = get_global_metrics();
if (metrics == NULL)
return TC_ACT_SHOT;
metrics->processed_packets_total++;
/* Pass bogus packets as long as we're not sure they're
* destined for us.
*/
if (skb->protocol != bpf_htons(ETH_P_IP))
return TC_ACT_OK;
encap_headers_t *encap;
/* Make sure that all encapsulation headers are available in
* the linear portion of the skb. This makes it easy to manipulate them.
*/
if (bpf_skb_pull_data(skb, sizeof(*encap)))
return TC_ACT_OK;
encap = bpf_dynptr_slice_rdwr(&dynptr, 0, encap_buffer, sizeof(encap_buffer));
if (!encap)
return TC_ACT_OK;
off += sizeof(*encap);
if (encap->ip.ihl != 5)
/* We never have any options. */
return TC_ACT_OK;
if (encap->ip.daddr != ENCAPSULATION_IP ||
encap->ip.protocol != IPPROTO_UDP)
return TC_ACT_OK;
/* TODO Check UDP length? */
if (encap->udp.dest != ENCAPSULATION_PORT)
return TC_ACT_OK;
/* We now know that the packet is destined to us, we can
* drop bogus ones.
*/
if (ipv4_is_fragment((void *)&encap->ip)) {
metrics->errors_total_fragmented_ip++;
return TC_ACT_SHOT;
}
if (encap->gue.variant != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->gue.control != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->gue.flags != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->gue.hlen !=
sizeof(encap->unigue) / 4 + encap->unigue.hop_count) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->unigue.version != 0) {
metrics->errors_total_malformed_encapsulation++;
return TC_ACT_SHOT;
}
if (encap->unigue.reserved != 0)
return TC_ACT_SHOT;
MAYBE_RETURN(get_next_hop(&dynptr, &off, encap, &next_hop));
if (next_hop.s_addr == 0) {
metrics->accepted_packets_total_last_hop++;
return accept_locally(skb, encap);
}
verdict_t verdict;
switch (encap->gue.proto_ctype) {
case IPPROTO_IPIP:
verdict = process_ipv4(skb, &dynptr, &off, metrics);
break;
case IPPROTO_IPV6:
verdict = process_ipv6(skb, &dynptr, &off, metrics);
break;
default:
metrics->errors_total_unknown_l3_proto++;
return TC_ACT_SHOT;
}
switch (verdict) {
case INVALID:
/* metrics have already been bumped */
return TC_ACT_SHOT;
case UNKNOWN:
return forward_to_next_hop(skb, &dynptr, encap, &next_hop, metrics);
case ECHO_REQUEST:
metrics->accepted_packets_total_icmp_echo_request++;
break;
case SYN:
if (encap->unigue.forward_syn) {
return forward_to_next_hop(skb, &dynptr, encap, &next_hop,
metrics);
}
metrics->accepted_packets_total_syn++;
break;
case SYN_COOKIE:
metrics->accepted_packets_total_syn_cookies++;
break;
case ESTABLISHED:
metrics->accepted_packets_total_established++;
break;
}
ret = accept_locally(skb, encap);
if (encap == encap_buffer)
bpf_dynptr_write(&dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
return ret;
}
| linux-master | tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u64);
} arraymap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} percpu_map SEC(".maps");
struct callback_ctx {
int output;
};
const volatile int bypass_unused = 1;
static __u64
unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
{
data->output = 0;
return 1;
}
static __u64
check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
{
data->output += *val;
if (*key == 1)
return 1; /* stop the iteration */
return 0;
}
__u32 cpu = 0;
__u64 percpu_val = 0;
static __u64
check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
struct callback_ctx *data)
{
cpu = bpf_get_smp_processor_id();
percpu_val = *val;
return 0;
}
u32 arraymap_output = 0;
SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
struct callback_ctx data;
data.output = 0;
bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
if (!bypass_unused)
bpf_for_each_map_elem(&arraymap, unused_subprog, &data, 0);
arraymap_output = data.output;
bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/for_each_array_map_elem.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 50
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf50.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct val {
int cnt;
struct bpf_spin_lock l;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct val);
} map_spin_lock SEC(".maps");
SEC("cgroup/skb")
__description("spin_lock: test1 success")
__success __failure_unpriv __msg_unpriv("")
__retval(0)
__naked void spin_lock_test1_success(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r6 + 0); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test2 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__naked void lock_test2_direct_ld_st(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r1 + 0); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test3 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test3_direct_ld_st(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r6 + 1); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test4 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test4_direct_ld_st(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u16*)(r6 + 3); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test5 call within a locked region")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void call_within_a_locked_region(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
call %[bpf_get_prandom_u32]; \
r1 = r6; \
r1 += 4; \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test6 missing unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test6_missing_unlock(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r6 + 0); \
if r0 != 0 goto l1_%=; \
call %[bpf_spin_unlock]; \
l1_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test7 unlock without lock")
__failure __msg("without taking a lock")
__failure_unpriv __msg_unpriv("")
__naked void lock_test7_unlock_without_lock(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
if r1 != 0 goto l1_%=; \
call %[bpf_spin_lock]; \
l1_%=: r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r6 + 0); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test8 double lock")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test8_double_lock(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r6; \
r1 += 4; \
r0 = *(u32*)(r6 + 0); \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test9 different lock")
__failure __msg("unlock of different lock")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test9_different_lock(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r7 = r0; \
r1 = r6; \
r1 += 4; \
call %[bpf_spin_lock]; \
r1 = r7; \
r1 += 4; \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("cgroup/skb")
__description("spin_lock: test10 lock in subprog without unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void lock_in_subprog_without_unlock(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r6 = r0; \
r1 = r0; \
r1 += 4; \
call lock_in_subprog_without_unlock__1; \
r1 = r6; \
r1 += 4; \
call %[bpf_spin_unlock]; \
r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
static __naked __noinline __attribute__((used))
void lock_in_subprog_without_unlock__1(void)
{
asm volatile (" \
call %[bpf_spin_lock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_spin_lock)
: __clobber_all);
}
SEC("tc")
__description("spin_lock: test11 ld_abs under lock")
__failure __msg("inside bpf_spin_lock")
__naked void test11_ld_abs_under_lock(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r7 = r0; \
r1 = r0; \
r1 += 4; \
call %[bpf_spin_lock]; \
r0 = *(u8*)skb[0]; \
r1 = r7; \
r1 += 4; \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
SEC("tc")
__description("spin_lock: regsafe compare reg->id for map value")
__failure __msg("bpf_spin_unlock of different lock")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void reg_id_for_map_value(void)
{
asm volatile (" \
r6 = r1; \
r6 = *(u32*)(r6 + %[__sk_buff_mark]); \
r1 = %[map_spin_lock] ll; \
r9 = r1; \
r2 = 0; \
*(u32*)(r10 - 4) = r2; \
r2 = r10; \
r2 += -4; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
exit; \
l0_%=: r7 = r0; \
r1 = r9; \
r2 = r10; \
r2 += -4; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l1_%=; \
exit; \
l1_%=: r8 = r0; \
r1 = r7; \
r1 += 4; \
call %[bpf_spin_lock]; \
if r6 == 0 goto l2_%=; \
goto l3_%=; \
l2_%=: r7 = r8; \
l3_%=: r1 = r7; \
r1 += 4; \
call %[bpf_spin_unlock]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
: __clobber_all);
}
/* Make sure that regsafe() compares ids for spin lock records using
* check_ids():
* 1: r9 = map_lookup_elem(...) ; r9.id == 1
* 2: r8 = map_lookup_elem(...) ; r8.id == 2
* 3: r7 = ktime_get_ns()
* 4: r6 = ktime_get_ns()
* 5: if r6 > r7 goto <9>
* 6: spin_lock(r8)
* 7: r9 = r8
* 8: goto <10>
* 9: spin_lock(r9)
* 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active,
* ; second visit to (10) should be considered safe
* ; if check_ids() is used.
* 11: exit(0)
*/
SEC("cgroup/skb")
__description("spin_lock: regsafe() check_ids() similar id mappings")
__success __msg("29: safe")
__failure_unpriv __msg_unpriv("")
__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_similar_id_mappings(void)
{
asm volatile (" \
r1 = 0; \
*(u32*)(r10 - 4) = r1; \
/* r9 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r9 = r0; \
/* r8 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -4; \
r1 = %[map_spin_lock] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l1_%=; \
r8 = r0; \
/* r7 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r7 = r0; \
/* r6 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r6 = r0; \
/* if r6 > r7 goto +5 ; no new information about the state is derived from\
* ; this check, thus produced verifier states differ\
* ; only in 'insn_idx' \
* spin_lock(r8) \
* r9 = r8 \
* goto unlock \
*/ \
if r6 > r7 goto l2_%=; \
r1 = r8; \
r1 += 4; \
call %[bpf_spin_lock]; \
r9 = r8; \
goto l3_%=; \
l2_%=: /* spin_lock(r9) */ \
r1 = r9; \
r1 += 4; \
call %[bpf_spin_lock]; \
l3_%=: /* spin_unlock(r9) */ \
r1 = r9; \
r1 += 4; \
call %[bpf_spin_unlock]; \
l0_%=: /* exit(0) */ \
r0 = 0; \
l1_%=: exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm(bpf_spin_lock),
__imm(bpf_spin_unlock),
__imm_addr(map_spin_lock)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_spin_lock.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, long);
} hash1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, long);
} hash2 SEC(".maps");
int pass1 = 0;
int pass2 = 0;
SEC("fentry/htab_map_delete_elem")
int BPF_PROG(on_delete, struct bpf_map *map)
{
int key = 0;
if (map == (void *)&hash1) {
pass1++;
return 0;
}
if (map == (void *)&hash2) {
pass2++;
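/* This nested delete re-enters htab_map_delete_elem(); the kernel's
 * recursion protection is expected to keep this fentry program from
 * running again for the inner call.
 */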
bpf_map_delete_elem(&hash2, &key);
return 0;
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/recursion.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define IFINDEX_LO 1
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_cpumap_val));
__uint(max_entries, 4);
} cpu_map SEC(".maps");
SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
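/* Redirect every packet to the cpumap entry at index 1. */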
return bpf_redirect_map(&cpu_map, 1, 0);
}
SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
}
SEC("xdp/cpumap")
int xdp_dummy_cm(struct xdp_md *ctx)
{
if (ctx->ingress_ifindex == IFINDEX_LO)
return XDP_DROP;
return XDP_PASS;
}
SEC("xdp.frags/cpumap")
int xdp_dummy_cm_frags(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <sys/types.h>
pid_t pid = 0;
long ret = 0;
void *user_ptr = 0;
char buf[256] = {};
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int on_write(void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
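/* bpf_probe_read_user_str() returns the number of bytes copied including
 * the trailing NUL on success, or a negative error code on failure.
 */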
ret = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_probe_read_user_str.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <errno.h>
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct bpf_map;
__u8 rand_vals[2500000];
const __u32 nr_rand_bytes = 2500000;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(key_size, sizeof(__u32));
/* max entries and value_size will be set programmatically.
* They are configurable from the userspace bench program.
*/
} array_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
/* max entries, value_size, and # of hash functions will be set
* programmatically. They are configurable from the userspace
* bench program.
*/
__uint(map_extra, 3);
} bloom_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
/* max entries, key_size, and value_size, will be set
* programmatically. They are configurable from the userspace
* bench program.
*/
} hashmap SEC(".maps");
struct callback_ctx {
struct bpf_map *map;
bool update;
};
/* Tracks the number of hits, drops, and false hits */
struct {
__u32 stats[3];
} __attribute__((__aligned__(256))) percpu_stats[256];
const __u32 hit_key = 0;
const __u32 drop_key = 1;
const __u32 false_hit_key = 2;
__u8 value_size;
const volatile bool hashmap_use_bloom;
const volatile bool count_false_hits;
int error = 0;
static __always_inline void log_result(__u32 key)
{
__u32 cpu = bpf_get_smp_processor_id();
percpu_stats[cpu & 255].stats[key]++;
}
static __u64
bloom_callback(struct bpf_map *map, __u32 *key, void *val,
struct callback_ctx *data)
{
int err;
if (data->update)
err = bpf_map_push_elem(data->map, val, 0);
else
err = bpf_map_peek_elem(data->map, val);
if (err) {
error |= 1;
return 1; /* stop the iteration */
}
log_result(hit_key);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bloom_lookup(void *ctx)
{
struct callback_ctx data;
data.map = (struct bpf_map *)&bloom_map;
data.update = false;
bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bloom_update(void *ctx)
{
struct callback_ctx data;
data.map = (struct bpf_map *)&bloom_map;
data.update = true;
bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
return 0;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bloom_hashmap_lookup(void *ctx)
{
__u64 *result;
int i, err;
__u32 index = bpf_get_prandom_u32();
__u32 bitmask = (1ULL << 21) - 1;
for (i = 0; i < 1024; i++, index += value_size) {
index = index & bitmask;
if (hashmap_use_bloom) {
err = bpf_map_peek_elem(&bloom_map,
rand_vals + index);
if (err) {
if (err != -ENOENT) {
error |= 2;
return 0;
}
log_result(hit_key);
continue;
}
}
result = bpf_map_lookup_elem(&hashmap,
rand_vals + index);
if (result) {
log_result(hit_key);
} else {
if (hashmap_use_bloom && count_false_hits)
log_result(false_hit_key);
log_result(drop_key);
}
}
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bloom_filter_bench.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Politecnico di Torino
#define MAP_TYPE BPF_MAP_TYPE_QUEUE
#include "test_queue_stack_map.h"
| linux-master | tools/testing/selftests/bpf/progs/test_queue_map.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct S {
int x;
};
__noinline int foo(const struct S *s)
{
return s ? bpf_get_prandom_u32() < s->x : 0;
}
SEC("cgroup_skb/ingress")
__failure __msg("Caller passes invalid args into func#1")
int global_func11(struct __sk_buff *skb)
{
return foo((const void *)skb);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func11.c |
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
void *data_end = (void *)(long) skb->data_end;
void *data = (void *)(long) skb->data;
__u8 *d = data;
int err;
if (data + 10 > data_end) {
err = bpf_skb_pull_data(skb, 10);
if (err)
return SK_DROP;
data_end = (void *)(long)skb->data_end;
data = (void *)(long)skb->data;
if (data + 10 > data_end)
return SK_DROP;
}
/* This write/read is a bit pointless but tests the verifier and
* strparser handler for read/write pkt data and access into sk
* fields.
*/
d = data;
d[7] = 1;
return skb->len;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/sockmap_parse_prog.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
__uint(map_flags, BPF_F_RDONLY_PROG);
} map_array_ro SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
__uint(map_flags, BPF_F_WRONLY_PROG);
} map_array_wo SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
SEC("socket")
__description("valid map access into an array with a constant")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void an_array_with_a_constant_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("valid map access into an array with a register")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 4; \
r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("valid map access into an array with a variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 >= %[max_entries] goto l0_%=; \
r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("valid map access into an array with a signed variable")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_a_signed_variable(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if w1 s> 0xffffffff goto l1_%=; \
w1 = 0; \
l1_%=: w2 = %[max_entries]; \
if r2 s> r1 goto l2_%=; \
w1 = 0; \
l2_%=: w1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with a constant")
__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
__failure_unpriv
__naked void an_array_with_a_constant_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + %[__imm_0]) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, (MAX_ENTRIES + 1) << 2),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with a register")
__failure __msg("R0 min value is outside of the allowed memory range")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_register_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = %[__imm_0]; \
r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, MAX_ENTRIES + 1),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with a variable")
__failure
__msg("R0 unbounded memory access, make sure to bounds check any such access")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void an_array_with_a_variable_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with no floor check")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void array_with_no_floor_check(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r0 + 0); \
w2 = %[max_entries]; \
if r2 s> r1 goto l1_%=; \
w1 = 0; \
l1_%=: w1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(max_entries, MAX_ENTRIES),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with a invalid max check")
__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
w2 = %[__imm_0]; \
if r2 > r1 goto l1_%=; \
w1 = 0; \
l1_%=: w1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, MAX_ENTRIES + 1),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("invalid map access into an array with a invalid max check")
__failure __msg("R0 pointer += pointer")
__failure_unpriv
__flag(BPF_F_ANY_ALIGNMENT)
__naked void with_a_invalid_max_check_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r8 = r0; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 += r8; \
r0 = *(u32*)(r0 + %[test_val_foo]); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("socket")
__description("valid read map access into a read-only array 1")
__success __success_unpriv __retval(28)
__naked void a_read_only_array_1_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_ro] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 = *(u32*)(r0 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_ro)
: __clobber_all);
}
SEC("tc")
__description("valid read map access into a read-only array 2")
__success __retval(65507)
__naked void a_read_only_array_2_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_ro] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: r0 &= 0xffff; \
exit; \
" :
: __imm(bpf_csum_diff),
__imm(bpf_map_lookup_elem),
__imm_addr(map_array_ro)
: __clobber_all);
}
SEC("socket")
__description("invalid write map access into a read-only array 1")
__failure __msg("write into map forbidden")
__failure_unpriv
__naked void a_read_only_array_1_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_ro] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_ro)
: __clobber_all);
}
SEC("tc")
__description("invalid write map access into a read-only array 2")
__failure __msg("write into map forbidden")
__naked void a_read_only_array_2_2(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_ro] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r6; \
r2 = 0; \
r3 = r0; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_skb_load_bytes),
__imm_addr(map_array_ro)
: __clobber_all);
}
SEC("socket")
__description("valid write map access into a write-only array 1")
__success __success_unpriv __retval(1)
__naked void a_write_only_array_1_1(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_wo] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 42; \
*(u64*)(r0 + 0) = r1; \
l0_%=: r0 = 1; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_wo)
: __clobber_all);
}
SEC("tc")
__description("valid write map access into a write-only array 2")
__success __retval(0)
__naked void a_write_only_array_2_1(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_wo] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r6; \
r2 = 0; \
r3 = r0; \
r4 = 8; \
call %[bpf_skb_load_bytes]; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm(bpf_skb_load_bytes),
__imm_addr(map_array_wo)
: __clobber_all);
}
SEC("socket")
__description("invalid read map access into a write-only array 1")
__failure __msg("read from map forbidden")
__failure_unpriv
__naked void a_write_only_array_1_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_wo] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r0 = *(u64*)(r0 + 0); \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_wo)
: __clobber_all);
}
SEC("tc")
__description("invalid read map access into a write-only array 2")
__failure __msg("read from map forbidden")
__naked void a_write_only_array_2_2(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_array_wo] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = r0; \
r2 = 4; \
r3 = 0; \
r4 = 0; \
r5 = 0; \
call %[bpf_csum_diff]; \
l0_%=: exit; \
" :
: __imm(bpf_csum_diff),
__imm(bpf_map_lookup_elem),
__imm_addr(map_array_wo)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_array_access.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
} hash_map_bench SEC(".maps");
/* The number of slots to store times */
#define NR_SLOTS 32
#define NR_CPUS 256
#define CPU_MASK (NR_CPUS-1)
/* Configured by userspace */
u64 nr_entries;
u64 nr_loops;
u32 __attribute__((__aligned__(8))) key[NR_CPUS];
/* Filled by us */
u64 __attribute__((__aligned__(256))) percpu_times_index[NR_CPUS];
u64 __attribute__((__aligned__(256))) percpu_times[NR_CPUS][NR_SLOTS];
static inline void patch_key(u32 i)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
key[0] = i + 1;
#else
key[0] = __builtin_bswap32(i + 1);
#endif
/* the rest of key is random and is configured by userspace */
}
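/* Illustrative note: on either byte order this leaves the same byte layout
 * in key[0] (e.g. i == 0 stores bytes 01 00 00 00), so the lookup key bytes
 * are independent of host endianness.
 */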
static int lookup_callback(__u32 index, u32 *unused)
{
patch_key(index);
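/* Returning 1 from a bpf_loop() callback stops the loop early, so a
 * missed lookup aborts the remaining iterations.
 */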
return bpf_map_lookup_elem(&hash_map_bench, key) ? 0 : 1;
}
static int loop_lookup_callback(__u32 index, u32 *unused)
{
return bpf_loop(nr_entries, lookup_callback, NULL, 0) ? 0 : 1;
}
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int benchmark(void *ctx)
{
u32 cpu = bpf_get_smp_processor_id();
u32 times_index;
u64 start_time;
times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS;
start_time = bpf_ktime_get_ns();
bpf_loop(nr_loops, loop_lookup_callback, NULL, 0);
percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time;
percpu_times_index[cpu & CPU_MASK] += 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c |