// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_maps.h"
#include "task_local_storage_helpers.h"
#include "read_bpf_task_storage_busy.skel.h"
struct lookup_ctx {
bool start;
bool stop;
int pid_fd;
int map_fd;
int loop;
};
static void *lookup_fn(void *arg)
{
struct lookup_ctx *ctx = arg;
long value;
int i = 0;
while (!ctx->start)
usleep(1);
while (!ctx->stop && i++ < ctx->loop)
bpf_map_lookup_elem(ctx->map_fd, &ctx->pid_fd, &value);
return NULL;
}
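/* Release any threads still waiting on ctx->start, ask running ones to
 * stop, and reap them all.
 */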
static void abort_lookup(struct lookup_ctx *ctx, pthread_t *tids, unsigned int nr)
{
unsigned int i;
ctx->stop = true;
ctx->start = true;
for (i = 0; i < nr; i++)
pthread_join(tids[i], NULL);
}
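/* Stress concurrent lookups on a task storage map: pin the process to one
 * CPU, spawn many threads that hammer bpf_map_lookup_elem() on the same
 * pidfd key, then attach a program that reads the bpf_task_storage_busy
 * counter and verify it has dropped back to zero once the threads finish.
 */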
void test_task_storage_map_stress_lookup(void)
{
#define MAX_NR_THREAD 4096
unsigned int i, nr = 256, loop = 8192, cpu = 0;
struct read_bpf_task_storage_busy *skel;
pthread_t tids[MAX_NR_THREAD];
struct lookup_ctx ctx;
cpu_set_t old, new;
const char *cfg;
int err;
cfg = getenv("TASK_STORAGE_MAP_NR_THREAD");
if (cfg) {
nr = atoi(cfg);
if (nr > MAX_NR_THREAD)
nr = MAX_NR_THREAD;
}
cfg = getenv("TASK_STORAGE_MAP_NR_LOOP");
if (cfg)
loop = atoi(cfg);
cfg = getenv("TASK_STORAGE_MAP_PIN_CPU");
if (cfg)
cpu = atoi(cfg);
skel = read_bpf_task_storage_busy__open_and_load();
err = libbpf_get_error(skel);
CHECK(err, "open_and_load", "error %d\n", err);
/* Only for a fully preemptible kernel */
if (!skel->kconfig->CONFIG_PREEMPT) {
printf("%s SKIP (no CONFIG_PREEMPT)\n", __func__);
read_bpf_task_storage_busy__destroy(skel);
skips++;
return;
}
/* Save the old affinity setting */
sched_getaffinity(getpid(), sizeof(old), &old);
/* Pinned on a specific CPU */
CPU_ZERO(&new);
CPU_SET(cpu, &new);
sched_setaffinity(getpid(), sizeof(new), &new);
ctx.start = false;
ctx.stop = false;
ctx.pid_fd = sys_pidfd_open(getpid(), 0);
ctx.map_fd = bpf_map__fd(skel->maps.task);
ctx.loop = loop;
for (i = 0; i < nr; i++) {
err = pthread_create(&tids[i], NULL, lookup_fn, &ctx);
if (err) {
abort_lookup(&ctx, tids, i);
CHECK(err, "pthread_create", "error %d\n", err);
goto out;
}
}
ctx.start = true;
for (i = 0; i < nr; i++)
pthread_join(tids[i], NULL);
skel->bss->pid = getpid();
err = read_bpf_task_storage_busy__attach(skel);
CHECK(err, "attach", "error %d\n", err);
/* Trigger program */
syscall(SYS_gettid);
skel->bss->pid = 0;
CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);
out:
read_bpf_task_storage_busy__destroy(skel);
/* Restore affinity setting */
sched_setaffinity(getpid(), sizeof(old), &old);
printf("%s:PASS\n", __func__);
}
/* repo: linux-master, file: tools/testing/selftests/bpf/map_tests/task_storage_map.c */
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_maps.h>
static int nr_cpus;
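/* Fill keys[i] = i and the matching values; for per-cpu maps every key
 * gets nr_cpus consecutive slots (value i + 1 + cpu), so the values
 * buffer must hold max_entries * nr_cpus elements.
 */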
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
__s64 *values, bool is_pcpu)
{
int i, j, err;
int cpu_offset = 0;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
for (i = 0; i < max_entries; i++) {
keys[i] = i;
if (is_pcpu) {
cpu_offset = i * nr_cpus;
for (j = 0; j < nr_cpus; j++)
(values + cpu_offset)[j] = i + 1 + j;
} else {
values[i] = i + 1;
}
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
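/* Check that every key/value pair written by map_batch_update() is read
 * back unchanged and that no index is missing.
 */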
static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
__s64 *values, bool is_pcpu)
{
int i, j;
int cpu_offset = 0;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
if (is_pcpu) {
cpu_offset = i * nr_cpus;
for (j = 0; j < nr_cpus; j++) {
__s64 value = (values + cpu_offset)[j];
CHECK(keys[i] + j + 1 != value,
"key/value checking",
"error: i %d j %d key %d value %lld\n", i,
j, keys[i], value);
}
} else {
CHECK(keys[i] + 1 != values[i], "key/value checking",
"error: i %d key %d value %lld\n", i, keys[i],
values[i]);
}
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
CHECK(visited[i] != 1, "visited checking",
"error: keys array at index %d missing\n", i);
}
}
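/* Update the whole (per-cpu) array in one batch, then read it back with
 * bpf_map_lookup_batch() in chunks of 'step' elements, resuming from the
 * opaque 'batch' cursor until ENOENT marks the end of the map.
 */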
static void __test_map_lookup_and_update_batch(bool is_pcpu)
{
int map_fd, *keys, *visited;
__u32 count, total, total_success;
const __u32 max_entries = 10;
__u64 batch = 0;
int err, step, value_size;
void *values;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY,
"array_map", sizeof(int), sizeof(__s64), max_entries, NULL);
CHECK(map_fd == -1,
"bpf_map_create()", "error:%s\n", strerror(errno));
value_size = sizeof(__s64);
if (is_pcpu)
value_size *= nr_cpus;
keys = calloc(max_entries, sizeof(*keys));
values = calloc(max_entries, value_size);
visited = calloc(max_entries, sizeof(*visited));
CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
strerror(errno));
/* test 1: lookup in a loop with various steps. */
total_success = 0;
for (step = 1; step < max_entries; step++) {
map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
memset(values, 0, max_entries * value_size);
batch = 0;
total = 0;
/* iteratively look up elements in chunks of
 * 'step' elements each.
 */
count = step;
while (true) {
err = bpf_map_lookup_batch(map_fd,
total ? &batch : NULL,
&batch, keys + total,
values + total * value_size,
&count, &opts);
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
total += count;
if (err)
break;
}
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
map_batch_verify(visited, max_entries, keys, values, is_pcpu);
total_success++;
}
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
free(keys);
free(values);
free(visited);
close(map_fd);
}
static void array_map_batch_ops(void)
{
__test_map_lookup_and_update_batch(false);
printf("test_%s:PASS\n", __func__);
}
static void array_percpu_map_batch_ops(void)
{
__test_map_lookup_and_update_batch(true);
printf("test_%s:PASS\n", __func__);
}
void test_array_map_batch_ops(void)
{
nr_cpus = libbpf_num_possible_cpus();
CHECK(nr_cpus < 0, "nr_cpus checking",
"error: get possible cpus failed");
array_map_batch_ops();
array_percpu_map_batch_ops();
}
/* repo: linux-master, file: tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c */
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_maps.h>
#define OUTER_MAP_ENTRIES 10
static __u32 get_map_id_from_fd(int map_fd)
{
struct bpf_map_info map_info = {};
uint32_t info_len = sizeof(map_info);
int ret;
ret = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(ret < 0, "Finding map info failed", "error:%s\n",
strerror(errno));
return map_info.id;
}
/* Create OUTER_MAP_ENTRIES inner maps that will be stored in the outer
 * map and return the created map fds via inner_map_fds.
 */
static void create_inner_maps(enum bpf_map_type map_type,
__u32 *inner_map_fds)
{
int map_fd, map_index, ret;
__u32 map_key = 0, map_id;
char map_name[15];
for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) {
memset(map_name, 0, sizeof(map_name));
sprintf(map_name, "inner_map_fd_%d", map_index);
map_fd = bpf_map_create(map_type, map_name, sizeof(__u32),
sizeof(__u32), 1, NULL);
CHECK(map_fd < 0,
"inner bpf_map_create() failed",
"map_type=(%d) map_name(%s), error:%s\n",
map_type, map_name, strerror(errno));
/* keep track of the inner map fd as it is required
 * to add records to the outer map
 */
inner_map_fds[map_index] = map_fd;
/* Add entry into this created map
* eg: map1 key = 0, value = map1's map id
* map2 key = 0, value = map2's map id
*/
map_id = get_map_id_from_fd(map_fd);
ret = bpf_map_update_elem(map_fd, &map_key, &map_id, 0);
CHECK(ret != 0,
"bpf_map_update_elem failed",
"map_type=(%d) map_name(%s), error:%s\n",
map_type, map_name, strerror(errno));
}
}
static int create_outer_map(enum bpf_map_type map_type, __u32 inner_map_fd)
{
int outer_map_fd;
LIBBPF_OPTS(bpf_map_create_opts, attr);
attr.inner_map_fd = inner_map_fd;
outer_map_fd = bpf_map_create(map_type, "outer_map", sizeof(__u32),
sizeof(__u32), OUTER_MAP_ENTRIES,
&attr);
CHECK(outer_map_fd < 0,
"outer bpf_map_create()",
"map_type=(%d), error:%s\n",
map_type, strerror(errno));
return outer_map_fd;
}
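/* Each value fetched from the outer map is an inner map id: re-open the
 * inner map by id and check that its single element still holds that same
 * id, proving the batch lookup returned the expected inner maps.
 */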
static void validate_fetch_results(int outer_map_fd,
__u32 *fetched_keys, __u32 *fetched_values,
__u32 max_entries_fetched)
{
__u32 inner_map_key, inner_map_value;
int inner_map_fd, entry, err;
__u32 outer_map_value;
for (entry = 0; entry < max_entries_fetched; ++entry) {
outer_map_value = fetched_values[entry];
inner_map_fd = bpf_map_get_fd_by_id(outer_map_value);
CHECK(inner_map_fd < 0,
"Failed to get inner map fd",
"from id(%d), error=%s\n",
outer_map_value, strerror(errno));
err = bpf_map_get_next_key(inner_map_fd, NULL, &inner_map_key);
CHECK(err != 0,
"Failed to get inner map key",
"error=%s\n", strerror(errno));
err = bpf_map_lookup_elem(inner_map_fd, &inner_map_key,
&inner_map_value);
close(inner_map_fd);
CHECK(err != 0,
"Failed to get inner map value",
"for key(%d), error=%s\n",
inner_map_key, strerror(errno));
/* Actual value validation */
CHECK(outer_map_value != inner_map_value,
"Failed to validate inner map value",
"fetched(%d) and lookedup(%d)!\n",
outer_map_value, inner_map_value);
}
}
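/* Fetch all OUTER_MAP_ENTRIES entries via batch lookup (optionally
 * lookup-and-delete), growing the chunk size and restarting on ENOSPC,
 * then validate everything that was returned.
 */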
static void fetch_and_validate(int outer_map_fd,
struct bpf_map_batch_opts *opts,
__u32 batch_size, bool delete_entries)
{
__u32 *fetched_keys, *fetched_values, total_fetched = 0;
__u32 batch_key = 0, fetch_count, step_size;
int err, max_entries = OUTER_MAP_ENTRIES;
__u32 value_size = sizeof(__u32);
/* All entries need to be fetched */
fetched_keys = calloc(max_entries, value_size);
fetched_values = calloc(max_entries, value_size);
CHECK((!fetched_keys || !fetched_values),
"Memory allocation failed for fetched_keys or fetched_values",
"error=%s\n", strerror(errno));
for (step_size = batch_size;
step_size <= max_entries;
step_size += batch_size) {
fetch_count = step_size;
err = delete_entries
? bpf_map_lookup_and_delete_batch(outer_map_fd,
total_fetched ? &batch_key : NULL,
&batch_key,
fetched_keys + total_fetched,
fetched_values + total_fetched,
&fetch_count, opts)
: bpf_map_lookup_batch(outer_map_fd,
total_fetched ? &batch_key : NULL,
&batch_key,
fetched_keys + total_fetched,
fetched_values + total_fetched,
&fetch_count, opts);
if (err && errno == ENOSPC) {
/* Fetch again with higher batch size */
total_fetched = 0;
continue;
}
CHECK((err < 0 && (errno != ENOENT)),
"lookup with steps failed",
"error: %s\n", strerror(errno));
/* Update the total fetched number */
total_fetched += fetch_count;
if (err)
break;
}
CHECK((total_fetched != max_entries),
"Unable to fetch expected entries !",
"total_fetched(%d) and max_entries(%d) error: (%d):%s\n",
total_fetched, max_entries, errno, strerror(errno));
/* validate the fetched entries */
validate_fetch_results(outer_map_fd, fetched_keys,
fetched_values, total_fetched);
printf("batch_op(%s) is successful with batch_size(%d)\n",
delete_entries ? "LOOKUP_AND_DELETE" : "LOOKUP", batch_size);
free(fetched_keys);
free(fetched_values);
}
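/* End-to-end flow: create the inner maps, create the outer map-in-map,
 * batch-update it with the inner map fds, batch-lookup the entries back
 * (the values come back as inner map ids), and for hash-of-maps also
 * exercise lookup-and-delete.
 */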
static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
enum bpf_map_type inner_map_type)
{
__u32 *outer_map_keys, *inner_map_fds;
__u32 max_entries = OUTER_MAP_ENTRIES;
LIBBPF_OPTS(bpf_map_batch_opts, opts);
__u32 value_size = sizeof(__u32);
int batch_size[2] = {5, 10};
__u32 map_index, op_index;
int outer_map_fd, ret;
outer_map_keys = calloc(max_entries, value_size);
inner_map_fds = calloc(max_entries, value_size);
CHECK((!outer_map_keys || !inner_map_fds),
"Memory allocation failed for outer_map_keys or inner_map_fds",
"error=%s\n", strerror(errno));
create_inner_maps(inner_map_type, inner_map_fds);
outer_map_fd = create_outer_map(outer_map_type, *inner_map_fds);
/* create outer map keys */
for (map_index = 0; map_index < max_entries; map_index++)
outer_map_keys[map_index] =
((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
? 9 : 1000) - map_index;
/* batch operation - map_update */
ret = bpf_map_update_batch(outer_map_fd, outer_map_keys,
inner_map_fds, &max_entries, &opts);
CHECK(ret != 0,
"Failed to update the outer map batch ops",
"error=%s\n", strerror(errno));
/* batch operation - map_lookup */
for (op_index = 0; op_index < 2; ++op_index)
fetch_and_validate(outer_map_fd, &opts,
batch_size[op_index], false);
/* batch operation - map_lookup_delete */
if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
fetch_and_validate(outer_map_fd, &opts,
max_entries, true /*delete*/);
/* close all map fds */
for (map_index = 0; map_index < max_entries; map_index++)
close(inner_map_fds[map_index]);
close(outer_map_fd);
free(inner_map_fds);
free(outer_map_keys);
}
void test_map_in_map_batch_ops_array(void)
{
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY);
printf("%s:PASS with inner ARRAY map\n", __func__);
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH);
printf("%s:PASS with inner HASH map\n", __func__);
}
void test_map_in_map_batch_ops_hash(void)
{
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY);
printf("%s:PASS with inner ARRAY map\n", __func__);
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH);
printf("%s:PASS with inner HASH map\n", __func__);
}
/* repo: linux-master, file: tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c */
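/* The entries below are verifier test cases for BPF-to-BPF and kfunc
 * calls. Each fixup_kfunc_btf_id pair names a kfunc and the index of the
 * BPF_PSEUDO_KFUNC_CALL instruction that the test harness patches with
 * that kfunc's BTF id before loading the program.
 */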
{
"calls: invalid kfunc call not eliminated",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = REJECT,
.errstr = "invalid kernel function call not eliminated in verifier pass",
},
{
"calls: invalid kfunc call unreachable",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail1", 2 },
},
},
{
"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail2", 2 },
},
},
{
"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_fail3", 2 },
},
},
{
"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_pass_ctx", 2 },
},
},
{
"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "arg#0 pointer type UNKNOWN must point to scalar",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
},
},
{
"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "Possibly NULL pointer passed to trusted arg0",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 5 },
},
},
{
"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R1 must have zero offset when passed to release func",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_memb_release", 8 },
},
},
{
"calls: invalid kfunc call: don't match first member type when passed to release kfunc",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_memb_acquire", 1 },
{ "bpf_kfunc_call_memb1_release", 5 },
},
},
{
"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_offset", 9 },
{ "bpf_kfunc_call_test_release", 12 },
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr = "ptr R1 off=-4 disallowed",
},
{
"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 9 },
{ "bpf_kfunc_call_test_release", 13 },
{ "bpf_kfunc_call_test_release", 17 },
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
},
{
"calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_ref", 8 },
{ "bpf_kfunc_call_test_ref", 10 },
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr = "R1 must be",
},
{
"calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_ref", 8 },
{ "bpf_kfunc_call_test_release", 10 },
},
.result_unpriv = REJECT,
.result = ACCEPT,
},
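/* The remaining cases cover plain BPF-to-BPF subprogram calls: call
 * target validation, caller/callee register and stack semantics, and
 * stack depth accounting across frames.
 */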
{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: not on unprivileged",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 1,
},
{
"calls: div by 0 in subprog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(BPF_REG_2, 0),
BPF_MOV32_IMM(BPF_REG_3, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"calls: multiple ret types in subprog 1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: multiple ret types in subprog 2",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
offsetof(struct __sk_buff, data)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 16 },
.result = REJECT,
.errstr = "R0 min value is outside of the allowed memory range",
},
{
"calls: overlapping caller/callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
"calls: wrong recursive calls",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: wrong src reg",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: wrong off value",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "BPF_CALL uses reserved fields",
.result = REJECT,
},
{
"calls: jump back loop",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn 0 to 0",
.result = REJECT,
},
{
"calls: conditional call",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: conditional call 2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "back-edge from insn",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 1,
},
{
"calls: conditional call 4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: conditional call 5",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"calls: conditional call 6",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "infinite loop detected",
.result = REJECT,
},
{
"calls: using r0 returned by callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{
"calls: using uninit r0 from callee",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"calls: callee is using r1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
.result = ACCEPT,
.retval = TEST_DATA_LEN,
},
{
"calls: callee using args1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = POINTER_VALUE,
},
{
"calls: callee using wrong args2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"calls: callee using two args",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
offsetof(struct __sk_buff, len)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
offsetof(struct __sk_buff, len)),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
},
{
"calls: callee changing pkt pointers",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
/* clear_all_pkt_pointers() has to walk all frames
* to make sure that pkt pointers in the caller
* are cleared when callee is calling a helper that
* adjusts packet size
*/
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R6 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: ptr null check in subprog",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.fixup_map_hash_48b = { 3 },
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,
},
{
"calls: two calls with args",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = TEST_DATA_LEN + TEST_DATA_LEN,
},
{
"calls: calls with stack arith",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 42,
},
{
"calls: calls with misaligned stack access",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
.errstr = "misaligned stack access",
.result = REJECT,
},
{
"calls: calls control flow, jump test",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 43,
},
{
"calls: calls control flow, jump test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 43),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "jump out of range from insn 1 to 4",
.result = REJECT,
},
{
"calls: two calls with bad jump",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range from insn 11 to 9",
.result = REJECT,
},
{
"calls: recursive call. test1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.result = REJECT,
},
{
"calls: recursive call. test2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.result = REJECT,
},
{
"calls: unreachable code",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "unreachable insn 6",
.result = REJECT,
},
{
"calls: invalid call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: invalid call 2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "invalid destination",
.result = REJECT,
},
{
"calls: jumping across function bodies. test1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: jumping across function bodies. test2",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "jump out of range",
.result = REJECT,
},
{
"calls: call without exit",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "not an exit",
.result = REJECT,
},
{
"calls: call into middle of ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn",
.result = REJECT,
},
{
"calls: call into middle of other call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "last insn",
.result = REJECT,
},
{
"calls: subprog call with ld_abs in main prog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"calls: two calls with bad fallthrough",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "not an exit",
.result = REJECT,
},
{
"calls: two calls with stack read",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: two calls with stack write",
.insns = {
/* main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
/* write into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
/* read from stack frame of main prog */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
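/* The verifier caps the combined stack usage of a call chain at 512
 * bytes; the next tests probe that limit with pre-call and post-call
 * stack accesses.
 */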
{
"calls: stack overflow using two frames (pre-call access)",
.insns = {
/* prog 1 */
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* prog 2 */
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "combined stack size",
.result = REJECT,
},
{
"calls: stack overflow using two frames (post-call access)",
.insns = {
/* prog 1 */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_EXIT_INSN(),
/* prog 2 */
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "combined stack size",
.result = REJECT,
},
{
"calls: stack depth check using three frames. test1",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* A */
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
BPF_EXIT_INSN(),
/* B */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
/* stack_main=32, stack_A=256, stack_B=64
* and max(main+A, main+A+B) < 512
*/
.result = ACCEPT,
},
{
"calls: stack depth check using three frames. test2",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* A */
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
BPF_EXIT_INSN(),
/* B */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
/* stack_main=32, stack_A=64, stack_B=256
* and max(main+A, main+A+B) < 512
*/
.result = ACCEPT,
},
{
"calls: stack depth check using three frames. test3",
.insns = {
/* main */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* A */
BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
/* B */
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
/* stack_main=64, stack_A=224, stack_B=256
* and max(main+A, main+A+B) > 512
*/
.errstr = "combined stack",
.result = REJECT,
},
{
"calls: stack depth check using three frames. test4",
/* void main(void) {
* func1(0);
* func1(1);
* func2(1);
* }
* void func1(int alloc_or_recurse) {
* if (alloc_or_recurse) {
* frame_pointer[-300] = 1;
* } else {
* func2(alloc_or_recurse);
* }
* }
* void func2(int alloc_or_recurse) {
* if (alloc_or_recurse) {
* frame_pointer[-300] = 1;
* }
* }
*/
.insns = {
/* main */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* A */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
BPF_EXIT_INSN(),
/* B */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = REJECT,
.errstr = "combined stack",
},
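/* Call depth itself is also limited: this chain of main plus eight
 * nested callees exceeds the verifier's frame limit and is rejected
 * even though no frame uses any stack.
 */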
{
"calls: stack depth check using three frames. test5",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
BPF_EXIT_INSN(),
/* A */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
BPF_EXIT_INSN(),
/* B */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
BPF_EXIT_INSN(),
/* C */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
BPF_EXIT_INSN(),
/* D */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
BPF_EXIT_INSN(),
/* E */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
BPF_EXIT_INSN(),
/* F */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
BPF_EXIT_INSN(),
/* G */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
BPF_EXIT_INSN(),
/* H */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "call stack",
.result = REJECT,
},
{
"calls: stack depth check in dead code",
.insns = {
/* main */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
BPF_EXIT_INSN(),
/* A */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* B */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
BPF_EXIT_INSN(),
/* C */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
BPF_EXIT_INSN(),
/* D */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
BPF_EXIT_INSN(),
/* E */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
BPF_EXIT_INSN(),
/* F */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
BPF_EXIT_INSN(),
/* G */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
BPF_EXIT_INSN(),
/* H */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "call stack",
.result = REJECT,
},
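/* Cross-frame stack access rules: a callee may store scalars through a
 * pointer into its caller's stack, but spilling pointers into the
 * caller's frame or returning a pointer to its own frame is rejected.
 */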
{
"calls: spill into caller stack frame",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "cannot spill",
.result = REJECT,
},
{
"calls: write into caller stack frame",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
.retval = 42,
},
{
"calls: write into callee stack frame",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "cannot return stack pointer",
.result = REJECT,
},
{
"calls: two calls with stack write and void return",
.insns = {
/* main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* write into stack frame of main prog */
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(), /* void return */
},
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"calls: ambiguous return value",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "allowed for",
.result_unpriv = REJECT,
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"calls: two calls that return map_value",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_8b = { 23 },
.result = ACCEPT,
},
{
"calls: two calls that return map_value with bool condition",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_8b = { 23 },
.result = ACCEPT,
},
{
"calls: two calls that return map_value with incorrect bool check",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* call 3rd function twice */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* first time with fp-8 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
/* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(), /* return 0 */
/* write map_value_ptr into stack frame of main prog */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_8b = { 23 },
.result = REJECT,
.errstr = "invalid read from stack R7 off=-16 size=8",
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = ACCEPT,
},
{
"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
/* write map_value_ptr into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
BPF_JMP_IMM(BPF_JA, 0, 0, -30),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -8),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls that receive map_value_ptr_or_null via arg. test1",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 1 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = ACCEPT,
},
{
"calls: two calls that receive map_value_ptr_or_null via arg. test2",
.insns = {
/* main prog */
/* pass fp-16, fp-8 into a function */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
/* 1st lookup from map */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
/* 2nd lookup from map */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 2 */
/* if arg2 == 1 do *arg1 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
/* if arg4 == 0 do *arg3 = 0 */
BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
/* fetch map_value_ptr from the stack of this function */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: pkt_ptr spill into caller stack",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.retval = POINTER_VALUE,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
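/* The "pkt_ptr spill" variants that follow differ in where the range check
 * sits relative to the spill, what the caller stored in the slot beforehand,
 * and whether the caller reads the spilled pointer back after the call; that
 * combination decides whether the pkt_ptr marking left in the caller's stack
 * may still be trusted.
 */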
{
"calls: pkt_ptr spill into caller stack 2",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
/* Marking is still kept, but not safe in all cases. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 3",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Marking is still kept and safe here. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* now the pkt range is verified, read pkt_ptr from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 4",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* Check marking propagated. */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 5",
.insns = {
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "same insn cannot be used with different",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 8",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
/* spill checked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 9",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_5, 0),
/* spill unchecked pkt_ptr into stack of caller */
BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_5, 1),
/* don't read back pkt_ptr from stack here */
/* write 4 bytes into packet */
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: caller stack init to zero or map_value_or_null",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
/* fetch map_value_or_null or const_zero from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* store into map_value */
BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
/* subprog 1 */
/* if (ctx == 0) return; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
/* else bpf_map_lookup() and *(fp - 8) = r0 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 13 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"calls: stack init to zero and pruning",
.insns = {
/* first make allocated_stack 16 byte */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
/* now fork the execution such that the false branch
* of the JGT insn will be verified second and it skips zero
* init of the fp-8 stack slot. If stack liveness marking
* is missing live_read marks from the map_lookup call
* processing, then pruning will incorrectly assume
* that the fp-8 stack slot was unused in the fall-through
* branch and will accept the program incorrectly.
*/
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 7 },
.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"calls: ctx read at start of subprog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"calls: cross frame pruning",
.insns = {
/* r8 = !!random();
* call pruner()
* if (r8)
* do something bad;
*/
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.errstr = "!read_ok",
.result = REJECT,
},
{
"calls: cross frame pruning - liveness propagation",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.errstr = "!read_ok",
.result = REJECT,
},
/* Make sure that verifier.c:states_equal() considers IDs from all
* frames when building 'idmap' for check_ids().
*/
{
"calls: check_ids() across call boundary",
.insns = {
/* Function main() */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
/* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
* ; stack frame
*/
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_CALL_REL(2),
/* exit 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* Function foo()
*
* r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers,
* r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value
*/
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
/* r7 = ktime_get_ns() */
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r6 = ktime_get_ns() */
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* if r6 > r7 goto +1 ; no new information about the state is derived from
* ; this check, thus produced verifier states differ
* ; only in 'insn_idx'
* r9 = r8
*/
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
/* r9 = *r9 ; verifier gets to this point via two paths:
* ; (I) one including r9 = r8, verified first;
* ; (II) one excluding r9 = r8, verified next.
* ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
* ; Suppose that checkpoint is created here via path (I).
* ; When verifying via (II) the r9.id must be compared against
* ; frame[0].fp[-24].id, otherwise (I) and (II) would be
* ; incorrectly deemed equivalent.
* if r9 == 0 goto <exit>
*/
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
/* r8 = *r8 ; read map value via r8, this is not safe
* r0 = *r8 ; because r8 might be not equal to r9.
*/
BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
/* exit 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.flags = BPF_F_TEST_STATE_FREQ,
.fixup_map_hash_8b = { 3, 9 },
.result = REJECT,
.errstr = "R8 invalid mem access 'map_value_or_null'",
.result_unpriv = REJECT,
.errstr_unpriv = "",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
| linux-master | tools/testing/selftests/bpf/verifier/calls.c |
#define BPF_SOCK_ADDR_STORE(field, off, res, err, flgs) \
{ \
"wide store to bpf_sock_addr." #field "[" #off "]", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
offsetof(struct bpf_sock_addr, field[off])), \
BPF_EXIT_INSN(), \
}, \
.result = res, \
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
.errstr = err, \
.flags = flgs, \
}
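/* Each BPF_SOCK_ADDR_STORE() instantiation below is one test: an 8-byte
 * (BPF_DW) store into bpf_sock_addr.field[off]. Such a wide context access
 * is only accepted when the offset is u64 aligned and the access stays
 * within the ip6 address field, as the errstr offsets show.
 */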
/* user_ip6[0] is u64 aligned */
BPF_SOCK_ADDR_STORE(user_ip6, 0, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_STORE(user_ip6, 1, REJECT,
"invalid bpf_context access off=12 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_STORE(user_ip6, 2, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_STORE(user_ip6, 3, REJECT,
"invalid bpf_context access off=20 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
/* msg_src_ip6[0] is _not_ u64 aligned */
BPF_SOCK_ADDR_STORE(msg_src_ip6, 0, REJECT,
"invalid bpf_context access off=44 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 1, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 2, REJECT,
"invalid bpf_context access off=52 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 3, REJECT,
"invalid bpf_context access off=56 size=8", 0),
#undef BPF_SOCK_ADDR_STORE
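/* Same field layout rules as the store tests above, exercised here for
 * 8-byte loads instead of stores.
 */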
#define BPF_SOCK_ADDR_LOAD(field, off, res, err, flgs) \
{ \
"wide load from bpf_sock_addr." #field "[" #off "]", \
.insns = { \
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, \
offsetof(struct bpf_sock_addr, field[off])), \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_EXIT_INSN(), \
}, \
.result = res, \
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
.errstr = err, \
.flags = flgs, \
}
/* user_ip6[0] is u64 aligned */
BPF_SOCK_ADDR_LOAD(user_ip6, 0, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_LOAD(user_ip6, 1, REJECT,
"invalid bpf_context access off=12 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_LOAD(user_ip6, 2, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_LOAD(user_ip6, 3, REJECT,
"invalid bpf_context access off=20 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
/* msg_src_ip6[0] is _not_ u64 aligned */
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 0, REJECT,
"invalid bpf_context access off=44 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 1, ACCEPT,
NULL, 0),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 2, REJECT,
"invalid bpf_context access off=52 size=8",
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 3, REJECT,
"invalid bpf_context access off=56 size=8", 0),
#undef BPF_SOCK_ADDR_LOAD
| linux-master | tools/testing/selftests/bpf/verifier/wide_access.c |
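/* The jit tests below run short ALU/JMP programs to completion and compare
 * the runtime return value, covering shifts, ldimm64, mul/div/mod and jump
 * padding; all of them are expected to load and return the given retval.
 */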
{
"jit: lsh, rsh, arsh by 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 0xff),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: lsh, rsh, arsh by reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_MOV64_IMM(BPF_REG_1, 0xff),
BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_0),
BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_1),
BPF_ALU32_REG(BPF_RSH, BPF_REG_4, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0xff, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_4, BPF_REG_4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: mov32 for ldimm64, 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: mov32 for ldimm64, 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: various mul tests",
.insns = {
BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 0xefefef),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0xefefef),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
BPF_LD_IMM64(BPF_REG_2, 0x2ad4d4aaULL),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 0x2b),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
BPF_LD_IMM64(BPF_REG_5, 0xeeff0d413122ULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_5, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_5, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: various div tests",
.insns = {
BPF_LD_IMM64(BPF_REG_2, 0xefeffeULL),
BPF_LD_IMM64(BPF_REG_0, 0xeeff0d413122ULL),
BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL),
BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xeeff0d413122ULL),
BPF_ALU64_IMM(BPF_DIV, BPF_REG_3, 0xfefeeeULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_2, 0xaa93ULL),
BPF_ALU64_IMM(BPF_MOD, BPF_REG_1, 0xbeefULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL),
BPF_LD_IMM64(BPF_REG_3, 0xbeefULL),
BPF_ALU64_REG(BPF_MOD, BPF_REG_1, BPF_REG_3),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_2, 0x5ee1dULL),
BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL),
BPF_LD_IMM64(BPF_REG_3, 0x2bULL),
BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_3),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: jsgt, jslt",
.insns = {
BPF_LD_IMM64(BPF_REG_1, 0x80000000ULL),
BPF_LD_IMM64(BPF_REG_2, 0x0ULL),
BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JSLT, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: torturous jumps, imm8 nop jmp and pure jump padding",
.insns = { },
.fill_helper = bpf_fill_torturous_jumps,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"jit: torturous jumps, imm32 nop jmp and jmp_cond padding",
.insns = { },
.fill_helper = bpf_fill_torturous_jumps,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
},
{
"jit: torturous jumps in subprog",
.insns = { },
.fill_helper = bpf_fill_torturous_jumps,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 3,
},
| linux-master | tools/testing/selftests/bpf/verifier/jit.c |
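/* BPF_LD_ABS/BPF_LD_IND implicitly use the skb in R6 and clobber the
 * caller-saved registers R1-R5. The "check calling conv" tests reject any
 * read of R1-R5 after the load, while R7 (callee-saved) remains readable.
 */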
{
"ld_abs: check calling conv, r1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r2",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r3",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.errstr = "R3 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r4",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_EXIT_INSN(),
},
.errstr = "R4 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r5",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_5, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
BPF_EXIT_INSN(),
},
.errstr = "R5 !read_ok",
.result = REJECT,
},
{
"ld_abs: check calling conv, r7",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_7, 0),
BPF_LD_ABS(BPF_W, -0x200000),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"ld_abs: tests on r6 and skb data reload helper",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 42 /* ultimate return value */,
},
{
"ld_abs: invalid op 1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_DW, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "unknown opcode",
},
{
"ld_abs: invalid op 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 256),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "unknown opcode",
},
{
"ld_abs: nmap reduced",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_H, 12),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
BPF_LD_ABS(BPF_H, 12),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
BPF_MOV32_IMM(BPF_REG_0, 18),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
BPF_LD_IND(BPF_W, BPF_REG_7, 14),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
BPF_MOV32_IMM(BPF_REG_0, 280971478),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
BPF_LD_ABS(BPF_H, 12),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
BPF_MOV32_IMM(BPF_REG_0, 22),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
BPF_LD_IND(BPF_H, BPF_REG_7, 14),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
BPF_MOV32_IMM(BPF_REG_0, 17366),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV32_IMM(BPF_REG_0, 256),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.data = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 256,
},
{
"ld_abs: div + abs, test 1",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
BPF_LD_ABS(BPF_B, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
BPF_LD_IND(BPF_B, BPF_REG_8, -70),
BPF_EXIT_INSN(),
},
.data = {
10, 20, 30, 40, 50,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 10,
},
{
"ld_abs: div + abs, test 2",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
BPF_LD_ABS(BPF_B, 128),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
BPF_LD_IND(BPF_B, BPF_REG_8, -70),
BPF_EXIT_INSN(),
},
.data = {
10, 20, 30, 40, 50,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"ld_abs: div + abs, test 3",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.data = {
10, 20, 30, 40, 50,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"ld_abs: div + abs, test 4",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
BPF_LD_ABS(BPF_B, 256),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.data = {
10, 20, 30, 40, 50,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"ld_abs: vlan + abs, test 1",
.insns = { },
.data = {
0x34,
},
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0xbef,
},
{
"ld_abs: vlan + abs, test 2",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
},
.data = {
0x34,
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 42,
},
{
"ld_abs: jump around ld_abs",
.insns = { },
.data = {
10, 11,
},
.fill_helper = bpf_fill_jump_around_ld_abs,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 10,
},
| linux-master | tools/testing/selftests/bpf/verifier/ld_abs.c |
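/* The map_kptr tests exercise kernel-pointer (kptr) fields embedded in map
 * values: accesses must be BPF_DW sized, at a constant and properly aligned
 * offset, and a referenced kptr can only be exchanged via bpf_kptr_xchg(),
 * never overwritten with plain ST/STX stores.
 */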
/* Common tests */
{
"map_kptr: BPF_ST imm != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "BPF_ST imm must be 0 when storing to kptr at off=0",
},
{
"map_kptr: size != bpf_size_to_bytes(BPF_DW)",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access size must be BPF_DW",
},
{
"map_kptr: map_value non-const var_off",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access cannot have variable offset",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map_kptr: bpf_kptr_xchg non-const var_off",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 doesn't have constant offset. kptr has to be at the constant offset",
},
{
"map_kptr: unaligned boundary load/store",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 7),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr access misaligned expected=0 off=7",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map_kptr: reject var_off != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
},
/* Tests for unreferenced PTR_TO_BTF_ID */
{
"map_kptr: unref: reject btf_struct_ids_match == false",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc expected=ptr_prog_test",
},
{
"map_kptr: unref: loaded pointer marked as untrusted",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
},
{
"map_kptr: unref: correct in kernel type size",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "access beyond struct prog_test_ref_kfunc at off 32 size 8",
},
{
"map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 16),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 type=untrusted_ptr_ expected=percpu_ptr_",
},
{
"map_kptr: unref: no reference state created",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = ACCEPT,
},
{
"map_kptr: unref: bpf_kptr_xchg rejected",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "off=0 kptr isn't referenced kptr",
},
/* Tests for referenced PTR_TO_BTF_ID */
{
"map_kptr: ref: loaded pointer marked as untrusted",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "R1 type=rcu_ptr_or_null_ expected=percpu_ptr_",
},
{
"map_kptr: ref: reject off != 0",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member",
},
{
"map_kptr: ref: reference state created and released on xchg",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "Unreleased reference id=5 alloc_insn=20",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 15 },
}
},
{
"map_kptr: ref: reject STX",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "store to referenced kptr disallowed",
},
{
"map_kptr: ref: reject ST",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 8, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "store to referenced kptr disallowed",
},
{
"map_kptr: reject helper access to kptr",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
.errstr = "kptr cannot be accessed indirectly by helper",
},
| linux-master | tools/testing/selftests/bpf/verifier/map_kptr.c |
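/* sample_period is a 64-bit field, so for the byte/half/word loads the
 * big-endian branch bumps the offset by 7/6/4 to keep the narrow load on
 * the least significant bytes of the field, matching what the little-endian
 * load at the field's base offset reads.
 */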
{
"check bpf_perf_event_data->sample_period byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period word load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
#else
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"check bpf_perf_event_data->sample_period dword load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_perf_event_data, sample_period)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
| linux-master | tools/testing/selftests/bpf/verifier/perf_event_sample_period.c |
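/* __sk_buff context access tests: which fields each program type may read
 * or write, the access sizes that are allowed, and the alignment rules
 * enforced when a test is loaded with F_LOAD_WITH_STRICT_ALIGNMENT.
 */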
{
"access skb fields ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, queue_mapping)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, protocol)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_present)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, napi_id)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"access skb fields bad1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"access skb fields bad2",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 4 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad3",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -12),
},
.fixup_map_hash_8b = { 6 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad4",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -13),
},
.fixup_map_hash_8b = { 7 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"invalid access __sk_buff family",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, family)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip4)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff local_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip4)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff local_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_port)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_port)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
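/* The socket tuple fields (family, remote/local IP and port) rejected
 * above for the default socket filter program type are readable once the
 * program type is BPF_PROG_TYPE_SK_SKB.
 */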
{
"valid access __sk_buff family",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, family)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff local_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff local_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"invalid access of tc_classid for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"invalid access of skb->mark for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"check skb->mark is not writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"check skb->tc_index is writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"check skb->priority is writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, priority)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"direct packet read for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"direct packet write for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"overlapping checks for direct packet access SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"check skb->mark is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check skb->tc_index is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
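/* skb->cb[] is 20 bytes (five u32 slots) of per-program scratch space.
 * The tests below read and write every slot at byte, half, word and
 * double-word granularity; the misaligned variants are rejected when
 * loaded with F_LOAD_WITH_STRICT_ALIGNMENT.
 */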
{
"check cb access: byte",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"__sk_buff->hash, offset 0, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"__sk_buff->tc_index, offset 3, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 3),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
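/* skb->hash is a read-only u32: narrow byte and half loads of any part of
 * the field are permitted, while stores are rejected.
 */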
{
"check skb->hash byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash byte load permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash byte load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash byte load permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: byte, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: half",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: half, unaligned",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check __sk_buff->hash, offset 0, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->tc_index, offset 2, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash half load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash half load not permitted, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check skb->hash half load not permitted, unaligned 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check cb access: half, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: word",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: word, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: double, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check out of range skb->cb access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 256),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
{
"write skb fields from socket prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
},
{
"write skb fields from tc_cls_act prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tstamp)),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tstamp)),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "",
.result_unpriv = REJECT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"check skb->data half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
},
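/* gso_segs, gso_size and hwtstamp are readable from cgroup skb and tc
 * programs; the write attempts from cgroup skb below are rejected and the
 * error string carries the field's offset within __sk_buff.
 */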
{
"read gso_segs from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, gso_segs)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read gso_segs from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, gso_segs)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"write gso_segs from CGROUP_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, gso_segs)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access off=164 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read gso_segs from CLS",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, gso_segs)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"read gso_size from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, gso_size)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read gso_size from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, gso_size)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"write gso_size from CGROUP_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, gso_size)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access off=176 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read gso_size from CLS",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, gso_size)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"padding after gso_size is not accessible",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetofend(struct __sk_buff, gso_size)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access off=180 size=4",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"read hwtstamp from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hwtstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read hwtstamp from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, hwtstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"write hwtstamp from CGROUP_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hwtstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access off=184 size=8",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read hwtstamp from CLS",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hwtstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"check wire_len is not readable by sockets",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, wire_len)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check wire_len is readable by tc classifier",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, wire_len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"check wire_len is not writable by tc classifier",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, wire_len)),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"pkt > pkt_end taken check",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end)
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data)
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42
BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14
BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1), // 8. if r3 > r2 goto 10
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),	// 9. r2 = *(u16 *)(r1 + 9)
BPF_MOV64_IMM(BPF_REG_0, 0), // 10. r0 = 0
BPF_EXIT_INSN(), // 11. exit
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"pkt_end < pkt taken check",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, // 0. r2 = *(u32 *)(r1 + data_end)
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, // 1. r4 = *(u32 *)(r1 + data)
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_4), // 2. r3 = r4
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42), // 3. r3 += 42
BPF_MOV64_IMM(BPF_REG_1, 0), // 4. r1 = 0
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2), // 5. if r3 > r2 goto 8
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14), // 6. r4 += 14
BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), // 7. r1 = r4
BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1), // 8. if r2 < r3 goto 10
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),	// 9. r2 = *(u16 *)(r1 + 9)
BPF_MOV64_IMM(BPF_REG_0, 0), // 10. r0 = 0
BPF_EXIT_INSN(), // 11. exit
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
| linux-master | tools/testing/selftests/bpf/verifier/ctx_skb.c |
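/* Minimal structural checks: a program must end in an exit (or jump) and
 * R0 must be initialized before BPF_EXIT.
 */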
{
"empty prog",
.insns = {
},
.errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
"only exit insn",
.insns = {
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "not an exit",
.result = REJECT,
},
| linux-master | tools/testing/selftests/bpf/verifier/basic.c |
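/* BPF_ATOMIC_OP(size, BPF_CMPXCHG, dst, src, off) implements
 *
 *	old = *(dst + off);
 *	if (old == R0)
 *		*(dst + off) = src;
 *	R0 = old;
 *
 * i.e. R0 implicitly carries the expected value and always receives the
 * old memory value (zero-extended for BPF_W).
 */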
{
"atomic compare-and-exchange smoketest - 64bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
/* old = atomic_cmpxchg(&val, 2, 4); */
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 3) exit(2); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* if (val != 3) exit(3); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* old = atomic_cmpxchg(&val, 3, 4); */
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 3) exit(4); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 4),
BPF_EXIT_INSN(),
/* if (val != 4) exit(5); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"atomic compare-and-exchange smoketest - 32bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
/* old = atomic_cmpxchg(&val, 2, 4); */
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 3) exit(2); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* if (val != 3) exit(3); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* old = atomic_cmpxchg(&val, 3, 4); */
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 3) exit(4); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 4),
BPF_EXIT_INSN(),
/* if (val != 4) exit(5); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV32_IMM(BPF_REG_0, 5),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Can't use cmpxchg on uninit src reg",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "!read_ok",
},
{
"Can't use cmpxchg on uninit memory",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack",
},
{
"BPF_W cmpxchg should zero top 32 bits",
.insns = {
/* r0 = U64_MAX; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
/* u64 val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
BPF_MOV32_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* r1 = 0x00000000FFFFFFFFull; */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
/* if (r0 != r1) exit(1); */
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Dest pointer in r0 - fail",
.insns = {
/* val = 0; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, 1); */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (r0 != 0) exit(1); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r0 = atomic_cmpxchg(&val, r0, 0); */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 2",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 3",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid size of register fill",
.errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed, check 4",
.insns = {
/* r0 = &val */
BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r10 */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R10 partial copy of pointer",
},
{
"Dest pointer in r0 - succeed, check 5",
.insns = {
/* r0 = &val */
BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
/* r5 = &val */
BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, r5); */
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access",
.errstr_unpriv = "R10 partial copy of pointer",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c |
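/* BPF_XCHG unconditionally stores the source register into memory and
 * returns the old memory value in that same register.
 */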
{
"atomic exchange smoketest - 64bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
/* old = atomic_xchg(&val, 4); */
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 3) exit(1); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* if (val != 4) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"atomic exchange smoketest - 32bit",
.insns = {
/* val = 3; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
/* old = atomic_xchg(&val, 4); */
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_ATOMIC_OP(BPF_W, BPF_XCHG, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 3) exit(1); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* if (val != 4) exit(2); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_xchg.c |
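/* Without BPF_FETCH the source register is left untouched; with BPF_FETCH
 * it receives the value the memory location held before the atomic AND.
 */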
{
"BPF_ATOMIC_AND without fetch",
.insns = {
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* atomic_and(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_AND, BPF_REG_10, BPF_REG_1, -8),
/* if (val != 0x010) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x010, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* r1 should not be clobbered, no BPF_FETCH flag */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC_AND with fetch",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 123),
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* old = atomic_fetch_and(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 0x110) exit(3); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x010) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC_AND with fetch 32bit",
.insns = {
/* r0 = (s64) -1 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
/* val = 0x110; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
/* old = atomic_fetch_and(&val, 0x011); */
BPF_MOV32_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 0x110) exit(3); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x010) exit(2); */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
BPF_MOV32_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* Check R0 wasn't clobbered (for fear of x86 JIT bug)
* It should be -1 so add 1 to get exit code.
*/
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC_AND with fetch - r0 as source reg",
.insns = {
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* old = atomic_fetch_and(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_0, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
/* if (old != 0x110) exit(3); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x010) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_and.c |
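/* Precision tracking tests: with VERBOSE_ACCEPT the program is expected to
 * load and .errstr is matched against the verifier's verbose log (the
 * mark_precise backtracking lines) rather than against a failure message.
 * BPF_F_TEST_STATE_FREQ forces more frequent verifier state checkpoints so
 * the pruning and precision propagation paths are exercised.
 */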
{
"precise: test 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_array_48b = { 1 },
.result = VERBOSE_ACCEPT,
.errstr =
"mark_precise: frame0: last_idx 26 first_idx 20\
mark_precise: frame0: regs=r2 stack= before 25\
mark_precise: frame0: regs=r2 stack= before 24\
mark_precise: frame0: regs=r2 stack= before 23\
mark_precise: frame0: regs=r2 stack= before 22\
mark_precise: frame0: regs=r2 stack= before 20\
mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 19 first_idx 10\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
mark_precise: frame0: regs=r8,r9 stack= before 17\
mark_precise: frame0: regs=r0,r9 stack= before 15\
mark_precise: frame0: regs=r0,r9 stack= before 14\
mark_precise: frame0: regs=r9 stack= before 13\
mark_precise: frame0: regs=r9 stack= before 12\
mark_precise: frame0: regs=r9 stack= before 11\
mark_precise: frame0: regs=r9 stack= before 10\
mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_array_48b = { 1 },
.result = VERBOSE_ACCEPT,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
mark_precise: frame0: last_idx 26 first_idx 22\
mark_precise: frame0: regs=r2 stack= before 25\
mark_precise: frame0: regs=r2 stack= before 24\
mark_precise: frame0: regs=r2 stack= before 23\
mark_precise: frame0: regs=r2 stack= before 22\
mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 20 first_idx 20\
mark_precise: frame0: regs=r2,r9 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 19 first_idx 17\
mark_precise: frame0: regs=r2,r9 stack= before 19\
mark_precise: frame0: regs=r9 stack= before 18\
mark_precise: frame0: regs=r8,r9 stack= before 17\
mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: cross frame pruning",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "!read_ok",
.result = REJECT,
},
{
"precise: ST insn causing spi > allocated_stack",
.insns = {
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
mark_precise: frame0: parent state regs=r4 stack=:\
mark_precise: frame0: last_idx 4 first_idx 2\
mark_precise: frame0: regs=r4 stack= before 4\
mark_precise: frame0: regs=r4 stack= before 3\
mark_precise: frame0: regs= stack=-8 before 2\
mark_precise: frame0: falling back to forcing all scalars precise\
force_precise: frame0: forcing r0 to be precise\
mark_precise: frame0: last_idx 5 first_idx 5\
mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
{
"precise: STX insn causing spi > allocated_stack",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
mark_precise: frame0: parent state regs=r4 stack=:\
mark_precise: frame0: last_idx 5 first_idx 3\
mark_precise: frame0: regs=r4 stack= before 5\
mark_precise: frame0: regs=r4 stack= before 4\
mark_precise: frame0: regs= stack=-8 before 3\
mark_precise: frame0: falling back to forcing all scalars precise\
force_precise: frame0: forcing r0 to be precise\
force_precise: frame0: forcing r0 to be precise\
force_precise: frame0: forcing r0 to be precise\
force_precise: frame0: forcing r0 to be precise\
mark_precise: frame0: last_idx 6 first_idx 6\
mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
{
"precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
BPF_LD_MAP_FD(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
BPF_MOV64_IMM(BPF_REG_2, 0x1000),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_ringbuf = { 1 },
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ | F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
.result = REJECT,
},
{
"precise: program doesn't prematurely prune branches",
.insns = {
BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
BPF_LD_MAP_FD(BPF_REG_4, 0),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 13 },
.prog_type = BPF_PROG_TYPE_XDP,
.result = REJECT,
.errstr = "register with unbounded min value is not allowed",
},
| linux-master | tools/testing/selftests/bpf/verifier/precise.c |
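/* Jump and branch-target tests: stores to different stack slots are
 * selected by comparisons on R1. The programs are accepted for privileged
 * loads; unprivileged loads are rejected because the comparisons on the R1
 * context pointer count as pointer comparisons.
 */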
{
"jump test 1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 14),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 5),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 3",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 19),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JA, 0, 0, 15),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
BPF_JMP_IMM(BPF_JA, 0, 0, 7),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 24 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = -ENOENT,
},
{
"jump test 4",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 5",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
},
.result = ACCEPT,
.retval = 2,
},
{
"jump test 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
},
.result = ACCEPT,
.retval = 3,
},
{
"jump test 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_JMP_IMM(BPF_JA, 0, 0, -20),
},
.result = ACCEPT,
.retval = 3,
},
{
"jump/call test 9",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "jump out of range from insn 1 to 4",
},
{
"jump/call test 10",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "last insn is not an exit or jmp",
},
{
"jump/call test 11",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 26),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -31),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 3,
},
{
"jump & dead code elimination",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 32767),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_3, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0x8000, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32767),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 0, 1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
},
| linux-master | tools/testing/selftests/bpf/verifier/jump.c |
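/* Note on the register numbers in the error strings below: dst_reg and
 * src_reg are 4-bit fields in struct bpf_insn, so an encoded -1 truncates to
 * 15 and out-of-range values such as 14, 12 or 11 are kept as-is; the
 * verifier is expected to reject anything above R10 with "R<N> is invalid".
 */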
{
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"invalid dst register in STX",
.insns = {
BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid dst register in ST",
.insns = {
BPF_ST_MEM(BPF_B, 14, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid src register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
BPF_EXIT_INSN(),
},
.errstr = "R12 is invalid",
.result = REJECT,
},
{
"invalid dst register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R11 is invalid",
.result = REJECT,
},
| linux-master | tools/testing/selftests/bpf/verifier/basic_stx_ldx.c |
{
"valid 1,2,4,8-byte reads from bpf_sk_lookup",
.insns = {
/* 1-byte read from family field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 3),
/* 2-byte read from family field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 2),
/* 4-byte read from family field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
/* 1-byte read from protocol field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 3),
/* 2-byte read from protocol field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 2),
/* 4-byte read from protocol field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
/* 1-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 3),
/* 2-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
/* 4-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
/* 1-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 7),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 9),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 11),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 13),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 15),
/* 2-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
/* 4-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
/* 1-byte read from remote_port field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 3),
/* 2-byte read from remote_port field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 2),
/* 4-byte read from remote_port field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
/* 1-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 3),
/* 2-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 2),
/* 4-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
/* 1-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 6),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 7),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 9),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 10),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 11),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 13),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 14),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 15),
/* 2-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 6),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 10),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 14),
/* 4-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
/* 1-byte read from local_port field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 3),
/* 2-byte read from local_port field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 2),
/* 4-byte read from local_port field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
/* 1-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 3),
/* 2-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
/* 4-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
/* 8-byte read from sk field */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
/* invalid 8-byte reads from 4-byte fields in bpf_sk_lookup */
{
"invalid 8-byte read from bpf_sk_lookup family field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup protocol field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_ip4 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_ip6 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_port field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 8-byte read from bpf_sk_lookup local_ip4 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup local_ip6 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 8-byte read from bpf_sk_lookup local_port field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 8-byte read from bpf_sk_lookup ingress_ifindex field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */
{
"invalid 4-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 2-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 1-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
/* out of bounds and unaligned reads from bpf_sk_lookup */
{
"invalid 4-byte read past end of bpf_sk_lookup",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
sizeof(struct bpf_sk_lookup)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte unaligned read from bpf_sk_lookup at odd offset",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid 4-byte unaligned read from bpf_sk_lookup at even offset",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
/* in-bounds and out-of-bounds writes to bpf_sk_lookup */
{
"invalid 8-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 2-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 1-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte write past end of bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
sizeof(struct bpf_sk_lookup)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
| linux-master | tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c |
{
"junk insn",
.insns = {
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode 00",
.result = REJECT,
},
{
"junk insn2",
.insns = {
BPF_RAW_INSN(1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_LDX uses reserved fields",
.result = REJECT,
},
{
"junk insn3",
.insns = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode ff",
.result = REJECT,
},
{
"junk insn4",
.insns = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode ff",
.result = REJECT,
},
{
"junk insn5",
.insns = {
BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",
.result = REJECT,
},
| linux-master | tools/testing/selftests/bpf/verifier/junk_insn.c |
#define __INVALID_ATOMIC_ACCESS_TEST(op) \
{ \
"atomic " #op " access through non-pointer ", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_MOV64_IMM(BPF_REG_1, 0), \
BPF_ATOMIC_OP(BPF_DW, op, BPF_REG_1, BPF_REG_0, -8), \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
}, \
.result = REJECT, \
.errstr = "R1 invalid mem access 'scalar'" \
}
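/* Each instantiation below expands to one test entry; #op stringifies the
 * operation name, so e.g. __INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG) yields
 * (roughly) the following entry:
 *
 *	{
 *		"atomic BPF_XCHG access through non-pointer ",
 *		.insns = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_MOV64_IMM(BPF_REG_1, 0),
 *		BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, -8),
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *		},
 *		.result = REJECT,
 *		.errstr = "R1 invalid mem access 'scalar'"
 *	},
 *
 * R1 holds the scalar 0 rather than a map value or stack pointer, which is
 * why every variant is rejected with the same error string.
 */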
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG),
__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG),
| linux-master | tools/testing/selftests/bpf/verifier/atomic_invalid.c |
/* instructions used to output a skb-based software event, produced
* from code snippet:
* struct TMP {
* uint64_t tmp;
* } tt;
* tt.tmp = 5;
* bpf_perf_event_output(skb, &connection_tracking_event_map, 0,
* &tt, sizeof(tt));
* return 1;
*
* the bpf assembly from llvm is:
* 0: b7 02 00 00 05 00 00 00 r2 = 5
* 1: 7b 2a f8 ff 00 00 00 00 *(u64 *)(r10 - 8) = r2
* 2: bf a4 00 00 00 00 00 00 r4 = r10
* 3: 07 04 00 00 f8 ff ff ff r4 += -8
* 4: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0ll
* 6: b7 03 00 00 00 00 00 00 r3 = 0
* 7: b7 05 00 00 08 00 00 00 r5 = 8
* 8: 85 00 00 00 19 00 00 00 call 25
* 9: b7 00 00 00 01 00 00 00 r0 = 1
* 10: 95 00 00 00 00 00 00 00 exit
*
 * The reason I put the code here instead of fill_helpers is that the map
 * fixup is applied against the insns, not against the filled prog.
*/
#define __PERF_EVENT_INSNS__ \
BPF_MOV64_IMM(BPF_REG_2, 5), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), \
BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), \
BPF_LD_MAP_FD(BPF_REG_2, 0), \
BPF_MOV64_IMM(BPF_REG_3, 0), \
BPF_MOV64_IMM(BPF_REG_5, 8), \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
BPF_FUNC_perf_event_output), \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_EXIT_INSN(),
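/* In the tests below, .fixup_map_event_output = { 4 } refers to insn index 4,
 * i.e. the BPF_LD_MAP_FD above. The test harness presumably patches a real
 * perf event array fd into that ld_imm64, which occupies slots 4 and 5, as in
 * the llvm listing above where "r2 = 0ll" sits at 4 and "r3 = 0" at 6.
 */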
{
"perfevent for sockops",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for tc",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for lwt out",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_LWT_OUT,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for xdp",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for socket filter",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for sk_skb",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_SK_SKB,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup skb",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup dev",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_DEVICE,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup sysctl",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup sockopt",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
| linux-master | tools/testing/selftests/bpf/verifier/event_output.c |
{
"scale: scale test 1",
.insns = { },
.data = { },
.fill_helper = bpf_fill_scale,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"scale: scale test 2",
.insns = { },
.data = { },
.fill_helper = bpf_fill_scale,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
},
| linux-master | tools/testing/selftests/bpf/verifier/scale.c |
{
"dead code: start",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"dead code: end 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + func",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + two functions",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: function in the middle and mid of another func",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: middle of main before call",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
{
"dead code: start of a function",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
{
"dead code: zero extension",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
| linux-master | tools/testing/selftests/bpf/verifier/dead_code.c |
{
"jset32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
/* reg, high bits shouldn't be tested */
BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { 1ULL << 63, }
},
{ .retval = 2,
.data64 = { 1, }
},
{ .retval = 2,
.data64 = { 1ULL << 63 | 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jset32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { 1ULL << 63, }
},
{ .retval = 2,
.data64 = { 1, }
},
{ .retval = 2,
.data64 = { 1ULL << 63 | 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jset32: ignores upper bits",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_7, 0x8000000000000000),
BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jset32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jeq32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 2,
.retvals = {
{ .retval = 0,
.data64 = { -2, }
},
{ .retval = 2,
.data64 = { -1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jeq32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001),
BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { 2, }
},
{ .retval = 2,
.data64 = { 1, }
},
{ .retval = 2,
.data64 = { 1ULL << 63 | 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jeq32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jne32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 2,
.retvals = {
{ .retval = 2,
.data64 = { 1, }
},
{ .retval = 0,
.data64 = { -1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jne32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { 1, }
},
{ .retval = 2,
.data64 = { 2, }
},
{ .retval = 2,
.data64 = { 1ULL << 63 | 2, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jne32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jge32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { UINT_MAX, }
},
{ .retval = 2,
.data64 = { UINT_MAX - 1, }
},
{ .retval = 0,
.data64 = { 0, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jge32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { UINT_MAX, }
},
{ .retval = 0,
.data64 = { INT_MAX, }
},
{ .retval = 0,
.data64 = { (UINT_MAX - 1) | 2ULL << 32, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jge32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { UINT_MAX, }
},
{ .retval = 0,
.data64 = { UINT_MAX - 1, }
},
{ .retval = 0,
.data64 = { 0, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { UINT_MAX, }
},
{ .retval = 0,
.data64 = { UINT_MAX - 1, }
},
{ .retval = 0,
.data64 = { (UINT_MAX - 1) | 2ULL << 32, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jle32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { INT_MAX - 1, }
},
{ .retval = 0,
.data64 = { UINT_MAX, }
},
{ .retval = 2,
.data64 = { INT_MAX, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jle32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { INT_MAX | 1ULL << 32, }
},
{ .retval = 2,
.data64 = { INT_MAX - 2, }
},
{ .retval = 0,
.data64 = { UINT_MAX, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jle32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jlt32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { INT_MAX, }
},
{ .retval = 0,
.data64 = { UINT_MAX, }
},
{ .retval = 2,
.data64 = { INT_MAX - 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jlt32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { INT_MAX | 1ULL << 32, }
},
{ .retval = 0,
.data64 = { UINT_MAX, }
},
{ .retval = 2,
.data64 = { (INT_MAX - 1) | 3ULL << 32, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jlt32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsge32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { 0, }
},
{ .retval = 2,
.data64 = { -1, }
},
{ .retval = 0,
.data64 = { -2, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsge32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { -1, }
},
{ .retval = 2,
.data64 = { 0x7fffffff | 1ULL << 32, }
},
{ .retval = 0,
.data64 = { -2, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsge32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsgt32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { (__u32)-2, }
},
{ .retval = 0,
.data64 = { -1, }
},
{ .retval = 2,
.data64 = { 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsgt32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 0,
.data64 = { 0x7ffffffe, }
},
{ .retval = 0,
.data64 = { 0x1ffffffffULL, }
},
{ .retval = 2,
.data64 = { 0x7fffffff, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsgt32: min/max deduction",
.insns = {
BPF_RAND_SEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32),
BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsle32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { (__u32)-2, }
},
{ .retval = 2,
.data64 = { -1, }
},
{ .retval = 0,
.data64 = { 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsle32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { 0x7ffffffe, }
},
{ .retval = 2,
.data64 = { (__u32)-1, }
},
{ .retval = 0,
.data64 = { 0x7fffffff | 2ULL << 32, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jsle32: min/max deduction",
.insns = {
BPF_RAND_UEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jslt32: BPF_K",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { (__u32)-2, }
},
{ .retval = 0,
.data64 = { -1, }
},
{ .retval = 0,
.data64 = { 1, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jslt32: BPF_X",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32),
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 3,
.retvals = {
{ .retval = 2,
.data64 = { 0x7ffffffe, }
},
{ .retval = 2,
.data64 = { 0xffffffff, }
},
{ .retval = 0,
.data64 = { 0x7fffffff | 2ULL << 32, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jslt32: min/max deduction",
.insns = {
BPF_RAND_SEXT_R7,
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32),
BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: range bound deduction, reg op imm",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jle32: range bound deduction, reg1 op reg2, reg2 unknown",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jeq32/jne32: bounds checking",
.insns = {
BPF_MOV64_IMM(BPF_REG_6, 563),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU32_REG(BPF_OR, BPF_REG_2, BPF_REG_6),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_2, 8, 5),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_2, 500, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
| linux-master | tools/testing/selftests/bpf/verifier/jmp32.c |
{
"invalid call insn1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode 8d",
.result = REJECT,
},
{
"invalid call insn2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid function call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"non-invalid argument register",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
| linux-master | tools/testing/selftests/bpf/verifier/basic_call.c |
{
"BPF_ATOMIC_FETCH_ADD smoketest - 64bit",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
/* Write 3 to stack */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
/* Put a 1 in R1, add it to the 3 on the stack, and load the value back into R1 */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* Check the value we loaded back was 3 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* Load value from stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
/* Check value loaded from stack was 4 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC_FETCH_ADD smoketest - 32bit",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
/* Write 3 to stack */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
/* Put a 1 in R1, add it to the 3 on the stack, and load the value back into R1 */
BPF_MOV32_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
/* Check the value we loaded back was 3 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* Load value from stack */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
/* Check value loaded from stack was 4 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Can't use ATM_FETCH_ADD on frame pointer",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_10, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr_unpriv = "R10 leaks addr into mem",
.errstr = "frame pointer is read only",
},
{
"Can't use ATM_FETCH_ADD on uninit src reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
	/* It happens that the address leak check is first, but it would also
	 * complain about the fact that we're trying to modify R10.
	 */
.errstr = "!read_ok",
},
{
"Can't use ATM_FETCH_ADD on uninit dst reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.result = REJECT,
	/* It happens that the address leak check is first, but it would also
	 * complain about the use of an uninitialized register.
	 */
.errstr = "!read_ok",
},
{
"Can't use ATM_FETCH_ADD on kernel memory",
.insns = {
/* This is an fentry prog, context is array of the args of the
* kernel function being called. Load first arg into R2.
*/
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 0),
/* First arg of bpf_fentry_test7 is a pointer to a struct.
* Attempt to modify that struct. Verifier shouldn't let us
* because it's kernel memory.
*/
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_3, 0),
/* Done */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.kfunc = "bpf_fentry_test7",
.result = REJECT,
.errstr = "only read is supported",
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_fetch_add.c |
{
"BPF_ATOMIC OR without fetch",
.insns = {
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* atomic_or(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_OR, BPF_REG_10, BPF_REG_1, -8),
/* if (val != 0x111) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x111, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* r1 should not be clobbered, no BPF_FETCH flag */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC OR with fetch",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 123),
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* old = atomic_fetch_or(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 0x110) exit(3); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x111) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC OR with fetch 32bit",
.insns = {
/* r0 = (s64) -1 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
/* val = 0x110; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
/* old = atomic_fetch_or(&val, 0x011); */
BPF_MOV32_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 0x110) exit(3); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x111) exit(2); */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x111, 2),
BPF_MOV32_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* Check R0 wasn't clobbered (for fear of x86 JIT bug)
* It should be -1 so add 1 to get exit code.
*/
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_W atomic_fetch_or should zero top 32 bits",
.insns = {
/* r1 = U64_MAX; */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
/* u64 val = r1; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* r1 = (u32)atomic_fetch_or((u32 *)&val, 2); */
BPF_MOV32_IMM(BPF_REG_1, 2),
BPF_ATOMIC_OP(BPF_W, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* r2 = 0x00000000FFFFFFFF; */
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 1),
/* if (r2 != r1) exit(1); */
BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_1, 2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_or.c |
{
"test1 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test2 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test3 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test4 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test6 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"test7 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"test8 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "uses reserved fields",
.result = REJECT,
},
{
"test9 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 1, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test10 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test11 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test12 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "not pointing to valid bpf_map",
.result = REJECT,
},
{
"test13 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test14 ld_imm64: reject 2nd imm != 0",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_1,
BPF_PSEUDO_MAP_FD, 0, 0),
BPF_RAW_INSN(0, 0, 0, 0, 0xfefefe),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 1 },
.errstr = "unrecognized bpf_ld_imm64 insn",
.result = REJECT,
},
| linux-master | tools/testing/selftests/bpf/verifier/ld_imm64.c |
{
"add+sub+mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_2, 3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = -3,
},
{
"xor32 zero extend check",
.insns = {
BPF_MOV32_IMM(BPF_REG_2, -1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"arsh32 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 on imm 2",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = -16069393,
},
{
"arsh32 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 on reg 2",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
BPF_MOV64_IMM(BPF_REG_1, 15),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 43724,
},
{
"arsh64 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"arsh64 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"lsh64 by 0 imm",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"rsh64 by 0 imm",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"arsh64 by 0 imm",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"lsh64 by 0 reg",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 1),
BPF_LD_IMM64(BPF_REG_2, 0),
BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"rsh64 by 0 reg",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_LD_IMM64(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"arsh64 by 0 reg",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_LD_IMM64(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"invalid 64-bit BPF_END with BPF_TO_BE",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 0),
{
.code = BPF_ALU64 | BPF_END | BPF_TO_BE,
.dst_reg = BPF_REG_0,
.src_reg = 0,
.off = 0,
.imm = 32,
},
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode df",
.result = REJECT,
},
{
"mov64 src == dst",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
// Check bounds are OK
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"mov64 src != dst",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
// Check bounds are OK
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/basic_instr.c |
{
"atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
.insns = {
BPF_LD_IMM64(BPF_REG_1, -1),
BPF_LD_MAP_FD(BPF_REG_8, 0),
BPF_LD_MAP_FD(BPF_REG_9, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 2, 4 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "leaking pointer from stack off -8",
},
{
"atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
.insns = {
BPF_LD_IMM64(BPF_REG_1, -1),
BPF_LD_MAP_FD(BPF_REG_8, 0),
BPF_LD_MAP_FD(BPF_REG_9, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 2, 4 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "leaking pointer from stack off -8",
},
{
"atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
.insns = {
BPF_LD_IMM64(BPF_REG_1, -1),
BPF_LD_MAP_FD(BPF_REG_8, 0),
BPF_LD_MAP_FD(BPF_REG_9, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 2, 4 },
.result = REJECT,
.errstr = "invalid size of register fill",
},
{
"atomic w/fetch and address leakage of (map ptr & -1) via returned value",
.insns = {
BPF_LD_IMM64(BPF_REG_1, -1),
BPF_LD_MAP_FD(BPF_REG_8, 0),
BPF_LD_MAP_FD(BPF_REG_9, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 2, 4 },
.result = REJECT,
.errstr = "invalid size of register fill",
},
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
{ \
"atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
.insns = { \
		/* u64 val = operand1; */					\
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, operand1), \
		/* u64 old = atomic_fetch_<op>(&val, operand2); */		\
BPF_MOV64_REG(dst_reg, BPF_REG_10), \
BPF_MOV64_IMM(src_reg, operand2), \
BPF_ATOMIC_OP(BPF_DW, op, \
dst_reg, src_reg, -8), \
/* if (old != operand1) exit(1); */ \
BPF_JMP_IMM(BPF_JEQ, src_reg, operand1, 2), \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_EXIT_INSN(), \
		/* if (val != expect) exit(2); */				\
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), \
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, expect, 2), \
BPF_MOV64_IMM(BPF_REG_0, 2), \
BPF_EXIT_INSN(), \
/* exit(0); */ \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
}, \
.result = ACCEPT, \
}
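/* Illustrative sketch (derived from the macro above, not an original test
 * entry): each __ATOMIC_FETCH_OP_TEST() invocation below expands to a single
 * test. For example, __ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1,
 * BPF_ADD | BPF_FETCH, 2, 3) corresponds roughly to this pseudo-C:
 *
 *	u64 val = 1;				// operand1, stored at fp[-8]
 *	u64 old = atomic_fetch_add(&val, 2);	// operand2 applied via src/dst regs
 *	if (old != 1) exit(1);			// old value must equal operand1
 *	if (val != 3) exit(2);			// new value must equal expect
 *	exit(0);
 */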
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XCHG, 0x011, 0x011),
#undef __ATOMIC_FETCH_OP_TEST
| linux-master | tools/testing/selftests/bpf/verifier/atomic_fetch.c |
{
"BPF_ST_MEM stack imm non-zero",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -42),
/* if value is tracked correctly R0 is zero */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
{
"BPF_ST_MEM stack imm zero",
.insns = {
/* mark stack 0000 0000 */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* read and sum a few bytes */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -8),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* if value is tracked correctly R0 is zero */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
{
"BPF_ST_MEM stack imm zero, variable offset",
.insns = {
/* set fp[-16], fp[-24] to zeros */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
/* r0 = random value in range [-32, -15] */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JLE, BPF_REG_0, 16, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 32),
/* fp[r0] = 0, make a variable offset write of zero,
* this should preserve zero marks on stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_10),
BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
/* r0 = fp[-20], if variable offset write was tracked correctly
* r0 would be a known zero.
*/
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -20),
/* Would fail return code verification if r0 range is not tracked correctly. */
BPF_EXIT_INSN(),
},
.result = ACCEPT,
/* Use prog type that requires return value in range [0, 1] */
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
| linux-master | tools/testing/selftests/bpf/verifier/bpf_st_mem.c |
{
"direct map access, write test 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 32),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "R1 min value is outside of the allowed memory range",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct map access, write test 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, -1),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "direct value offset of 4294967295 is not allowed",
},
{
"direct map access, write test 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_1, -1, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 9",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid access to map value pointer",
},
{
"direct map access, write test 10",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 11",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid access to map value pointer",
},
{
"direct map access, write test 12",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "direct value offset of 536870912 is not allowed",
},
{
"direct map access, write test 13",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)-1),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid access to map value pointer, value_size=48 off=536870911",
},
{
"direct map access, write test 14",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1, 3 },
.result = ACCEPT,
.retval = 0xff,
},
{
"direct map access, write test 15",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1, 3 },
.result = ACCEPT,
.retval = 0xffff,
},
{
"direct map access, write test 16",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
BPF_LD_MAP_VALUE(BPF_REG_2, 0, 47),
BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1, 3 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=48 off=47 size=2",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct map access, write test 17",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
BPF_ST_MEM(BPF_H, BPF_REG_2, 1, 0xffff),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1, 3 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=48 off=47 size=2",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct map access, write test 18",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
BPF_ST_MEM(BPF_H, BPF_REG_1, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map_array_small = { 1 },
.result = REJECT,
.errstr = "R1 min value is outside of the allowed memory range",
},
{
"direct map access, write test 19",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map_array_small = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"direct map access, write test 20",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_EXIT_INSN(),
},
.fixup_map_array_small = { 1 },
.result = REJECT,
.errstr = "invalid access to map value pointer",
},
{
"direct map access, invalid insn test 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, 1, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid bpf_ld_imm64 insn",
},
{
"direct map access, invalid insn test 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 1, 0, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "BPF_LD_IMM64 uses reserved fields",
},
{
"direct map access, invalid insn test 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, 0, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "BPF_LD_IMM64 uses reserved fields",
},
{
"direct map access, invalid insn test 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, ~0, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid bpf_ld_imm64 insn",
},
{
"direct map access, invalid insn test 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, ~0, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid bpf_ld_imm64 insn",
},
{
"direct map access, invalid insn test 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "BPF_LD_IMM64 uses reserved fields",
},
{
"direct map access, invalid insn test 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, ~0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid bpf_ld_imm64 insn",
},
{
"direct map access, invalid insn test 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, ~0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "invalid bpf_ld_imm64 insn",
},
{
"direct map access, invalid insn test 9",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0, 0, 47),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 1 },
.result = REJECT,
.errstr = "unrecognized bpf_ld_imm64 insn",
},
| linux-master | tools/testing/selftests/bpf/verifier/direct_value_access.c |
{
"BPF_ATOMIC XOR without fetch",
.insns = {
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* atomic_xor(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_XOR, BPF_REG_10, BPF_REG_1, -8),
/* if (val != 0x101) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x101, 2),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
/* r1 should not be clobbered, no BPF_FETCH flag */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC XOR with fetch",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 123),
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* old = atomic_fetch_xor(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* if (old != 0x110) exit(3); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x101) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"BPF_ATOMIC XOR with fetch 32bit",
.insns = {
/* r0 = (s64) -1 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
/* val = 0x110; */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
/* old = atomic_fetch_xor(&val, 0x011); */
BPF_MOV32_IMM(BPF_REG_1, 0x011),
BPF_ATOMIC_OP(BPF_W, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
/* if (old != 0x110) exit(3); */
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
BPF_MOV32_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x101) exit(2); */
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
BPF_MOV32_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug)
* It should be -1 so add 1 to get exit code.
*/
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_xor.c |
#define BTF_TYPES \
.btf_strings = "\0int\0i\0ctx\0callback\0main\0", \
.btf_types = { \
/* 1: int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), \
/* 2: int* */ BTF_PTR_ENC(1), \
/* 3: void* */ BTF_PTR_ENC(0), \
/* 4: int __(void*) */ BTF_FUNC_PROTO_ENC(1, 1), \
BTF_FUNC_PROTO_ARG_ENC(7, 3), \
/* 5: int __(int, int*) */ BTF_FUNC_PROTO_ENC(1, 2), \
BTF_FUNC_PROTO_ARG_ENC(5, 1), \
BTF_FUNC_PROTO_ARG_ENC(7, 2), \
/* 6: main */ BTF_FUNC_ENC(20, 4), \
/* 7: callback */ BTF_FUNC_ENC(11, 5), \
BTF_END_RAW \
}
#define MAIN_TYPE 6
#define CALLBACK_TYPE 7
/* can't use BPF_CALL_REL, jit_subprogs adjusts IMM & OFF
* fields for pseudo calls
*/
#define PSEUDO_CALL_INSN() \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, \
INSN_OFF_MASK, INSN_IMM_MASK)
/* can't use BPF_FUNC_loop constant,
 * do_misc_fixups adjusts the IMM field
*/
#define HELPER_CALL_INSN() \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK)
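/* Orientation sketch (illustrative; derived from the BTF and test names
 * above, not an original test entry): the BTF describes roughly
 * "int main(void *ctx)" and "int callback(int i, int *ctx)", and the insn
 * sequences below model a C-level call of the form
 *
 *	bpf_loop(nr_loops, callback, ctx, flags);
 *
 * with R1..R4 holding the four arguments. Per the test names, inlining is
 * only expected when flags is a known zero and the callback address is a
 * constant.
 */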
{
"inline simple bpf_loop call",
.insns = {
/* main */
/* force verifier state branching to verify logic on first and
* subsequent bpf_loop insn processing steps
*/
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 2),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
{
"don't inline bpf_loop call, flags non-zero",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 9),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 7),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, -10),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
{
"don't inline bpf_loop call, callback non-constant",
.insns = {
/* main */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 4), /* pick a random callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 10),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* callback #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
.runs = 0,
.func_info = {
{ 0, MAIN_TYPE },
{ 14, CALLBACK_TYPE },
{ 16, CALLBACK_TYPE }
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"bpf_loop_inline and a dead func",
.insns = {
/* main */
/* A reference to callback #1 to make verifier count it as a func.
* This reference is overwritten below and callback #1 is dead.
*/
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 9),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* callback #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
.runs = 0,
.func_info = {
{ 0, MAIN_TYPE },
{ 10, CALLBACK_TYPE },
{ 12, CALLBACK_TYPE }
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"bpf_loop_inline stack locations for loop vars",
.insns = {
/* main */
BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
/* bpf_loop call #1 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 22),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
/* bpf_loop call #2 */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 16),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
/* call func and exit */
BPF_CALL_REL(2),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* func */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* callback */
BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
SKIP_INSNS(),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
SKIP_INSNS(),
/* offsets are the same as in the first call */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
SKIP_INSNS(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
SKIP_INSNS(),
/* offsets differ from main because of different offset
* in BPF_ST_MEM instruction
*/
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -40),
},
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
.func_info = {
{ 0, MAIN_TYPE },
{ 16, MAIN_TYPE },
{ 25, CALLBACK_TYPE },
},
.func_info_cnt = 3,
BTF_TYPES
},
{
"inline bpf_loop call in a big program",
.insns = {},
.fill_helper = bpf_fill_big_prog_with_loop_1,
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
},
#undef HELPER_CALL_INSN
#undef PSEUDO_CALL_INSN
#undef CALLBACK_TYPE
#undef MAIN_TYPE
#undef BTF_TYPES
| linux-master | tools/testing/selftests/bpf/verifier/bpf_loop_inline.c |
{
"jset: functional",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
/* reg, bit 63 or bit 0 set, taken */
BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
/* reg, bit 62, not taken */
BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
/* imm, any bit set, taken */
BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
BPF_EXIT_INSN(),
/* imm, bit 31 set, taken */
BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
BPF_EXIT_INSN(),
/* all good - return r0 == 2 */
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.runs = 7,
.retvals = {
{ .retval = 2,
.data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
},
{ .retval = 2,
.data64 = { (1ULL << 63) | (1U << 31), }
},
{ .retval = 2,
.data64 = { (1ULL << 31) | (1U << 0), }
},
{ .retval = 2,
.data64 = { (__u32)-1, }
},
{ .retval = 2,
.data64 = { ~0x4000000000000000ULL, }
},
{ .retval = 0,
.data64 = { 0, }
},
{ .retval = 0,
.data64 = { ~0ULL, }
},
},
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jset: sign-extend",
.insns = {
BPF_DIRECT_PKT_R2,
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
.data = { 1, 0, 0, 0, 0, 0, 0, 1, },
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jset: known const compare",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.retval = 1,
.result = ACCEPT,
},
{
"jset: known const compare bad",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "!read_ok",
.result_unpriv = REJECT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"jset: unknown const compare taken",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "!read_ok",
.result_unpriv = REJECT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"jset: unknown const compare not taken",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "!read_ok",
.result_unpriv = REJECT,
.errstr = "!read_ok",
.result = REJECT,
},
{
"jset: half-known const compare",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jset: range",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "R9 !read_ok",
.result_unpriv = REJECT,
.result = ACCEPT,
},
| linux-master | tools/testing/selftests/bpf/verifier/jset.c |
{
"BPF_ATOMIC bounds propagation, mem->reg",
.insns = {
/* a = 0; */
/*
* Note this is implemented with two separate instructions,
* where you might think one would suffice:
*
* BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
*
* This is because BPF_ST_MEM doesn't seem to set the stack slot
* type to 0 when storing an immediate.
*/
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* b = atomic_fetch_add(&a, 1); */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
/* Verifier should be able to tell that this infinite loop isn't reachable. */
/* if (b) while (true) continue; */
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "back-edge",
},
| linux-master | tools/testing/selftests/bpf/verifier/atomic_bounds.c |
{
"sleepable fentry accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.kfunc = "bpf_fentry_test1",
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable fexit accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
	.expected_attach_type = BPF_TRACE_FEXIT,
.kfunc = "bpf_fentry_test1",
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable fmod_ret accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_MODIFY_RETURN,
.kfunc = "bpf_fentry_test1",
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable iter accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_ITER,
.kfunc = "task",
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable lsm accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_LSM,
.kfunc = "bpf",
.expected_attach_type = BPF_LSM_MAC,
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable uprobe accept",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_KPROBE,
.kfunc = "bpf_fentry_test1",
.result = ACCEPT,
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
{
"sleepable raw tracepoint reject",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_RAW_TP,
.kfunc = "sched_switch",
.result = REJECT,
.errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
.flags = BPF_F_SLEEPABLE,
.runs = -1,
},
| linux-master | tools/testing/selftests/bpf/verifier/sleepable.c |
{
"ld_dw: xor semi-random 64 bit imms, test 1",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 4090,
},
{
"ld_dw: xor semi-random 64 bit imms, test 2",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2047,
},
{
"ld_dw: xor semi-random 64 bit imms, test 3",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 511,
},
{
"ld_dw: xor semi-random 64 bit imms, test 4",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 5,
},
{
"ld_dw: xor semi-random 64 bit imms, test 5",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1000000 - 6,
},
| linux-master | tools/testing/selftests/bpf/verifier/ld_dw.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <argp.h>
#include <linux/btf.h>
#include "local_storage_bench.skel.h"
#include "bench.h"
#include <test_btf.h>
static struct {
__u32 nr_maps;
__u32 hashmap_nr_keys_used;
} args = {
.nr_maps = 1000,
.hashmap_nr_keys_used = 1000,
};
enum {
ARG_NR_MAPS = 6000,
ARG_HASHMAP_NR_KEYS_USED = 6001,
};
static const struct argp_option opts[] = {
{ "nr_maps", ARG_NR_MAPS, "NR_MAPS", 0,
"Set number of local_storage maps"},
{ "hashmap_nr_keys_used", ARG_HASHMAP_NR_KEYS_USED, "NR_KEYS",
0, "When doing hashmap test, set number of hashmap keys test uses"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
long ret;
switch (key) {
case ARG_NR_MAPS:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "invalid nr_maps");
argp_usage(state);
}
args.nr_maps = ret;
break;
case ARG_HASHMAP_NR_KEYS_USED:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "invalid hashmap_nr_keys_used");
argp_usage(state);
}
args.hashmap_nr_keys_used = ret;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_local_storage_argp = {
.options = opts,
.parser = parse_arg,
};
/* Keep in sync w/ array of maps in bpf */
#define MAX_NR_MAPS 1000
/* keep in sync w/ same define in bpf */
#define HASHMAP_SZ 4194304
static void validate(void)
{
if (env.producer_cnt != 1) {
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
if (args.nr_maps > MAX_NR_MAPS) {
fprintf(stderr, "nr_maps must be <= 1000\n");
exit(1);
}
if (args.hashmap_nr_keys_used > HASHMAP_SZ) {
fprintf(stderr, "hashmap_nr_keys_used must be <= %u\n", HASHMAP_SZ);
exit(1);
}
}
static struct {
struct local_storage_bench *skel;
void *bpf_obj;
struct bpf_map *array_of_maps;
} ctx;
static void prepopulate_hashmap(int fd)
{
int i, key, val;
/* local_storage gets will have BPF_LOCAL_STORAGE_GET_F_CREATE flag set, so
* populate the hashmap for a similar comparison
*/
for (i = 0; i < HASHMAP_SZ; i++) {
key = val = i;
if (bpf_map_update_elem(fd, &key, &val, 0)) {
fprintf(stderr, "Error prepopulating hashmap (key %d)\n", key);
exit(1);
}
}
}
static void __setup(struct bpf_program *prog, bool hashmap)
{
struct bpf_map *inner_map;
int i, fd, mim_fd, err;
LIBBPF_OPTS(bpf_map_create_opts, create_opts);
if (!hashmap)
create_opts.map_flags = BPF_F_NO_PREALLOC;
ctx.skel->rodata->num_maps = args.nr_maps;
ctx.skel->rodata->hashmap_num_keys = args.hashmap_nr_keys_used;
inner_map = bpf_map__inner_map(ctx.array_of_maps);
create_opts.btf_key_type_id = bpf_map__btf_key_type_id(inner_map);
create_opts.btf_value_type_id = bpf_map__btf_value_type_id(inner_map);
err = local_storage_bench__load(ctx.skel);
if (err) {
fprintf(stderr, "Error loading skeleton\n");
goto err_out;
}
create_opts.btf_fd = bpf_object__btf_fd(ctx.skel->obj);
mim_fd = bpf_map__fd(ctx.array_of_maps);
if (mim_fd < 0) {
fprintf(stderr, "Error getting map_in_map fd\n");
goto err_out;
}
for (i = 0; i < args.nr_maps; i++) {
if (hashmap)
fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
sizeof(int), HASHMAP_SZ, &create_opts);
else
fd = bpf_map_create(BPF_MAP_TYPE_TASK_STORAGE, NULL, sizeof(int),
sizeof(int), 0, &create_opts);
if (fd < 0) {
fprintf(stderr, "Error creating map %d: %d\n", i, fd);
goto err_out;
}
if (hashmap)
prepopulate_hashmap(fd);
err = bpf_map_update_elem(mim_fd, &i, &fd, 0);
if (err) {
fprintf(stderr, "Error updating array-of-maps w/ map %d\n", i);
goto err_out;
}
}
if (!bpf_program__attach(prog)) {
fprintf(stderr, "Error attaching bpf program\n");
goto err_out;
}
return;
err_out:
exit(1);
}
static void hashmap_setup(void)
{
struct local_storage_bench *skel;
setup_libbpf();
skel = local_storage_bench__open();
ctx.skel = skel;
ctx.array_of_maps = skel->maps.array_of_hash_maps;
skel->rodata->use_hashmap = 1;
skel->rodata->interleave = 0;
__setup(skel->progs.get_local, true);
}
static void local_storage_cache_get_setup(void)
{
struct local_storage_bench *skel;
setup_libbpf();
skel = local_storage_bench__open();
ctx.skel = skel;
ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
skel->rodata->use_hashmap = 0;
skel->rodata->interleave = 0;
__setup(skel->progs.get_local, false);
}
static void local_storage_cache_get_interleaved_setup(void)
{
struct local_storage_bench *skel;
setup_libbpf();
skel = local_storage_bench__open();
ctx.skel = skel;
ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
skel->rodata->use_hashmap = 0;
skel->rodata->interleave = 1;
__setup(skel->progs.get_local, false);
}
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
res->important_hits = atomic_swap(&ctx.skel->bss->important_hits, 0);
}
static inline void trigger_bpf_program(void)
{
syscall(__NR_getpgid);
}
static void *producer(void *input)
{
while (true)
trigger_bpf_program();
return NULL;
}
/* The cache sequential and interleaved get benchs test local_storage get
 * performance; specifically, they demonstrate the performance cliff of the
 * current list-plus-cache local_storage model.
 *
 * cache sequential get: call bpf_task_storage_get on n maps in order
 * cache interleaved get: like "sequential get", but interleave 4 calls to the
 * 'important' map (idx 0 in array_of_maps) for every 10 calls. The goal
 * is to mimic an environment where many progs are accessing their
 * local_storage maps, with 'our' prog needing to access its map more often
 * than others.
 */
const struct bench bench_local_storage_cache_seq_get = {
.name = "local-storage-cache-seq-get",
.argp = &bench_local_storage_argp,
.validate = validate,
.setup = local_storage_cache_get_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
};
const struct bench bench_local_storage_cache_interleaved_get = {
.name = "local-storage-cache-int-get",
.argp = &bench_local_storage_argp,
.validate = validate,
.setup = local_storage_cache_get_interleaved_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
};
const struct bench bench_local_storage_cache_hashmap_control = {
.name = "local-storage-cache-hashmap-control",
.argp = &bench_local_storage_argp,
.validate = validate,
.setup = hashmap_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = local_storage_report_progress,
.report_final = local_storage_report_final,
};
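/* Usage sketch (the "./bench" runner binary path is an assumption; the bench
 * names and the options come from the definitions in this file):
 *
 *	./bench local-storage-cache-seq-get --nr_maps 1000
 *	./bench local-storage-cache-int-get --nr_maps 1000
 *	./bench local-storage-cache-hashmap-control --hashmap_nr_keys_used 1000
 */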
| linux-master | tools/testing/selftests/bpf/benchs/bench_local_storage.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bench.h"
/* COUNT-GLOBAL benchmark */
static struct count_global_ctx {
struct counter hits;
} count_global_ctx;
static void *count_global_producer(void *input)
{
struct count_global_ctx *ctx = &count_global_ctx;
while (true) {
atomic_inc(&ctx->hits.value);
}
return NULL;
}
static void count_global_measure(struct bench_res *res)
{
struct count_global_ctx *ctx = &count_global_ctx;
res->hits = atomic_swap(&ctx->hits.value, 0);
}
/* COUNT-local benchmark */
static struct count_local_ctx {
struct counter *hits;
} count_local_ctx;
static void count_local_setup(void)
{
struct count_local_ctx *ctx = &count_local_ctx;
ctx->hits = calloc(env.producer_cnt, sizeof(*ctx->hits));
if (!ctx->hits)
exit(1);
}
static void *count_local_producer(void *input)
{
struct count_local_ctx *ctx = &count_local_ctx;
int idx = (long)input;
while (true) {
atomic_inc(&ctx->hits[idx].value);
}
return NULL;
}
static void count_local_measure(struct bench_res *res)
{
struct count_local_ctx *ctx = &count_local_ctx;
int i;
for (i = 0; i < env.producer_cnt; i++) {
res->hits += atomic_swap(&ctx->hits[i].value, 0);
}
}
const struct bench bench_count_global = {
.name = "count-global",
.producer_thread = count_global_producer,
.measure = count_global_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_count_local = {
.name = "count-local",
.setup = count_local_setup,
.producer_thread = count_local_producer,
.measure = count_local_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_count.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bench.h"
#include "trigger_bench.skel.h"
#include "trace_helpers.h"
/* BPF triggering benchmarks */
static struct trigger_ctx {
struct trigger_bench *skel;
} ctx;
static struct counter base_hits;
static void trigger_validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
static void *trigger_base_producer(void *input)
{
while (true) {
(void)syscall(__NR_getpgid);
atomic_inc(&base_hits.value);
}
return NULL;
}
static void trigger_base_measure(struct bench_res *res)
{
res->hits = atomic_swap(&base_hits.value, 0);
}
static void *trigger_producer(void *input)
{
while (true)
(void)syscall(__NR_getpgid);
return NULL;
}
static void trigger_measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
}
static void setup_ctx(void)
{
setup_libbpf();
ctx.skel = trigger_bench__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
}
static void attach_bpf(struct bpf_program *prog)
{
struct bpf_link *link;
link = bpf_program__attach(prog);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void trigger_tp_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_tp);
}
static void trigger_rawtp_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_raw_tp);
}
static void trigger_kprobe_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
}
static void trigger_fentry_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
static void trigger_fentry_sleep_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
}
static void trigger_fmodret_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
}
/* Make sure the call is not inlined and not optimized away by the compiler,
* hence __weak and the inline asm volatile in the function body.
*
* There is a performance difference between uprobing at a nop location vs
* other instructions, so use two different targets, one of which starts with
* a nop and the other doesn't.
*
* GCC doesn't generate a stack setup preamble for these functions because
* they have no input arguments and do nothing in the body.
*/
__weak void uprobe_target_with_nop(void)
{
asm volatile ("nop");
}
__weak void uprobe_target_without_nop(void)
{
asm volatile ("");
}
static void *uprobe_base_producer(void *input)
{
while (true) {
uprobe_target_with_nop();
atomic_inc(&base_hits.value);
}
return NULL;
}
static void *uprobe_producer_with_nop(void *input)
{
while (true)
uprobe_target_with_nop();
return NULL;
}
static void *uprobe_producer_without_nop(void *input)
{
while (true)
uprobe_target_without_nop();
return NULL;
}
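/* Attach a uprobe or uretprobe to the selected target function by resolving
* its offset within our own binary (/proc/self/exe) and attaching the
* benchmark's BPF program at that offset.
*/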
static void usetup(bool use_retprobe, bool use_nop)
{
size_t uprobe_offset;
struct bpf_link *link;
setup_libbpf();
ctx.skel = trigger_bench__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
if (use_nop)
uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop);
else
uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop);
link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
use_retprobe,
-1 /* all PIDs */,
"/proc/self/exe",
uprobe_offset);
if (!link) {
fprintf(stderr, "failed to attach uprobe!\n");
exit(1);
}
ctx.skel->links.bench_trigger_uprobe = link;
}
static void uprobe_setup_with_nop(void)
{
usetup(false, true);
}
static void uretprobe_setup_with_nop(void)
{
usetup(true, true);
}
static void uprobe_setup_without_nop(void)
{
usetup(false, false);
}
static void uretprobe_setup_without_nop(void)
{
usetup(true, false);
}
const struct bench bench_trig_base = {
.name = "trig-base",
.validate = trigger_validate,
.producer_thread = trigger_base_producer,
.measure = trigger_base_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_tp = {
.name = "trig-tp",
.validate = trigger_validate,
.setup = trigger_tp_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_rawtp = {
.name = "trig-rawtp",
.validate = trigger_validate,
.setup = trigger_rawtp_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_kprobe = {
.name = "trig-kprobe",
.validate = trigger_validate,
.setup = trigger_kprobe_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_fentry = {
.name = "trig-fentry",
.validate = trigger_validate,
.setup = trigger_fentry_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_fentry_sleep = {
.name = "trig-fentry-sleep",
.validate = trigger_validate,
.setup = trigger_fentry_sleep_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_fmodret = {
.name = "trig-fmodret",
.validate = trigger_validate,
.setup = trigger_fmodret_setup,
.producer_thread = trigger_producer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_uprobe_base = {
.name = "trig-uprobe-base",
.setup = NULL, /* no uprobe/uretprobe is attached */
.producer_thread = uprobe_base_producer,
.measure = trigger_base_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_uprobe_with_nop = {
.name = "trig-uprobe-with-nop",
.setup = uprobe_setup_with_nop,
.producer_thread = uprobe_producer_with_nop,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_uretprobe_with_nop = {
.name = "trig-uretprobe-with-nop",
.setup = uretprobe_setup_with_nop,
.producer_thread = uprobe_producer_with_nop,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_uprobe_without_nop = {
.name = "trig-uprobe-without-nop",
.setup = uprobe_setup_without_nop,
.producer_thread = uprobe_producer_without_nop,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_uretprobe_without_nop = {
.name = "trig-uretprobe-without-nop",
.setup = uretprobe_setup_without_nop,
.producer_thread = uprobe_producer_without_nop,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_trigger.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <sys/types.h>
#include <sys/socket.h>
#include <pthread.h>
#include <argp.h>
#include "bench.h"
#include "bench_local_storage_create.skel.h"
struct thread {
int *fds;
pthread_t *pthds;
int *pthd_results;
};
static struct bench_local_storage_create *skel;
static struct thread *threads;
static long create_owner_errs;
static int storage_type = BPF_MAP_TYPE_SK_STORAGE;
static int batch_sz = 32;
enum {
ARG_BATCH_SZ = 9000,
ARG_STORAGE_TYPE = 9001,
};
static const struct argp_option opts[] = {
{ "batch-size", ARG_BATCH_SZ, "BATCH_SIZE", 0,
"The number of storage creations in each batch" },
{ "storage-type", ARG_STORAGE_TYPE, "STORAGE_TYPE", 0,
"The type of local storage to test (socket or task)" },
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
int ret;
switch (key) {
case ARG_BATCH_SZ:
ret = atoi(arg);
if (ret < 1) {
fprintf(stderr, "invalid batch-size\n");
argp_usage(state);
}
batch_sz = ret;
break;
case ARG_STORAGE_TYPE:
if (!strcmp(arg, "task")) {
storage_type = BPF_MAP_TYPE_TASK_STORAGE;
} else if (!strcmp(arg, "socket")) {
storage_type = BPF_MAP_TYPE_SK_STORAGE;
} else {
fprintf(stderr, "invalid storage-type (socket or task)\n");
argp_usage(state);
}
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_local_storage_create_argp = {
.options = opts,
.parser = parse_arg,
};
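/* Example invocations (a sketch: --batch-size and --storage-type are defined
* above, while the -p producer-count flag and the ./bench runner form come
* from bench.c and are assumptions here):
*   ./bench -p8 --batch-size 64 local-storage-create
*   ./bench -p8 --storage-type task local-storage-create
*/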
static void validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr,
"local-storage-create benchmark does not need consumer\n");
exit(1);
}
}
static void setup(void)
{
int i;
skel = bench_local_storage_create__open_and_load();
if (!skel) {
fprintf(stderr, "error loading skel\n");
exit(1);
}
skel->bss->bench_pid = getpid();
if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
if (!bpf_program__attach(skel->progs.socket_post_create)) {
fprintf(stderr, "Error attaching bpf program\n");
exit(1);
}
} else {
if (!bpf_program__attach(skel->progs.sched_process_fork)) {
fprintf(stderr, "Error attaching bpf program\n");
exit(1);
}
}
if (!bpf_program__attach(skel->progs.kmalloc)) {
fprintf(stderr, "Error attaching bpf program\n");
exit(1);
}
threads = calloc(env.producer_cnt, sizeof(*threads));
if (!threads) {
fprintf(stderr, "cannot alloc thread_res\n");
exit(1);
}
for (i = 0; i < env.producer_cnt; i++) {
struct thread *t = &threads[i];
if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
t->fds = malloc(batch_sz * sizeof(*t->fds));
if (!t->fds) {
fprintf(stderr, "cannot alloc t->fds\n");
exit(1);
}
} else {
t->pthds = malloc(batch_sz * sizeof(*t->pthds));
if (!t->pthds) {
fprintf(stderr, "cannot alloc t->pthds\n");
exit(1);
}
t->pthd_results = malloc(batch_sz * sizeof(*t->pthd_results));
if (!t->pthd_results) {
fprintf(stderr, "cannot alloc t->pthd_results\n");
exit(1);
}
}
}
}
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&skel->bss->create_cnts, 0);
res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0);
}
static void *sk_producer(void *input)
{
struct thread *t = &threads[(long)(input)];
int *fds = t->fds;
int i;
while (true) {
for (i = 0; i < batch_sz; i++) {
fds[i] = socket(AF_INET6, SOCK_DGRAM, 0);
if (fds[i] == -1)
atomic_inc(&create_owner_errs);
}
for (i = 0; i < batch_sz; i++) {
if (fds[i] != -1)
close(fds[i]);
}
}
return NULL;
}
static void *thread_func(void *arg)
{
return NULL;
}
static void *task_producer(void *input)
{
struct thread *t = &threads[(long)(input)];
pthread_t *pthds = t->pthds;
int *pthd_results = t->pthd_results;
int i;
while (true) {
for (i = 0; i < batch_sz; i++) {
pthd_results[i] = pthread_create(&pthds[i], NULL, thread_func, NULL);
if (pthd_results[i])
atomic_inc(&create_owner_errs);
}
for (i = 0; i < batch_sz; i++) {
if (!pthd_results[i])
pthread_join(pthds[i], NULL);
}
}
return NULL;
}
static void *producer(void *input)
{
if (storage_type == BPF_MAP_TYPE_SK_STORAGE)
return sk_producer(input);
else
return task_producer(input);
}
static void report_progress(int iter, struct bench_res *res, long delta_ns)
{
double creates_per_sec, kmallocs_per_create;
creates_per_sec = res->hits / 1000.0 / (delta_ns / 1000000000.0);
kmallocs_per_create = (double)res->drops / res->hits;
printf("Iter %3d (%7.3lfus): ",
iter, (delta_ns - 1000000000) / 1000.0);
printf("creates %8.3lfk/s (%7.3lfk/prod), ",
creates_per_sec, creates_per_sec / env.producer_cnt);
printf("%3.2lf kmallocs/create\n", kmallocs_per_create);
}
static void report_final(struct bench_res res[], int res_cnt)
{
double creates_mean = 0.0, creates_stddev = 0.0;
long total_creates = 0, total_kmallocs = 0;
int i;
for (i = 0; i < res_cnt; i++) {
creates_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
total_creates += res[i].hits;
total_kmallocs += res[i].drops;
}
if (res_cnt > 1) {
for (i = 0; i < res_cnt; i++)
creates_stddev += (creates_mean - res[i].hits / 1000.0) *
(creates_mean - res[i].hits / 1000.0) /
(res_cnt - 1.0);
creates_stddev = sqrt(creates_stddev);
}
printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ",
creates_mean, creates_stddev, creates_mean / env.producer_cnt);
printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates);
if (create_owner_errs || skel->bss->create_errs)
printf("%s() errors %ld create_errs %ld\n",
storage_type == BPF_MAP_TYPE_SK_STORAGE ?
"socket" : "pthread_create",
create_owner_errs,
skel->bss->create_errs);
}
/* Benchmark performance of creating bpf local storage */
const struct bench bench_local_storage_create = {
.name = "local-storage-create",
.argp = &bench_local_storage_create_argp,
.validate = validate,
.setup = setup,
.producer_thread = producer,
.measure = measure,
.report_progress = report_progress,
.report_final = report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_local_storage_create.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include "bench.h"
#include "bpf_hashmap_full_update_bench.skel.h"
#include "bpf_util.h"
/* BPF triggering benchmarks */
static struct ctx {
struct bpf_hashmap_full_update_bench *skel;
} ctx;
#define MAX_LOOP_NUM 10000
static void validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
static void *producer(void *input)
{
while (true) {
/* trigger the bpf program */
syscall(__NR_getpgid);
}
return NULL;
}
static void measure(struct bench_res *res)
{
}
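/* Nothing is sampled per iteration here: the BPF program times nr_loops
* update operations per CPU itself (percpu_time), and the result is reported
* once in hashmap_report_final().
*/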
static void setup(void)
{
struct bpf_link *link;
int map_fd, i, max_entries;
setup_libbpf();
ctx.skel = bpf_hashmap_full_update_bench__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
ctx.skel->bss->nr_loops = MAX_LOOP_NUM;
link = bpf_program__attach(ctx.skel->progs.benchmark);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
/* fill hash_map */
map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
max_entries = bpf_map__max_entries(ctx.skel->maps.hash_map_bench);
for (i = 0; i < max_entries; i++)
bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
}
static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
int i;
for (i = 0; i < nr_cpus; i++) {
u64 time = ctx.skel->bss->percpu_time[i];
if (!time)
continue;
printf("%d:hash_map_full_perf %lld events per sec\n",
i, ctx.skel->bss->nr_loops * 1000000000ll / time);
}
}
const struct bench bench_bpf_hashmap_full_update = {
.name = "bpf-hashmap-full-update",
.validate = validate,
.setup = setup,
.producer_thread = producer,
.measure = measure,
.report_progress = NULL,
.report_final = hashmap_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <argp.h>
#include <linux/log2.h>
#include <pthread.h>
#include "bench.h"
#include "bloom_filter_bench.skel.h"
#include "bpf_util.h"
static struct ctx {
bool use_array_map;
bool use_hashmap;
bool hashmap_use_bloom;
bool count_false_hits;
struct bloom_filter_bench *skel;
int bloom_fd;
int hashmap_fd;
int array_map_fd;
pthread_mutex_t map_done_mtx;
pthread_cond_t map_done_cv;
bool map_done;
bool map_prepare_err;
__u32 next_map_idx;
} ctx = {
.map_done_mtx = PTHREAD_MUTEX_INITIALIZER,
.map_done_cv = PTHREAD_COND_INITIALIZER,
};
struct stat {
__u32 stats[3];
};
static struct {
__u32 nr_entries;
__u8 nr_hash_funcs;
__u8 value_size;
} args = {
.nr_entries = 1000,
.nr_hash_funcs = 3,
.value_size = 8,
};
enum {
ARG_NR_ENTRIES = 3000,
ARG_NR_HASH_FUNCS = 3001,
ARG_VALUE_SIZE = 3002,
};
static const struct argp_option opts[] = {
{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
"Set number of expected unique entries in the bloom filter"},
{ "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0,
"Set number of hash functions in the bloom filter"},
{ "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
"Set value size (in bytes) of bloom filter entries"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
long ret;
switch (key) {
case ARG_NR_ENTRIES:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "Invalid nr_entries count.");
argp_usage(state);
}
args.nr_entries = ret;
break;
case ARG_NR_HASH_FUNCS:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > 15) {
fprintf(stderr,
"The bloom filter must use 1 to 15 hash functions.");
argp_usage(state);
}
args.nr_hash_funcs = ret;
break;
case ARG_VALUE_SIZE:
ret = strtol(arg, NULL, 10);
if (ret < 2 || ret > 256) {
fprintf(stderr,
"Invalid value size. Must be between 2 and 256 bytes");
argp_usage(state);
}
args.value_size = ret;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
/* exported into benchmark runner */
const struct argp bench_bloom_map_argp = {
.options = opts,
.parser = parse_arg,
};
static void validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr,
"The bloom filter benchmarks do not support consumer\n");
exit(1);
}
}
static inline void trigger_bpf_program(void)
{
syscall(__NR_getpgid);
}
static void *producer(void *input)
{
while (true)
trigger_bpf_program();
return NULL;
}
static void *map_prepare_thread(void *arg)
{
__u32 val_size, i;
void *val = NULL;
int err;
val_size = args.value_size;
val = malloc(val_size);
if (!val) {
ctx.map_prepare_err = true;
goto done;
}
while (true) {
i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED);
if (i > args.nr_entries)
break;
again:
/* Populate hashmap, bloom filter map, and array map with the same
* random values
*/
err = syscall(__NR_getrandom, val, val_size, 0);
if (err != val_size) {
ctx.map_prepare_err = true;
fprintf(stderr, "failed to get random value: %d\n", -errno);
break;
}
if (ctx.use_hashmap) {
err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST);
if (err) {
if (err != -EEXIST) {
ctx.map_prepare_err = true;
fprintf(stderr, "failed to add elem to hashmap: %d\n",
-errno);
break;
}
goto again;
}
}
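/* __atomic_add_fetch() above returned the post-increment value, so i is
* 1-based at this point; convert it back to a 0-based index for the array
* map update below.
*/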
i--;
if (ctx.use_array_map) {
err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0);
if (err) {
ctx.map_prepare_err = true;
fprintf(stderr, "failed to add elem to array map: %d\n", -errno);
break;
}
}
if (ctx.use_hashmap && !ctx.hashmap_use_bloom)
continue;
err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0);
if (err) {
ctx.map_prepare_err = true;
fprintf(stderr,
"failed to add elem to bloom filter map: %d\n", -errno);
break;
}
}
done:
pthread_mutex_lock(&ctx.map_done_mtx);
ctx.map_done = true;
pthread_cond_signal(&ctx.map_done_cv);
pthread_mutex_unlock(&ctx.map_done_mtx);
if (val)
free(val);
return NULL;
}
static void populate_maps(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
pthread_t map_thread;
int i, err, nr_rand_bytes;
ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map);
ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap);
ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map);
for (i = 0; i < nr_cpus; i++) {
err = pthread_create(&map_thread, NULL, map_prepare_thread,
NULL);
if (err) {
fprintf(stderr, "failed to create pthread: %d\n", -errno);
exit(1);
}
}
pthread_mutex_lock(&ctx.map_done_mtx);
while (!ctx.map_done)
pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx);
pthread_mutex_unlock(&ctx.map_done_mtx);
if (ctx.map_prepare_err)
exit(1);
nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals,
ctx.skel->rodata->nr_rand_bytes, 0);
if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) {
fprintf(stderr, "failed to get random bytes\n");
exit(1);
}
}
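/* Sanity-check that enough distinct values exist for the requested number of
* entries: value_size bytes can encode only 2^(8 * value_size) different
* values, e.g. --value_size 2 caps --nr_entries at 65536.
*/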
static void check_args(void)
{
if (args.value_size < 8) {
__u64 nr_unique_entries = 1ULL << (args.value_size * 8);
if (args.nr_entries > nr_unique_entries) {
fprintf(stderr,
"Not enough unique values for the nr_entries requested\n");
exit(1);
}
}
}
static struct bloom_filter_bench *setup_skeleton(void)
{
struct bloom_filter_bench *skel;
check_args();
setup_libbpf();
skel = bloom_filter_bench__open();
if (!skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom;
skel->rodata->count_false_hits = ctx.count_false_hits;
/* Resize number of entries */
bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries);
bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries);
bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries);
/* Set value size */
bpf_map__set_value_size(skel->maps.array_map, args.value_size);
bpf_map__set_value_size(skel->maps.bloom_map, args.value_size);
bpf_map__set_value_size(skel->maps.hashmap, args.value_size);
/* For the hashmap, we use the value as the key as well */
bpf_map__set_key_size(skel->maps.hashmap, args.value_size);
skel->bss->value_size = args.value_size;
/* Set number of hash functions */
bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs);
if (bloom_filter_bench__load(skel)) {
fprintf(stderr, "failed to load skeleton\n");
exit(1);
}
return skel;
}
static void bloom_lookup_setup(void)
{
struct bpf_link *link;
ctx.use_array_map = true;
ctx.skel = setup_skeleton();
populate_maps();
link = bpf_program__attach(ctx.skel->progs.bloom_lookup);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void bloom_update_setup(void)
{
struct bpf_link *link;
ctx.use_array_map = true;
ctx.skel = setup_skeleton();
populate_maps();
link = bpf_program__attach(ctx.skel->progs.bloom_update);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void false_positive_setup(void)
{
struct bpf_link *link;
ctx.use_hashmap = true;
ctx.hashmap_use_bloom = true;
ctx.count_false_hits = true;
ctx.skel = setup_skeleton();
populate_maps();
link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void hashmap_with_bloom_setup(void)
{
struct bpf_link *link;
ctx.use_hashmap = true;
ctx.hashmap_use_bloom = true;
ctx.skel = setup_skeleton();
populate_maps();
link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void hashmap_no_bloom_setup(void)
{
struct bpf_link *link;
ctx.use_hashmap = true;
ctx.skel = setup_skeleton();
populate_maps();
link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void measure(struct bench_res *res)
{
unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0;
static unsigned long last_hits, last_drops, last_false_hits;
unsigned int nr_cpus = bpf_num_possible_cpus();
int hit_key, drop_key, false_hit_key;
int i;
hit_key = ctx.skel->rodata->hit_key;
drop_key = ctx.skel->rodata->drop_key;
false_hit_key = ctx.skel->rodata->false_hit_key;
if (ctx.skel->bss->error != 0) {
fprintf(stderr, "error (%d) when searching the bloom filter\n",
ctx.skel->bss->error);
exit(1);
}
for (i = 0; i < nr_cpus; i++) {
struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i];
total_hits += s->stats[hit_key];
total_drops += s->stats[drop_key];
total_false_hits += s->stats[false_hit_key];
}
res->hits = total_hits - last_hits;
res->drops = total_drops - last_drops;
res->false_hits = total_false_hits - last_false_hits;
last_hits = total_hits;
last_drops = total_drops;
last_false_hits = total_false_hits;
}
const struct bench bench_bloom_lookup = {
.name = "bloom-lookup",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = bloom_lookup_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_bloom_update = {
.name = "bloom-update",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = bloom_update_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_bloom_false_positive = {
.name = "bloom-false-positive",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = false_positive_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = false_hits_report_progress,
.report_final = false_hits_report_final,
};
const struct bench bench_hashmap_without_bloom = {
.name = "hashmap-without-bloom",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = hashmap_no_bloom_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_hashmap_with_bloom = {
.name = "hashmap-with-bloom",
.argp = &bench_bloom_map_argp,
.validate = validate,
.setup = hashmap_with_bloom_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <asm/barrier.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <argp.h>
#include <stdlib.h>
#include "bench.h"
#include "ringbuf_bench.skel.h"
#include "perfbuf_bench.skel.h"
static struct {
bool back2back;
int batch_cnt;
bool sampled;
int sample_rate;
int ringbuf_sz; /* per-ringbuf, in bytes */
bool ringbuf_use_output; /* use slower output API */
int perfbuf_sz; /* per-CPU size, in pages */
} args = {
.back2back = false,
.batch_cnt = 500,
.sampled = false,
.sample_rate = 500,
.ringbuf_sz = 512 * 1024,
.ringbuf_use_output = false,
.perfbuf_sz = 128,
};
enum {
ARG_RB_BACK2BACK = 2000,
ARG_RB_USE_OUTPUT = 2001,
ARG_RB_BATCH_CNT = 2002,
ARG_RB_SAMPLED = 2003,
ARG_RB_SAMPLE_RATE = 2004,
};
static const struct argp_option opts[] = {
{ "rb-b2b", ARG_RB_BACK2BACK, NULL, 0, "Back-to-back mode"},
{ "rb-use-output", ARG_RB_USE_OUTPUT, NULL, 0, "Use bpf_ringbuf_output() instead of bpf_ringbuf_reserve()"},
{ "rb-batch-cnt", ARG_RB_BATCH_CNT, "CNT", 0, "Set BPF-side record batch count"},
{ "rb-sampled", ARG_RB_SAMPLED, NULL, 0, "Notification sampling"},
{ "rb-sample-rate", ARG_RB_SAMPLE_RATE, "RATE", 0, "Notification sample rate"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
switch (key) {
case ARG_RB_BACK2BACK:
args.back2back = true;
break;
case ARG_RB_USE_OUTPUT:
args.ringbuf_use_output = true;
break;
case ARG_RB_BATCH_CNT:
args.batch_cnt = strtol(arg, NULL, 10);
if (args.batch_cnt < 0) {
fprintf(stderr, "Invalid batch count.");
argp_usage(state);
}
break;
case ARG_RB_SAMPLED:
args.sampled = true;
break;
case ARG_RB_SAMPLE_RATE:
args.sample_rate = strtol(arg, NULL, 10);
if (args.sample_rate < 0) {
fprintf(stderr, "Invalid perfbuf sample rate.");
argp_usage(state);
}
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
/* exported into benchmark runner */
const struct argp bench_ringbufs_argp = {
.options = opts,
.parser = parse_arg,
};
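/* Example invocations (a sketch: the rb-* options are defined above, while
* the generic -p/-c producer/consumer flags and the ./bench runner form come
* from bench.c and are assumptions here):
*   ./bench -p1 -c1 rb-libbpf
*   ./bench -p1 -c1 --rb-sampled --rb-sample-rate 1000 pb-libbpf
*/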
/* RINGBUF-LIBBPF benchmark */
static struct counter buf_hits;
static inline void bufs_trigger_batch(void)
{
(void)syscall(__NR_getpgid);
}
static void bufs_validate(void)
{
if (env.consumer_cnt != 1) {
fprintf(stderr, "rb-libbpf benchmark needs one consumer!\n");
exit(1);
}
if (args.back2back && env.producer_cnt > 1) {
fprintf(stderr, "back-to-back mode makes sense only for single-producer case!\n");
exit(1);
}
}
static void *bufs_sample_producer(void *input)
{
if (args.back2back) {
/* initial batch to get everything started */
bufs_trigger_batch();
return NULL;
}
while (true)
bufs_trigger_batch();
return NULL;
}
static struct ringbuf_libbpf_ctx {
struct ringbuf_bench *skel;
struct ring_buffer *ringbuf;
} ringbuf_libbpf_ctx;
static void ringbuf_libbpf_measure(struct bench_res *res)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
res->hits = atomic_swap(&buf_hits.value, 0);
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
static struct ringbuf_bench *ringbuf_setup_skeleton(void)
{
struct ringbuf_bench *skel;
setup_libbpf();
skel = ringbuf_bench__open();
if (!skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
skel->rodata->batch_cnt = args.batch_cnt;
skel->rodata->use_output = args.ringbuf_use_output ? 1 : 0;
if (args.sampled)
/* record data + header take 16 bytes */
skel->rodata->wakeup_data_size = args.sample_rate * 16;
bpf_map__set_max_entries(skel->maps.ringbuf, args.ringbuf_sz);
if (ringbuf_bench__load(skel)) {
fprintf(stderr, "failed to load skeleton\n");
exit(1);
}
return skel;
}
static int buf_process_sample(void *ctx, void *data, size_t len)
{
atomic_inc(&buf_hits.value);
return 0;
}
static void ringbuf_libbpf_setup(void)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
struct bpf_link *link;
ctx->skel = ringbuf_setup_skeleton();
ctx->ringbuf = ring_buffer__new(bpf_map__fd(ctx->skel->maps.ringbuf),
buf_process_sample, NULL, NULL);
if (!ctx->ringbuf) {
fprintf(stderr, "failed to create ringbuf\n");
exit(1);
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void *ringbuf_libbpf_consumer(void *input)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
while (ring_buffer__poll(ctx->ringbuf, -1) >= 0) {
if (args.back2back)
bufs_trigger_batch();
}
fprintf(stderr, "ringbuf polling failed!\n");
return NULL;
}
/* RINGBUF-CUSTOM benchmark */
struct ringbuf_custom {
__u64 *consumer_pos;
__u64 *producer_pos;
__u64 mask;
void *data;
int map_fd;
};
static struct ringbuf_custom_ctx {
struct ringbuf_bench *skel;
struct ringbuf_custom ringbuf;
int epoll_fd;
struct epoll_event event;
} ringbuf_custom_ctx;
static void ringbuf_custom_measure(struct bench_res *res)
{
struct ringbuf_custom_ctx *ctx = &ringbuf_custom_ctx;
res->hits = atomic_swap(&buf_hits.value, 0);
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
static void ringbuf_custom_setup(void)
{
struct ringbuf_custom_ctx *ctx = &ringbuf_custom_ctx;
const size_t page_size = getpagesize();
struct bpf_link *link;
struct ringbuf_custom *r;
void *tmp;
int err;
ctx->skel = ringbuf_setup_skeleton();
ctx->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (ctx->epoll_fd < 0) {
fprintf(stderr, "failed to create epoll fd: %d\n", -errno);
exit(1);
}
r = &ctx->ringbuf;
r->map_fd = bpf_map__fd(ctx->skel->maps.ringbuf);
r->mask = args.ringbuf_sz - 1;
/* Map writable consumer page */
tmp = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
r->map_fd, 0);
if (tmp == MAP_FAILED) {
fprintf(stderr, "failed to mmap consumer page: %d\n", -errno);
exit(1);
}
r->consumer_pos = tmp;
/* Map read-only producer page and data pages. */
tmp = mmap(NULL, page_size + 2 * args.ringbuf_sz, PROT_READ, MAP_SHARED,
r->map_fd, page_size);
if (tmp == MAP_FAILED) {
fprintf(stderr, "failed to mmap data pages: %d\n", -errno);
exit(1);
}
r->producer_pos = tmp;
r->data = tmp + page_size;
ctx->event.events = EPOLLIN;
err = epoll_ctl(ctx->epoll_fd, EPOLL_CTL_ADD, r->map_fd, &ctx->event);
if (err < 0) {
fprintf(stderr, "failed to epoll add ringbuf: %d\n", -errno);
exit(1);
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
}
#define RINGBUF_BUSY_BIT (1U << 31)
#define RINGBUF_DISCARD_BIT (1U << 30)
#define RINGBUF_META_LEN 8
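/* Example: a 13-byte sample is stored with an 8-byte length header and padded
* to 8-byte alignment, so roundup_len(13) == 24 (busy/discard bits clear).
*/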
static inline int roundup_len(__u32 len)
{
/* clear out top 2 bits */
len <<= 2;
len >>= 2;
/* add length prefix */
len += RINGBUF_META_LEN;
/* round up to 8 byte alignment */
return (len + 7) / 8 * 8;
}
static void ringbuf_custom_process_ring(struct ringbuf_custom *r)
{
unsigned long cons_pos, prod_pos;
int *len_ptr, len;
bool got_new_data;
cons_pos = smp_load_acquire(r->consumer_pos);
while (true) {
got_new_data = false;
prod_pos = smp_load_acquire(r->producer_pos);
while (cons_pos < prod_pos) {
len_ptr = r->data + (cons_pos & r->mask);
len = smp_load_acquire(len_ptr);
/* sample not committed yet, bail out for now */
if (len & RINGBUF_BUSY_BIT)
return;
got_new_data = true;
cons_pos += roundup_len(len);
atomic_inc(&buf_hits.value);
}
if (got_new_data)
smp_store_release(r->consumer_pos, cons_pos);
else
break;
}
}
static void *ringbuf_custom_consumer(void *input)
{
struct ringbuf_custom_ctx *ctx = &ringbuf_custom_ctx;
int cnt;
do {
if (args.back2back)
bufs_trigger_batch();
cnt = epoll_wait(ctx->epoll_fd, &ctx->event, 1, -1);
if (cnt > 0)
ringbuf_custom_process_ring(&ctx->ringbuf);
} while (cnt >= 0);
fprintf(stderr, "ringbuf polling failed!\n");
return NULL;
}
/* PERFBUF-LIBBPF benchmark */
static struct perfbuf_libbpf_ctx {
struct perfbuf_bench *skel;
struct perf_buffer *perfbuf;
} perfbuf_libbpf_ctx;
static void perfbuf_measure(struct bench_res *res)
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
res->hits = atomic_swap(&buf_hits.value, 0);
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
static struct perfbuf_bench *perfbuf_setup_skeleton(void)
{
struct perfbuf_bench *skel;
setup_libbpf();
skel = perfbuf_bench__open();
if (!skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
skel->rodata->batch_cnt = args.batch_cnt;
if (perfbuf_bench__load(skel)) {
fprintf(stderr, "failed to load skeleton\n");
exit(1);
}
return skel;
}
static enum bpf_perf_event_ret
perfbuf_process_sample_raw(void *input_ctx, int cpu,
struct perf_event_header *e)
{
switch (e->type) {
case PERF_RECORD_SAMPLE:
atomic_inc(&buf_hits.value);
break;
case PERF_RECORD_LOST:
break;
default:
return LIBBPF_PERF_EVENT_ERROR;
}
return LIBBPF_PERF_EVENT_CONT;
}
static void perfbuf_libbpf_setup(void)
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
struct perf_event_attr attr;
struct bpf_link *link;
ctx->skel = perfbuf_setup_skeleton();
memset(&attr, 0, sizeof(attr));
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
/* notify only every Nth sample */
if (args.sampled) {
attr.sample_period = args.sample_rate;
attr.wakeup_events = args.sample_rate;
} else {
attr.sample_period = 1;
attr.wakeup_events = 1;
}
if (args.sample_rate > args.batch_cnt) {
fprintf(stderr, "sample rate %d is too high for given batch count %d\n",
args.sample_rate, args.batch_cnt);
exit(1);
}
ctx->perfbuf = perf_buffer__new_raw(bpf_map__fd(ctx->skel->maps.perfbuf),
args.perfbuf_sz, &attr,
perfbuf_process_sample_raw, NULL, NULL);
if (!ctx->perfbuf) {
fprintf(stderr, "failed to create perfbuf\n");
exit(1);
}
link = bpf_program__attach(ctx->skel->progs.bench_perfbuf);
if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
}
static void *perfbuf_libbpf_consumer(void *input)
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
while (perf_buffer__poll(ctx->perfbuf, -1) >= 0) {
if (args.back2back)
bufs_trigger_batch();
}
fprintf(stderr, "perfbuf polling failed!\n");
return NULL;
}
/* PERFBUF-CUSTOM benchmark */
/* copies of internal libbpf definitions */
struct perf_cpu_buf {
struct perf_buffer *pb;
void *base; /* mmap()'ed memory */
void *buf; /* for reconstructing segmented data */
size_t buf_size;
int fd;
int cpu;
int map_key;
};
struct perf_buffer {
perf_buffer_event_fn event_cb;
perf_buffer_sample_fn sample_cb;
perf_buffer_lost_fn lost_cb;
void *ctx; /* passed into callbacks */
size_t page_size;
size_t mmap_size;
struct perf_cpu_buf **cpu_bufs;
struct epoll_event *events;
int cpu_cnt; /* number of allocated CPU buffers */
int epoll_fd; /* perf event FD */
int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};
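/* Note: the two structs above mirror libbpf's private perf_buffer layout; if
* that internal layout ever changes, this custom consumer silently reads
* garbage. It is a benchmarking shortcut, not a supported API.
*/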
static void *perfbuf_custom_consumer(void *input)
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
struct perf_buffer *pb = ctx->perfbuf;
struct perf_cpu_buf *cpu_buf;
struct perf_event_mmap_page *header;
size_t mmap_mask = pb->mmap_size - 1;
struct perf_event_header *ehdr;
__u64 data_head, data_tail;
size_t ehdr_size;
void *base;
int i, cnt;
while (true) {
if (args.back2back)
bufs_trigger_batch();
cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, -1);
if (cnt <= 0) {
fprintf(stderr, "perf epoll failed: %d\n", -errno);
exit(1);
}
for (i = 0; i < cnt; ++i) {
cpu_buf = pb->events[i].data.ptr;
header = cpu_buf->base;
base = ((void *)header) + pb->page_size;
data_head = ring_buffer_read_head(header);
data_tail = header->data_tail;
while (data_head != data_tail) {
ehdr = base + (data_tail & mmap_mask);
ehdr_size = ehdr->size;
if (ehdr->type == PERF_RECORD_SAMPLE)
atomic_inc(&buf_hits.value);
data_tail += ehdr_size;
}
ring_buffer_write_tail(header, data_tail);
}
}
return NULL;
}
const struct bench bench_rb_libbpf = {
.name = "rb-libbpf",
.argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
.consumer_thread = ringbuf_libbpf_consumer,
.measure = ringbuf_libbpf_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rb_custom = {
.name = "rb-custom",
.argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_custom_setup,
.producer_thread = bufs_sample_producer,
.consumer_thread = ringbuf_custom_consumer,
.measure = ringbuf_custom_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_pb_libbpf = {
.name = "pb-libbpf",
.argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
.consumer_thread = perfbuf_libbpf_consumer,
.measure = perfbuf_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_pb_custom = {
.name = "pb-custom",
.argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
.consumer_thread = perfbuf_custom_consumer,
.measure = perfbuf_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_ringbufs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <argp.h>
#include <sys/prctl.h>
#include "local_storage_rcu_tasks_trace_bench.skel.h"
#include "bench.h"
#include <signal.h>
static struct {
__u32 nr_procs;
__u32 kthread_pid;
} args = {
.nr_procs = 1000,
.kthread_pid = 0,
};
enum {
ARG_NR_PROCS = 7000,
ARG_KTHREAD_PID = 7001,
};
static const struct argp_option opts[] = {
{ "nr_procs", ARG_NR_PROCS, "NR_PROCS", 0,
"Set number of user processes to spin up"},
{ "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
"Pid of rcu_tasks_trace kthread for ticks tracking"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
long ret;
switch (key) {
case ARG_NR_PROCS:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "invalid nr_procs\n");
argp_usage(state);
}
args.nr_procs = ret;
break;
case ARG_KTHREAD_PID:
ret = strtol(arg, NULL, 10);
if (ret < 1) {
fprintf(stderr, "invalid kthread_pid\n");
argp_usage(state);
}
args.kthread_pid = ret;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_local_storage_rcu_tasks_trace_argp = {
.options = opts,
.parser = parse_arg,
};
#define MAX_SLEEP_PROCS 150000
static void validate(void)
{
if (env.producer_cnt != 1) {
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
if (args.nr_procs > MAX_SLEEP_PROCS) {
fprintf(stderr, "benchmark supports up to %u sleeper procs!\n",
MAX_SLEEP_PROCS);
exit(1);
}
}
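/* Parse the stime field (the 15th field of /proc/<pid>/stat, in clock ticks)
* for the RCU Tasks Trace kthread so its CPU usage can be tracked between
* measurement rounds.
*/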
static long kthread_pid_ticks(void)
{
char procfs_path[100];
long stime;
FILE *f;
if (!args.kthread_pid)
return -1;
sprintf(procfs_path, "/proc/%u/stat", args.kthread_pid);
f = fopen(procfs_path, "r");
if (!f) {
fprintf(stderr, "couldn't open %s, exiting\n", procfs_path);
goto err_out;
}
if (fscanf(f, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %ld", &stime) != 1) {
fprintf(stderr, "fscanf of %s failed, exiting\n", procfs_path);
goto err_out;
}
fclose(f);
return stime;
err_out:
if (f)
fclose(f);
exit(1);
return 0;
}
static struct {
struct local_storage_rcu_tasks_trace_bench *skel;
long prev_kthread_stime;
} ctx;
static void sleep_and_loop(void)
{
while (true) {
sleep(rand() % 4);
syscall(__NR_getpgid);
}
}
static void local_storage_tasks_trace_setup(void)
{
int i, err, forkret, runner_pid;
runner_pid = getpid();
for (i = 0; i < args.nr_procs; i++) {
forkret = fork();
if (forkret < 0) {
fprintf(stderr, "Error forking sleeper proc %u of %u, exiting\n", i,
args.nr_procs);
goto err_out;
}
if (!forkret) {
err = prctl(PR_SET_PDEATHSIG, SIGKILL);
if (err < 0) {
fprintf(stderr, "prctl failed with err %d, exiting\n", errno);
goto err_out;
}
if (getppid() != runner_pid) {
fprintf(stderr, "Runner died while spinning up procs, exiting\n");
goto err_out;
}
sleep_and_loop();
}
}
printf("Spun up %u procs (our pid %d)\n", args.nr_procs, runner_pid);
setup_libbpf();
ctx.skel = local_storage_rcu_tasks_trace_bench__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "Error doing open_and_load, exiting\n");
goto err_out;
}
ctx.prev_kthread_stime = kthread_pid_ticks();
if (!bpf_program__attach(ctx.skel->progs.get_local)) {
fprintf(stderr, "Error attaching bpf program\n");
goto err_out;
}
if (!bpf_program__attach(ctx.skel->progs.pregp_step)) {
fprintf(stderr, "Error attaching bpf program\n");
goto err_out;
}
if (!bpf_program__attach(ctx.skel->progs.postgp)) {
fprintf(stderr, "Error attaching bpf program\n");
goto err_out;
}
return;
err_out:
exit(1);
}
static void measure(struct bench_res *res)
{
long ticks;
res->gp_ct = atomic_swap(&ctx.skel->bss->gp_hits, 0);
res->gp_ns = atomic_swap(&ctx.skel->bss->gp_times, 0);
ticks = kthread_pid_ticks();
res->stime = ticks - ctx.prev_kthread_stime;
ctx.prev_kthread_stime = ticks;
}
static void *producer(void *input)
{
while (true)
syscall(__NR_getpgid);
return NULL;
}
static void report_progress(int iter, struct bench_res *res, long delta_ns)
{
if (ctx.skel->bss->unexpected) {
fprintf(stderr, "Error: Unexpected order of bpf prog calls (postgp after pregp).");
fprintf(stderr, "Data can't be trusted, exiting\n");
exit(1);
}
if (env.quiet)
return;
printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
iter, res->gp_ns / (double)res->gp_ct);
printf("Iter %d\t avg ticks per tasks_trace grace period\t%lf\n",
iter, res->stime / (double)res->gp_ct);
}
static void report_final(struct bench_res res[], int res_cnt)
{
struct basic_stats gp_stat;
grace_period_latency_basic_stats(res, res_cnt, &gp_stat);
printf("SUMMARY tasks_trace grace period latency");
printf("\tavg %.3lf us\tstddev %.3lf us\n", gp_stat.mean, gp_stat.stddev);
grace_period_ticks_basic_stats(res, res_cnt, &gp_stat);
printf("SUMMARY ticks per tasks_trace grace period");
printf("\tavg %.3lf\tstddev %.3lf\n", gp_stat.mean, gp_stat.stddev);
}
/* local-storage-tasks-trace: Benchmark performance of BPF local_storage's use
* of RCU Tasks-Trace.
*
* Stress RCU Tasks Trace by forking many tasks, all of which do no work aside
* from a sleep() loop, and by creating/destroying BPF task-local storage on
* wakeup. The number of forked tasks is configurable.
*
* Exercising code paths which call call_rcu_tasks_trace while there are many
* thousands of tasks on the system should result in RCU Tasks-Trace having to
* do a noticeable amount of work.
*
* This should be observable by measuring rcu_tasks_trace_kthread CPU usage
* after the grace period has ended, or by measuring grace period latency.
*
* This benchmark uses both approaches, attaching to rcu_tasks_trace_pregp_step
* and rcu_tasks_trace_postgp functions to measure grace period latency and
* using /proc/PID/stat to measure rcu_tasks_trace_kthread kernel ticks
*/
const struct bench bench_local_storage_tasks_trace = {
.name = "local-storage-tasks-trace",
.argp = &bench_local_storage_rcu_tasks_trace_argp,
.validate = validate,
.setup = local_storage_tasks_trace_setup,
.producer_thread = producer,
.measure = measure,
.report_progress = report_progress,
.report_final = report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <argp.h>
#include "bench.h"
#include "strncmp_bench.skel.h"
static struct strncmp_ctx {
struct strncmp_bench *skel;
} ctx;
static struct strncmp_args {
u32 cmp_str_len;
} args = {
.cmp_str_len = 32,
};
enum {
ARG_CMP_STR_LEN = 5000,
};
static const struct argp_option opts[] = {
{ "cmp-str-len", ARG_CMP_STR_LEN, "CMP_STR_LEN", 0,
"Set the length of compared string" },
{},
};
static error_t strncmp_parse_arg(int key, char *arg, struct argp_state *state)
{
switch (key) {
case ARG_CMP_STR_LEN:
args.cmp_str_len = strtoul(arg, NULL, 10);
if (!args.cmp_str_len ||
args.cmp_str_len >= sizeof(ctx.skel->bss->str)) {
fprintf(stderr, "Invalid cmp str len (limit %zu)\n",
sizeof(ctx.skel->bss->str));
argp_usage(state);
}
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_strncmp_argp = {
.options = opts,
.parser = strncmp_parse_arg,
};
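/* Example invocations (a sketch: --cmp-str-len and the benchmark names are
* defined in this file, the ./bench runner form is an assumption):
*   ./bench --cmp-str-len 64 strncmp-no-helper
*   ./bench --cmp-str-len 64 strncmp-helper
*/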
static void strncmp_validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr, "strncmp benchmark doesn't support consumer!\n");
exit(1);
}
}
static void strncmp_setup(void)
{
int err;
char *target;
size_t i, sz;
sz = sizeof(ctx.skel->rodata->target);
if (!sz || sz < sizeof(ctx.skel->bss->str)) {
fprintf(stderr, "invalid string size (target %zu, src %zu)\n",
sz, sizeof(ctx.skel->bss->str));
exit(1);
}
setup_libbpf();
ctx.skel = strncmp_bench__open();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
srandom(time(NULL));
target = ctx.skel->rodata->target;
for (i = 0; i < sz - 1; i++)
target[i] = '1' + random() % 9;
target[sz - 1] = '\0';
ctx.skel->rodata->cmp_str_len = args.cmp_str_len;
memcpy(ctx.skel->bss->str, target, args.cmp_str_len);
ctx.skel->bss->str[args.cmp_str_len] = '\0';
/* Make bss->str < rodata->target */
ctx.skel->bss->str[args.cmp_str_len - 1] -= 1;
err = strncmp_bench__load(ctx.skel);
if (err) {
fprintf(stderr, "failed to load skeleton\n");
strncmp_bench__destroy(ctx.skel);
exit(1);
}
}
static void strncmp_attach_prog(struct bpf_program *prog)
{
struct bpf_link *link;
link = bpf_program__attach(prog);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void strncmp_no_helper_setup(void)
{
strncmp_setup();
strncmp_attach_prog(ctx.skel->progs.strncmp_no_helper);
}
static void strncmp_helper_setup(void)
{
strncmp_setup();
strncmp_attach_prog(ctx.skel->progs.strncmp_helper);
}
static void *strncmp_producer(void *ctx)
{
while (true)
(void)syscall(__NR_getpgid);
return NULL;
}
static void strncmp_measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
}
const struct bench bench_strncmp_no_helper = {
.name = "strncmp-no-helper",
.argp = &bench_strncmp_argp,
.validate = strncmp_validate,
.setup = strncmp_no_helper_setup,
.producer_thread = strncmp_producer,
.measure = strncmp_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_strncmp_helper = {
.name = "strncmp-helper",
.argp = &bench_strncmp_argp,
.validate = strncmp_validate,
.setup = strncmp_helper_setup,
.producer_thread = strncmp_producer,
.measure = strncmp_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_strncmp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <argp.h>
#include <stdbool.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include "bench.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "htab_mem_bench.skel.h"
struct htab_mem_use_case {
const char *name;
const char **progs;
/* Do synchronization between addition thread and deletion thread */
bool need_sync;
};
static struct htab_mem_ctx {
const struct htab_mem_use_case *uc;
struct htab_mem_bench *skel;
pthread_barrier_t *notify;
int fd;
} ctx;
const char *ow_progs[] = {"overwrite", NULL};
const char *batch_progs[] = {"batch_add_batch_del", NULL};
const char *add_del_progs[] = {"add_only", "del_only", NULL};
static const struct htab_mem_use_case use_cases[] = {
{ .name = "overwrite", .progs = ow_progs },
{ .name = "batch_add_batch_del", .progs = batch_progs },
{ .name = "add_del_on_diff_cpu", .progs = add_del_progs, .need_sync = true },
};
static struct htab_mem_args {
u32 value_size;
const char *use_case;
bool preallocated;
} args = {
.value_size = 8,
.use_case = "overwrite",
.preallocated = false,
};
enum {
ARG_VALUE_SIZE = 10000,
ARG_USE_CASE = 10001,
ARG_PREALLOCATED = 10002,
};
static const struct argp_option opts[] = {
{ "value-size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
"Set the value size of hash map (default 8)" },
{ "use-case", ARG_USE_CASE, "USE_CASE", 0,
"Set the use case of hash map: overwrite|batch_add_batch_del|add_del_on_diff_cpu" },
{ "preallocated", ARG_PREALLOCATED, NULL, 0, "use preallocated hash map" },
{},
};
static error_t htab_mem_parse_arg(int key, char *arg, struct argp_state *state)
{
switch (key) {
case ARG_VALUE_SIZE:
args.value_size = strtoul(arg, NULL, 10);
if (args.value_size > 4096) {
fprintf(stderr, "too big value size %u\n", args.value_size);
argp_usage(state);
}
break;
case ARG_USE_CASE:
args.use_case = strdup(arg);
if (!args.use_case) {
fprintf(stderr, "no mem for use-case\n");
argp_usage(state);
}
break;
case ARG_PREALLOCATED:
args.preallocated = true;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_htab_mem_argp = {
.options = opts,
.parser = htab_mem_parse_arg,
};
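/* Example invocations (a sketch: the htab-mem options are defined above,
* while the -p producer-count flag and the ./bench runner form come from
* bench.c and are assumptions here):
*   ./bench htab-mem --use-case overwrite --preallocated
*   ./bench -p8 htab-mem --use-case add_del_on_diff_cpu
*/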
static void htab_mem_validate(void)
{
if (!strcmp(use_cases[2].name, args.use_case) && env.producer_cnt % 2) {
fprintf(stderr, "%s needs an even number of producers\n", args.use_case);
exit(1);
}
}
static int htab_mem_bench_init_barriers(void)
{
pthread_barrier_t *barriers;
unsigned int i, nr;
if (!ctx.uc->need_sync)
return 0;
nr = (env.producer_cnt + 1) / 2;
barriers = calloc(nr, sizeof(*barriers));
if (!barriers)
return -1;
/* Used for synchronization between two threads */
for (i = 0; i < nr; i++)
pthread_barrier_init(&barriers[i], NULL, 2);
ctx.notify = barriers;
return 0;
}
static void htab_mem_bench_exit_barriers(void)
{
unsigned int i, nr;
if (!ctx.notify)
return;
nr = (env.producer_cnt + 1) / 2;
for (i = 0; i < nr; i++)
pthread_barrier_destroy(&ctx.notify[i]);
free(ctx.notify);
}
static const struct htab_mem_use_case *htab_mem_find_use_case_or_exit(const char *name)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(use_cases); i++) {
if (!strcmp(name, use_cases[i].name))
return &use_cases[i];
}
fprintf(stderr, "no such use-case: %s\n", name);
fprintf(stderr, "available use case:");
for (i = 0; i < ARRAY_SIZE(use_cases); i++)
fprintf(stderr, " %s", use_cases[i].name);
fprintf(stderr, "\n");
exit(1);
}
static void htab_mem_setup(void)
{
struct bpf_map *map;
const char **names;
int err;
setup_libbpf();
ctx.uc = htab_mem_find_use_case_or_exit(args.use_case);
err = htab_mem_bench_init_barriers();
if (err) {
fprintf(stderr, "failed to init barrier\n");
exit(1);
}
ctx.fd = cgroup_setup_and_join("/htab_mem");
if (ctx.fd < 0)
goto cleanup;
ctx.skel = htab_mem_bench__open();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
goto cleanup;
}
map = ctx.skel->maps.htab;
bpf_map__set_value_size(map, args.value_size);
/* Ensure that different CPUs can operate on different subset */
bpf_map__set_max_entries(map, MAX(8192, 64 * env.nr_cpus));
if (args.preallocated)
bpf_map__set_map_flags(map, bpf_map__map_flags(map) & ~BPF_F_NO_PREALLOC);
names = ctx.uc->progs;
while (*names) {
struct bpf_program *prog;
prog = bpf_object__find_program_by_name(ctx.skel->obj, *names);
if (!prog) {
fprintf(stderr, "no such program %s\n", *names);
goto cleanup;
}
bpf_program__set_autoload(prog, true);
names++;
}
ctx.skel->bss->nr_thread = env.producer_cnt;
err = htab_mem_bench__load(ctx.skel);
if (err) {
fprintf(stderr, "failed to load skeleton\n");
goto cleanup;
}
err = htab_mem_bench__attach(ctx.skel);
if (err) {
fprintf(stderr, "failed to attach skeleton\n");
goto cleanup;
}
return;
cleanup:
htab_mem_bench__destroy(ctx.skel);
htab_mem_bench_exit_barriers();
if (ctx.fd >= 0) {
close(ctx.fd);
cleanup_cgroup_environment();
}
exit(1);
}
static void htab_mem_add_fn(pthread_barrier_t *notify)
{
while (true) {
/* Do addition */
(void)syscall(__NR_getpgid, 0);
/* Notify deletion thread to do deletion */
pthread_barrier_wait(notify);
/* Wait for deletion to complete */
pthread_barrier_wait(notify);
}
}
static void htab_mem_delete_fn(pthread_barrier_t *notify)
{
while (true) {
/* Wait for addition to complete */
pthread_barrier_wait(notify);
/* Do deletion */
(void)syscall(__NR_getppid);
/* Notify addition thread to do addition */
pthread_barrier_wait(notify);
}
}
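/* For use cases that need synchronization (add_del_on_diff_cpu), producers
* are paired up: producer 2k runs the addition loop and producer 2k+1 runs
* the deletion loop, alternating via barrier k.
*/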
static void *htab_mem_producer(void *arg)
{
pthread_barrier_t *notify;
int seq;
if (!ctx.uc->need_sync) {
while (true)
(void)syscall(__NR_getpgid, 0);
return NULL;
}
seq = (long)arg;
notify = &ctx.notify[seq / 2];
if (seq & 1)
htab_mem_delete_fn(notify);
else
htab_mem_add_fn(notify);
return NULL;
}
static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value)
{
char buf[32];
ssize_t got;
int fd;
fd = openat(ctx.fd, name, O_RDONLY);
if (fd < 0) {
/* cgroup v1 ? */
fprintf(stderr, "no %s\n", name);
*value = 0;
return;
}
got = read(fd, buf, sizeof(buf) - 1);
if (got <= 0) {
*value = 0;
close(fd);
return;
}
buf[got] = 0;
*value = strtoull(buf, NULL, 0);
close(fd);
}
static void htab_mem_measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->op_cnt, 0) / env.producer_cnt;
htab_mem_read_mem_cgrp_file("memory.current", &res->gp_ct);
}
static void htab_mem_report_progress(int iter, struct bench_res *res, long delta_ns)
{
double loop, mem;
loop = res->hits / 1000.0 / (delta_ns / 1000000000.0);
mem = res->gp_ct / 1048576.0;
printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
printf("per-prod-op %7.2lfk/s, memory usage %7.2lfMiB\n", loop, mem);
}
static void htab_mem_report_final(struct bench_res res[], int res_cnt)
{
double mem_mean = 0.0, mem_stddev = 0.0;
double loop_mean = 0.0, loop_stddev = 0.0;
unsigned long peak_mem;
int i;
for (i = 0; i < res_cnt; i++) {
loop_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
mem_mean += res[i].gp_ct / 1048576.0 / (0.0 + res_cnt);
}
if (res_cnt > 1) {
for (i = 0; i < res_cnt; i++) {
loop_stddev += (loop_mean - res[i].hits / 1000.0) *
(loop_mean - res[i].hits / 1000.0) /
(res_cnt - 1.0);
mem_stddev += (mem_mean - res[i].gp_ct / 1048576.0) *
(mem_mean - res[i].gp_ct / 1048576.0) /
(res_cnt - 1.0);
}
loop_stddev = sqrt(loop_stddev);
mem_stddev = sqrt(mem_stddev);
}
htab_mem_read_mem_cgrp_file("memory.peak", &peak_mem);
printf("Summary: per-prod-op %7.2lf \u00B1 %7.2lfk/s, memory usage %7.2lf \u00B1 %7.2lfMiB,"
" peak memory usage %7.2lfMiB\n",
loop_mean, loop_stddev, mem_mean, mem_stddev, peak_mem / 1048576.0);
cleanup_cgroup_environment();
}
const struct bench bench_htab_mem = {
.name = "htab-mem",
.argp = &bench_htab_mem_argp,
.validate = htab_mem_validate,
.setup = htab_mem_setup,
.producer_thread = htab_mem_producer,
.measure = htab_mem_measure,
.report_progress = htab_mem_report_progress,
.report_final = htab_mem_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_htab_mem.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <sys/random.h>
#include <argp.h>
#include "bench.h"
#include "bpf_hashmap_lookup.skel.h"
#include "bpf_util.h"
/* BPF triggering benchmarks */
static struct ctx {
struct bpf_hashmap_lookup *skel;
} ctx;
/* only available to kernel, so define it here */
#define BPF_MAX_LOOPS (1<<23)
#define MAX_KEY_SIZE 1024 /* the size of the key map */
static struct {
__u32 key_size;
__u32 map_flags;
__u32 max_entries;
__u32 nr_entries;
__u32 nr_loops;
} args = {
.key_size = 4,
.map_flags = 0,
.max_entries = 1000,
.nr_entries = 500,
.nr_loops = 1000000,
};
enum {
ARG_KEY_SIZE = 8001,
ARG_MAP_FLAGS,
ARG_MAX_ENTRIES,
ARG_NR_ENTRIES,
ARG_NR_LOOPS,
};
static const struct argp_option opts[] = {
{ "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
"The hashmap key size (max 1024)"},
{ "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
"The hashmap flags passed to BPF_MAP_CREATE"},
{ "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
"The hashmap max entries"},
{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
"The number of entries to insert/lookup"},
{ "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
"The number of loops for the benchmark"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
long ret;
switch (key) {
case ARG_KEY_SIZE:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > MAX_KEY_SIZE) {
fprintf(stderr, "invalid key_size");
argp_usage(state);
}
args.key_size = ret;
break;
case ARG_MAP_FLAGS:
ret = strtol(arg, NULL, 0);
if (ret < 0 || ret > UINT_MAX) {
fprintf(stderr, "invalid map_flags");
argp_usage(state);
}
args.map_flags = ret;
break;
case ARG_MAX_ENTRIES:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "invalid max_entries");
argp_usage(state);
}
args.max_entries = ret;
break;
case ARG_NR_ENTRIES:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "invalid nr_entries");
argp_usage(state);
}
args.nr_entries = ret;
break;
case ARG_NR_LOOPS:
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > BPF_MAX_LOOPS) {
fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
ret, BPF_MAX_LOOPS);
argp_usage(state);
}
args.nr_loops = ret;
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
const struct argp bench_hashmap_lookup_argp = {
.options = opts,
.parser = parse_arg,
};
static void validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
if (args.nr_entries > args.max_entries) {
fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
args.max_entries, args.nr_entries);
exit(1);
}
}
static void *producer(void *input)
{
while (true) {
/* trigger the bpf program */
syscall(__NR_getpgid);
}
return NULL;
}
static void measure(struct bench_res *res)
{
}
static inline void patch_key(u32 i, u32 *key)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
*key = i + 1;
#else
*key = __builtin_bswap32(i + 1);
#endif
/* the rest of key is random */
}
static void setup(void)
{
struct bpf_link *link;
int map_fd;
int ret;
int i;
setup_libbpf();
ctx.skel = bpf_hashmap_lookup__open();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);
ctx.skel->bss->nr_entries = args.nr_entries;
ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;
if (args.key_size > 4) {
for (i = 1; i < args.key_size/4; i++)
ctx.skel->bss->key[i] = 2654435761 * i;
}
ret = bpf_hashmap_lookup__load(ctx.skel);
if (ret) {
bpf_hashmap_lookup__destroy(ctx.skel);
fprintf(stderr, "failed to load map: %s", strerror(-ret));
exit(1);
}
/* fill in the hash_map */
map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
for (u64 i = 0; i < args.nr_entries; i++) {
patch_key(i, ctx.skel->bss->key);
bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
}
link = bpf_program__attach(ctx.skel->progs.benchmark);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static inline double events_from_time(u64 time)
{
if (time)
return args.nr_loops * 1000000000llu / time / 1000000.0L;
return 0;
}
static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
{
int i, n = 0;
*events_mean = 0;
*events_stddev = 0;
*mean_time = 0;
for (i = 0; i < 32; i++) {
if (!times[i])
break;
*mean_time += times[i];
*events_mean += events_from_time(times[i]);
n += 1;
}
if (!n)
return 0;
*mean_time /= n;
*events_mean /= n;
if (n > 1) {
for (i = 0; i < n; i++) {
double events_i = *events_mean - events_from_time(times[i]);
*events_stddev += events_i * events_i / (n - 1);
}
*events_stddev = sqrt(*events_stddev);
}
return n;
}
static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
double events_mean, events_stddev;
u64 mean_time;
int i, n;
for (i = 0; i < nr_cpus; i++) {
n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
&events_stddev, &mean_time);
if (n == 0)
continue;
if (env.quiet) {
/* we expect only one cpu to be present */
if (env.affinity)
printf("%.3lf\n", events_mean);
else
printf("cpu%02d %.3lf\n", i, events_mean);
} else {
printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
" (approximated from %d samples of ~%lums)\n",
i, events_mean, 2*events_stddev,
n, mean_time / 1000000);
}
}
}
const struct bench bench_bpf_hashmap_lookup = {
.name = "bpf-hashmap-lookup",
.argp = &bench_hashmap_lookup_argp,
.validate = validate,
.setup = setup,
.producer_thread = producer,
.measure = measure,
.report_progress = NULL,
.report_final = hashmap_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c |
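The command-line handling above is the standard argp pattern: an options table keyed on arbitrary integer values plus a parse callback. A self-contained sketch of the same pattern (the option name and the 8001-style key are illustrative, not tied to the benchmark runner):

#include <argp.h>
#include <stdio.h>
#include <stdlib.h>

static struct {
	long nr_loops;
} cfg = { .nr_loops = 1000 };

enum { OPT_NR_LOOPS = 8001 };	/* arbitrary non-printable option key */

static const struct argp_option options[] = {
	{ "nr_loops", OPT_NR_LOOPS, "N", 0, "Number of loop iterations" },
	{},
};

static error_t parse_opt(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case OPT_NR_LOOPS:
		cfg.nr_loops = strtol(arg, NULL, 10);
		if (cfg.nr_loops < 1)
			argp_usage(state);
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}

static const struct argp argp = { .options = options, .parser = parse_opt };

int main(int argc, char **argv)
{
	argp_parse(&argp, argc, argv, 0, NULL, NULL);
	printf("nr_loops = %ld\n", cfg.nr_loops);
	return 0;
}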
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <argp.h>
#include "bench.h"
#include "bpf_loop_bench.skel.h"
/* BPF triggering benchmarks */
static struct ctx {
struct bpf_loop_bench *skel;
} ctx;
static struct {
__u32 nr_loops;
} args = {
.nr_loops = 10,
};
enum {
ARG_NR_LOOPS = 4000,
};
static const struct argp_option opts[] = {
{ "nr_loops", ARG_NR_LOOPS, "nr_loops", 0,
"Set number of loops for the bpf_loop helper"},
{},
};
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
switch (key) {
case ARG_NR_LOOPS:
args.nr_loops = strtol(arg, NULL, 10);
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
/* exported into benchmark runner */
const struct argp bench_bpf_loop_argp = {
.options = opts,
.parser = parse_arg,
};
static void validate(void)
{
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
static void *producer(void *input)
{
while (true)
/* trigger the bpf program */
syscall(__NR_getpgid);
return NULL;
}
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
}
static void setup(void)
{
struct bpf_link *link;
setup_libbpf();
ctx.skel = bpf_loop_bench__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
link = bpf_program__attach(ctx.skel->progs.benchmark);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
ctx.skel->bss->nr_loops = args.nr_loops;
}
const struct bench bench_bpf_loop = {
.name = "bpf-loop",
.argp = &bench_bpf_loop_argp,
.validate = validate,
.setup = setup,
.producer_thread = producer,
.measure = measure,
.report_progress = ops_report_progress,
.report_final = ops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_bpf_loop.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <fcntl.h>
#include "bench.h"
#include "test_overhead.skel.h"
/* BPF triggering benchmarks */
static struct ctx {
struct test_overhead *skel;
struct counter hits;
int fd;
} ctx;
static void validate(void)
{
if (env.producer_cnt != 1) {
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
if (env.consumer_cnt != 0) {
fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
static void *producer(void *input)
{
char buf[] = "test_overhead";
int err;
while (true) {
err = write(ctx.fd, buf, sizeof(buf));
if (err < 0) {
fprintf(stderr, "write failed\n");
exit(1);
}
atomic_inc(&ctx.hits.value);
}
}
static void measure(struct bench_res *res)
{
res->hits = atomic_swap(&ctx.hits.value, 0);
}
static void setup_ctx(void)
{
setup_libbpf();
ctx.skel = test_overhead__open_and_load();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
ctx.fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (ctx.fd < 0) {
fprintf(stderr, "failed to open /proc/self/comm: %d\n", -errno);
exit(1);
}
}
static void attach_bpf(struct bpf_program *prog)
{
struct bpf_link *link;
link = bpf_program__attach(prog);
if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
static void setup_base(void)
{
setup_ctx();
}
static void setup_kprobe(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog1);
}
static void setup_kretprobe(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog2);
}
static void setup_rawtp(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog3);
}
static void setup_fentry(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog4);
}
static void setup_fexit(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog5);
}
const struct bench bench_rename_base = {
.name = "rename-base",
.validate = validate,
.setup = setup_base,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rename_kprobe = {
.name = "rename-kprobe",
.validate = validate,
.setup = setup_kprobe,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rename_kretprobe = {
.name = "rename-kretprobe",
.validate = validate,
.setup = setup_kretprobe,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rename_rawtp = {
.name = "rename-rawtp",
.validate = validate,
.setup = setup_rawtp,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rename_fentry = {
.name = "rename-fentry",
.validate = validate,
.setup = setup_fentry,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_rename_fexit = {
.name = "rename-fexit",
.validate = validate,
.setup = setup_fexit,
.producer_thread = producer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
| linux-master | tools/testing/selftests/bpf/benchs/bench_rename.c |
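The producer above drives every rename-* variant by rewriting the task comm through /proc/self/comm, which is the event the attached probes and tracepoints observe. A standalone sketch of that trigger, assuming only that /proc is mounted:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Writing to /proc/self/comm renames the current task, which is the
	 * kernel path the rename-* benchmarks instrument.
	 */
	const char name[] = "test_overhead";
	int fd = open("/proc/self/comm", O_WRONLY | O_TRUNC);

	if (fd < 0) {
		perror("open /proc/self/comm");
		return 1;
	}
	if (write(fd, name, sizeof(name)) < 0)
		perror("write");
	close(fd);
	return 0;
}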
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
__naked void test_1_for_cgroup_skb(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
r4 = *(u32*)(r1 + %[__sk_buff_len]); \
r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \
r6 = *(u32*)(r1 + %[__sk_buff_mark]); \
*(u32*)(r1 + %[__sk_buff_mark]) = r6; \
r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \
r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \
r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \
r0 = r2; \
r0 += 8; \
if r0 > r3 goto l0_%=; \
r0 = *(u8*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
__imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
__imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
__imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_2_for_cgroup_skb(void)
{
asm volatile (" \
r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]); \
r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]); \
r6 = *(u32*)(r1 + %[__sk_buff_priority]); \
*(u32*)(r1 + %[__sk_buff_priority]) = r6; \
r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \
r9 = *(u32*)(r1 + %[__sk_buff_hash]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
__imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
__imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
__imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
__imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
__imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_3_for_cgroup_skb(void)
{
asm volatile (" \
r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \
r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \
r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \
r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \
r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \
r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
*(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \
*(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \
*(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \
*(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \
*(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
__imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
__imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
__imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
__imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
__imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
: __clobber_all);
}
SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_4_for_cgroup_skb(void)
{
asm volatile (" \
r2 = *(u32*)(r1 + %[__sk_buff_family]); \
r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \
r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \
r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \
r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \
r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \
r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
__imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
__imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
__imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
__imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
__imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
__imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
__imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
__imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
__imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
__imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
__imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
__imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void data_meta_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void flow_keys_for_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
: __clobber_all);
}
SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void napi_id_for_cgroup_skb(void)
{
asm volatile (" \
r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \
*(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
: __clobber_all);
}
SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
__naked void write_tstamp_from_cgroup_skb(void)
{
asm volatile (" \
r0 = 0; \
*(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}
SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void read_tstamp_from_cgroup_skb(void)
{
asm volatile (" \
r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \
r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___diff_arr_dim x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___diff_arr_dim.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* copy paste of jhash from kernel sources to make sure llvm
* can compile it into valid sequence of bpf instructions
*/
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static __noinline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
#define PCKT_FRAGMENTED 65343
#define IPV4_HDR_LEN_NO_OPT 20
#define IPV4_PLUS_ICMP_HDR 28
#define IPV6_PLUS_ICMP_HDR 48
#define RING_SIZE 2
#define MAX_VIPS 12
#define MAX_REALS 5
#define CTL_MAP_SIZE 16
#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
#define F_IPV6 (1 << 0)
#define F_HASH_NO_SRC_PORT (1 << 0)
#define F_ICMP (1 << 0)
#define F_SYN_SET (1 << 1)
struct packet_description {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct vip_stats {
__u64 bytes;
__u64 pkts;
};
struct eth_hdr {
unsigned char eth_dest[ETH_ALEN];
unsigned char eth_source[ETH_ALEN];
unsigned short eth_proto;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_VIPS);
__type(key, struct vip);
__type(value, struct vip_meta);
} vip_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CH_RINGS_SIZE);
__type(key, __u32);
__type(value, __u32);
} ch_rings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_REALS);
__type(key, __u32);
__type(value, struct real_definition);
} reals SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_VIPS);
__type(key, __u32);
__type(value, struct vip_stats);
} stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, CTL_MAP_SIZE);
__type(key, __u32);
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
pckt->ports, CH_RINGS_SIZE);
else
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
static __noinline bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6);
__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
__u32 *real_pos;
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return false;
return true;
}
static __noinline int parse_icmpv6(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
return TC_ACT_OK;
off += sizeof(struct icmp6hdr);
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
pckt->proto = ip6h->nexthdr;
pckt->flags |= F_ICMP;
memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
return TC_ACT_UNSPEC;
}
static __noinline int parse_icmp(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return TC_ACT_SHOT;
if (icmp_hdr->type != ICMP_DEST_UNREACH ||
icmp_hdr->code != ICMP_FRAG_NEEDED)
return TC_ACT_OK;
off += sizeof(struct icmphdr);
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
pckt->proto = iph->protocol;
pckt->flags |= F_ICMP;
pckt->src = iph->daddr;
pckt->dst = iph->saddr;
return TC_ACT_UNSPEC;
}
static __noinline bool parse_udp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct udphdr *udp;
udp = data + off;
if (udp + 1 > data_end)
return false;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = udp->source;
pckt->port16[1] = udp->dest;
} else {
pckt->port16[0] = udp->dest;
pckt->port16[1] = udp->source;
}
return true;
}
static __noinline bool parse_tcp(void *data, __u64 off, void *data_end,
struct packet_description *pckt)
{
struct tcphdr *tcp;
tcp = data + off;
if (tcp + 1 > data_end)
return false;
if (tcp->syn)
pckt->flags |= F_SYN_SET;
if (!(pckt->flags & F_ICMP)) {
pckt->port16[0] = tcp->source;
pckt->port16[1] = tcp->dest;
} else {
pckt->port16[0] = tcp->dest;
pckt->port16[1] = tcp->source;
}
return true;
}
static __noinline int process_packet(void *data, __u64 off, void *data_end,
bool is_ipv6, struct __sk_buff *skb)
{
void *pkt_start = (void *)(long)skb->data;
struct packet_description pckt = {};
struct eth_hdr *eth = pkt_start;
struct bpf_tunnel_key tkey = {};
struct vip_stats *data_stats;
struct real_definition *dst;
struct vip_meta *vip_info;
struct ctl_value *cval;
__u32 v4_intf_pos = 1;
__u32 v6_intf_pos = 2;
struct ipv6hdr *ip6h;
struct vip vip = {};
struct iphdr *iph;
int tun_flag = 0;
__u16 pkt_bytes;
__u64 iph_len;
__u32 ifindex;
__u8 protocol;
__u32 vip_num;
int action;
tkey.tunnel_ttl = 64;
if (is_ipv6) {
ip6h = data + off;
if (ip6h + 1 > data_end)
return TC_ACT_SHOT;
iph_len = sizeof(struct ipv6hdr);
protocol = ip6h->nexthdr;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (protocol == IPPROTO_FRAGMENT) {
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_ICMPV6) {
action = parse_icmpv6(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV6_PLUS_ICMP_HDR;
} else {
memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
}
} else {
iph = data + off;
if (iph + 1 > data_end)
return TC_ACT_SHOT;
if (iph->ihl != 5)
return TC_ACT_SHOT;
protocol = iph->protocol;
pckt.proto = protocol;
pkt_bytes = bpf_ntohs(iph->tot_len);
off += IPV4_HDR_LEN_NO_OPT;
if (iph->frag_off & PCKT_FRAGMENTED)
return TC_ACT_SHOT;
if (protocol == IPPROTO_ICMP) {
action = parse_icmp(data, data_end, off, &pckt);
if (action >= 0)
return action;
off += IPV4_PLUS_ICMP_HDR;
} else {
pckt.src = iph->saddr;
pckt.dst = iph->daddr;
}
}
protocol = pckt.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(data, off, data_end, &pckt))
return TC_ACT_SHOT;
} else {
return TC_ACT_SHOT;
}
if (is_ipv6)
memcpy(vip.daddr.v6, pckt.dstv6, 16);
else
vip.daddr.v4 = pckt.dst;
vip.dport = pckt.port16[1];
vip.protocol = pckt.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.dport = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return TC_ACT_SHOT;
pckt.port16[1] = 0;
}
if (vip_info->flags & F_HASH_NO_SRC_PORT)
pckt.port16[0] = 0;
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
return TC_ACT_SHOT;
if (dst->flags & F_IPV6) {
cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
memcpy(tkey.remote_ipv6, dst->dstv6, 16);
tun_flag = BPF_F_TUNINFO_IPV6;
} else {
cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
if (!cval)
return TC_ACT_SHOT;
ifindex = cval->ifindex;
tkey.remote_ipv4 = dst->dst;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return TC_ACT_SHOT;
data_stats->pkts++;
data_stats->bytes += pkt_bytes;
bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
*(u32 *)eth->eth_dest = tkey.remote_ipv4;
return bpf_redirect(ifindex, 0);
}
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return TC_ACT_SHOT;
eth_proto = eth->eth_proto;
if (eth_proto == bpf_htons(ETH_P_IP))
return process_packet(data, nh_off, data_end, false, ctx);
else if (eth_proto == bpf_htons(ETH_P_IPV6))
return process_packet(data, nh_off, data_end, true, ctx);
else
return TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_l4lb_noinline.c |
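get_packet_dst() above picks a backend by hashing the packet's address and ports with jhash_2words() and indexing the ch_rings array with the result. A small userspace sketch of that slot selection, with the two-word hash inlined so it compiles on its own (the example address and port values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define JHASH_INITVAL 0xdeadbeef

static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/* Equivalent of jhash_2words() from the program above: mix two 32-bit
 * words with __jhash_final and return the final c word.
 */
static uint32_t jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
{
	uint32_t c = initval + JHASH_INITVAL + (2 << 2);

	a += c;
	b += c;
	c ^= b; c -= rol32(b, 14);
	a ^= c; a -= rol32(c, 11);
	b ^= a; b -= rol32(a, 25);
	c ^= b; c -= rol32(b, 16);
	a ^= c; a -= rol32(c, 4);
	b ^= a; b -= rol32(a, 14);
	c ^= b; c -= rol32(b, 24);
	return c;
}

int main(void)
{
	uint32_t src = 0x0100007f;	/* example IPv4 source address */
	uint32_t ports = 0x0050d204;	/* example packed source/dest ports */
	uint32_t hash = jhash_2words(src, ports, 24 /* CH_RINGS_SIZE */);

	printf("hash=0x%08x ring slot=%u\n", hash, hash % 2 /* RING_SIZE */);
	return 0;
}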
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + val;
}
__attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
return f2(var, skb) + val;
}
__attribute__ ((noinline))
int f4(struct __sk_buff *skb)
{
return f3(1, skb, 2);
}
__attribute__ ((noinline))
int f5(struct __sk_buff *skb)
{
return f4(skb);
}
__attribute__ ((noinline))
int f6(struct __sk_buff *skb)
{
return f5(skb);
}
__attribute__ ((noinline))
int f7(struct __sk_buff *skb)
{
return f6(skb);
}
__attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
SEC("tc")
__failure __msg("the call stack of 8 frames")
int global_func3(struct __sk_buff *skb)
{
return f8(skb);
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func3.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_non_array x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_non_array.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
volatile __u64 test_fmod_ret = 0;
SEC("fmod_ret/security_new_get_constant")
int BPF_PROG(fmod_ret_test, long val, int ret)
{
test_fmod_ret = 1;
return 120;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/fmod_ret_freplace.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#ifdef ENABLE_ATOMICS_TESTS
bool skip_tests __attribute((__section__(".data"))) = false;
#else
bool skip_tests = true;
#endif
__u32 pid = 0;
__u64 add64_value = 1;
__u64 add64_result = 0;
__u32 add32_value = 1;
__u32 add32_result = 0;
__u64 add_stack_value_copy = 0;
__u64 add_stack_result = 0;
__u64 add_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
__u64 add_stack_value = 1;
add64_result = __sync_fetch_and_add(&add64_value, 2);
add32_result = __sync_fetch_and_add(&add32_value, 2);
add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
add_stack_value_copy = add_stack_value;
__sync_fetch_and_add(&add_noreturn_value, 2);
#endif
return 0;
}
__s64 sub64_value = 1;
__s64 sub64_result = 0;
__s32 sub32_value = 1;
__s32 sub32_result = 0;
__s64 sub_stack_value_copy = 0;
__s64 sub_stack_result = 0;
__s64 sub_noreturn_value = 1;
SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
__u64 sub_stack_value = 1;
sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
sub_stack_value_copy = sub_stack_value;
__sync_fetch_and_sub(&sub_noreturn_value, 2);
#endif
return 0;
}
__u64 and64_value = (0x110ull << 32);
__u64 and64_result = 0;
__u32 and32_value = 0x110;
__u32 and32_result = 0;
__u64 and_noreturn_value = (0x110ull << 32);
SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32);
and32_result = __sync_fetch_and_and(&and32_value, 0x011);
__sync_fetch_and_and(&and_noreturn_value, 0x011ull << 32);
#endif
return 0;
}
__u64 or64_value = (0x110ull << 32);
__u64 or64_result = 0;
__u32 or32_value = 0x110;
__u32 or32_result = 0;
__u64 or_noreturn_value = (0x110ull << 32);
SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32);
or32_result = __sync_fetch_and_or(&or32_value, 0x011);
__sync_fetch_and_or(&or_noreturn_value, 0x011ull << 32);
#endif
return 0;
}
__u64 xor64_value = (0x110ull << 32);
__u64 xor64_result = 0;
__u32 xor32_value = 0x110;
__u32 xor32_result = 0;
__u64 xor_noreturn_value = (0x110ull << 32);
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011);
__sync_fetch_and_xor(&xor_noreturn_value, 0x011ull << 32);
#endif
return 0;
}
__u64 cmpxchg64_value = 1;
__u64 cmpxchg64_result_fail = 0;
__u64 cmpxchg64_result_succeed = 0;
__u32 cmpxchg32_value = 1;
__u32 cmpxchg32_result_fail = 0;
__u32 cmpxchg32_result_succeed = 0;
SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
#endif
return 0;
}
__u64 xchg64_value = 1;
__u64 xchg64_result = 0;
__u32 xchg32_value = 1;
__u32 xchg32_result = 0;
SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
__u64 val64 = 2;
__u32 val32 = 2;
xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
#endif
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/atomics.c |
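Programs like these are normally driven from userspace through a bpftool-generated skeleton; the header and atomics__* names below follow the usual generated naming convention and are an assumption, not something defined in this file. A minimal sketch of exercising the add() program:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "atomics.skel.h"	/* assumed bpftool-generated skeleton header */

int main(void)
{
	struct atomics *skel;
	int err;

	skel = atomics__open_and_load();
	if (!skel)
		return 1;

	/* The programs only act on events from this process (pid check above). */
	skel->bss->pid = getpid();

	err = atomics__attach(skel);
	if (!err) {
		/* Any syscall fires the raw_tp/sys_enter programs once attached. */
		syscall(__NR_getpid);
		printf("add64_result=%llu add32_result=%u\n",
		       (unsigned long long)skel->bss->add64_result,
		       skel->bss->add32_result);
	}

	atomics__destroy(skel);
	return err != 0;
}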
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/usdt.bpf.h>
char _license[] SEC("license") = "GPL";
int count;
SEC("usdt")
int usdt0(struct pt_regs *ctx)
{
count++;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c |
#include "core_reloc_types.h"
void f(struct core_reloc_type_based x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
#undef SOCKMAP
#define TEST_MAP_TYPE BPF_MAP_TYPE_SOCKHASH
#include "./test_sockmap_kern.h"
| linux-master | tools/testing/selftests/bpf/progs/test_sockhash_kern.c |
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___diff_arr_val_sz x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___diff_arr_val_sz.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct S {
int x;
};
struct C {
int x;
int y;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct S);
} map SEC(".maps");
enum E {
E_ITEM
};
static int global_data_x = 100;
static int volatile global_data_y = 500;
__noinline int foo(const struct S *s)
{
if (s)
return bpf_get_prandom_u32() < s->x;
return 0;
}
__noinline int bar(int *x)
{
if (x)
*x &= bpf_get_prandom_u32();
return 0;
}
__noinline int baz(volatile int *x)
{
if (x)
*x &= bpf_get_prandom_u32();
return 0;
}
__noinline int qux(enum E *e)
{
if (e)
return *e;
return 0;
}
__noinline int quux(int (*arr)[10])
{
if (arr)
return (*arr)[9];
return 0;
}
__noinline int quuz(int **p)
{
if (p)
*p = NULL;
return 0;
}
SEC("cgroup_skb/ingress")
__success
int global_func9(struct __sk_buff *skb)
{
int result = 0;
{
const struct S s = {.x = skb->len };
result |= foo(&s);
}
{
const __u32 key = 1;
const struct S *s = bpf_map_lookup_elem(&map, &key);
result |= foo(s);
}
{
const struct C c = {.x = skb->len, .y = skb->family };
result |= foo((const struct S *)&c);
}
{
result |= foo(NULL);
}
{
bar(&result);
bar(&global_data_x);
}
{
result |= baz(&global_data_y);
}
{
enum E e = E_ITEM;
result |= qux(&e);
}
{
int array[10] = {0};
result |= quux(&array);
}
{
int *p;
result |= quuz(&p);
}
return result ? 1 : 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func9.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define ATTR __always_inline
#include "test_jhash.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, u32);
__uint(max_entries, 256);
} array1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, u32);
__type(value, u32);
__uint(max_entries, 256);
} array2 SEC(".maps");
static __noinline int randmap(int v, const struct net_device *dev)
{
struct bpf_map *map = (struct bpf_map *)&array1;
int key = bpf_get_prandom_u32() & 0xff;
int *val;
if (bpf_get_prandom_u32() & 1)
map = (struct bpf_map *)&array2;
val = bpf_map_lookup_elem(map, &key);
if (val)
*val = bpf_get_prandom_u32() + v + dev->mtu;
return 0;
}
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
*from_dev, const struct net_device *to_dev, int sent, int drops,
int err)
{
return randmap(from_dev->ifindex, from_dev);
}
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
struct net_device *dev, unsigned short protocol)
{
return randmap(dev->ifindex + skb->len, dev);
}
SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
struct net_device *dev, unsigned short protocol)
{
return randmap(dev->ifindex + skb->len, dev);
}
volatile const int never;
struct __sk_bUfF /* it will not exist in vmlinux */ {
int len;
} __attribute__((preserve_access_index));
struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
size_t len;
} __attribute__((preserve_access_index));
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
int nh_off, i = 0;
nh_off = 14;
/* pragma unroll doesn't work on large loops */
#define C do { \
ptr = data + i; \
if (ptr + nh_off > data_end) \
break; \
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
if (never) { \
			/* below is dead code with an unresolvable CO-RE relo */ \
i += ((struct __sk_bUfF *)ctx)->len; \
/* this CO-RE relo may or may not resolve
* depending on whether bpf_testmod is loaded.
*/ \
i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
} \
} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
C30;C30;C30; /* 90 calls */
return 0;
}
typedef int (*func_proto_typedef___match)(long);
typedef int (*func_proto_typedef___doesnt_match)(char *);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef___match);
int proto_out[3];
SEC("raw_tracepoint/sys_enter")
int core_relo_proto(void *ctx)
{
proto_out[0] = bpf_core_type_exists(func_proto_typedef___match);
proto_out[1] = bpf_core_type_exists(func_proto_typedef___doesnt_match);
proto_out[2] = bpf_core_type_exists(func_proto_typedef_nested1);
return 0;
}
char LICENSE[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/core_kern.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("fentry/bpf_fentry_test_sinfo")
__description("typedef: resolve")
__success __retval(0)
__naked void resolve_typedef(void)
{
asm volatile (" \
r1 = *(u64 *)(r1 +0); \
r2 = *(u64 *)(r1 +%[frags_offs]); \
r0 = 0; \
exit; \
" :
: __imm_const(frags_offs,
offsetof(struct skb_shared_info, frags))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_typedef.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 13
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#define SUBPROGS
#include "strobemeta.h"
| linux-master | tools/testing/selftests/bpf/progs/strobemeta_subprogs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u64);
} hashmap1 SEC(".maps");
__u32 key_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
void *key = ctx->key;
if (key == (void *)0)
return 0;
/* out of bound access w.r.t. hashmap1 */
key_sum += *(__u32 *)(key + sizeof(struct key_t));
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <string.h>
#include <linux/tcp.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
__u32 page_size = 0;
SEC("cgroup/setsockopt")
int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
{
void *optval_end = ctx->optval_end;
int *optval = ctx->optval;
char buf[TCP_CA_NAME_MAX];
char cc_reno[TCP_CA_NAME_MAX] = "reno";
char cc_cubic[TCP_CA_NAME_MAX] = "cubic";
if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
if (bpf_getsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
return 0;
if (!tcp_cc_eq(buf, cc_cubic))
return 0;
if (*optval == 0x2d) {
if (bpf_setsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, &cc_reno,
sizeof(cc_reno)))
return 0;
}
return 1;
out:
	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c |
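The hook above only acts when a process in the attached cgroup sets IPV6_TCLASS to 0x2d on an IPv6 TCP socket whose congestion control is currently cubic. A minimal userspace sketch of that trigger (the cgroup setup and program attachment are assumed to have been done elsewhere):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int tclass = 0x2d;	/* the value the BPF hook maps from cubic to reno */
	char cc[16] = "";
	socklen_t len = sizeof(cc);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* With the cgroup/setsockopt program attached to this task's cgroup,
	 * this call is intercepted and TCP_CONGESTION is rewritten to "reno"
	 * if the socket is currently using "cubic".
	 */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass, sizeof(tclass)))
		perror("setsockopt(IPV6_TCLASS)");

	if (!getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, &len))
		printf("congestion control now: %s\n", cc);
	close(fd);
	return 0;
}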
#include "core_reloc_types.h"
void f(struct core_reloc_size___diff_sz x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <linux/bpf.h>
#include <stdint.h>
#define TWFW_MAX_TIERS (64)
/*
* load is successful
 * #define TWFW_MAX_TIERS (64u)
*/
struct twfw_tier_value {
unsigned long mask[1];
};
struct rule {
uint8_t seqnum;
};
struct rules_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, struct rule);
__uint(max_entries, 1);
};
struct tiers_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, struct twfw_tier_value);
__uint(max_entries, 1);
};
struct rules_map rules SEC(".maps");
struct tiers_map tiers SEC(".maps");
SEC("cgroup_skb/ingress")
int twfw_verifier(struct __sk_buff* skb)
{
const uint32_t key = 0;
const struct twfw_tier_value* tier = bpf_map_lookup_elem(&tiers, &key);
if (!tier)
return 1;
struct rule* rule = bpf_map_lookup_elem(&rules, &key);
if (!rule)
return 1;
if (rule && rule->seqnum < TWFW_MAX_TIERS) {
/* rule->seqnum / 64 should always be 0 */
unsigned long mask = tier->mask[rule->seqnum / 64];
if (mask)
return 0;
}
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/twfw.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int ca1_cnt = 0;
int ca2_cnt = 0;
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
}
SEC("struct_ops/ca_update_1_init")
void BPF_PROG(ca_update_1_init, struct sock *sk)
{
ca1_cnt++;
}
SEC("struct_ops/ca_update_2_init")
void BPF_PROG(ca_update_2_init, struct sock *sk)
{
ca2_cnt++;
}
SEC("struct_ops/ca_update_cong_control")
void BPF_PROG(ca_update_cong_control, struct sock *sk,
const struct rate_sample *rs)
{
}
SEC("struct_ops/ca_update_ssthresh")
__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk)
{
return tcp_sk(sk)->snd_ssthresh;
}
SEC("struct_ops/ca_update_undo_cwnd")
__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk)
{
return tcp_sk(sk)->snd_cwnd;
}
SEC(".struct_ops.link")
struct tcp_congestion_ops ca_update_1 = {
.init = (void *)ca_update_1_init,
.cong_control = (void *)ca_update_cong_control,
.ssthresh = (void *)ca_update_ssthresh,
.undo_cwnd = (void *)ca_update_undo_cwnd,
.name = "tcp_ca_update",
};
SEC(".struct_ops.link")
struct tcp_congestion_ops ca_update_2 = {
.init = (void *)ca_update_2_init,
.cong_control = (void *)ca_update_cong_control,
.ssthresh = (void *)ca_update_ssthresh,
.undo_cwnd = (void *)ca_update_undo_cwnd,
.name = "tcp_ca_update",
};
SEC(".struct_ops.link")
struct tcp_congestion_ops ca_wrong = {
.cong_control = (void *)ca_update_cong_control,
.ssthresh = (void *)ca_update_ssthresh,
.undo_cwnd = (void *)ca_update_undo_cwnd,
.name = "tcp_ca_wrong",
};
SEC(".struct_ops")
struct tcp_congestion_ops ca_no_link = {
.cong_control = (void *)ca_update_cong_control,
.ssthresh = (void *)ca_update_ssthresh,
.undo_cwnd = (void *)ca_update_undo_cwnd,
.name = "tcp_ca_no_link",
};
| linux-master | tools/testing/selftests/bpf/progs/tcp_ca_update.c |
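The .struct_ops.link maps above are intended to be attached and then swapped from userspace. A rough sketch of that flow, assuming a bpftool-generated tcp_ca_update.skel.h with the usual names and a libbpf version that provides bpf_link__update_map():

#include <bpf/libbpf.h>
#include "tcp_ca_update.skel.h"	/* assumed generated skeleton header */

int main(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int err = 1;

	skel = tcp_ca_update__open_and_load();
	if (!skel)
		return 1;

	/* Register ca_update_1 as congestion control "tcp_ca_update". */
	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	if (!link)
		goto out;

	/* Swap the map backing the same link; ca1_cnt/ca2_cnt above show
	 * which init implementation new sockets hit before and after.
	 */
	err = bpf_link__update_map(link, skel->maps.ca_update_2);

	bpf_link__destroy(link);
out:
	tcp_ca_update__destroy(skel);
	return err != 0;
}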
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "cgroup_tcp_skb.h"
char _license[] SEC("license") = "GPL";
__u16 g_sock_port = 0;
__u32 g_sock_state = 0;
int g_unexpected = 0;
__u32 g_packet_count = 0;
int needed_tcp_pkt(struct __sk_buff *skb, struct tcphdr *tcph)
{
struct ipv6hdr ip6h;
if (skb->protocol != bpf_htons(ETH_P_IPV6))
return 0;
if (bpf_skb_load_bytes(skb, 0, &ip6h, sizeof(ip6h)))
return 0;
if (ip6h.nexthdr != IPPROTO_TCP)
return 0;
if (bpf_skb_load_bytes(skb, sizeof(ip6h), tcph, sizeof(*tcph)))
return 0;
if (tcph->source != bpf_htons(g_sock_port) &&
tcph->dest != bpf_htons(g_sock_port))
return 0;
return 1;
}
/* Run accept() on a socket in the cgroup to receive a new connection. */
static int egress_accept(struct tcphdr *tcph)
{
if (g_sock_state == SYN_RECV_SENDING_SYN_ACK) {
if (tcph->fin || !tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = SYN_RECV;
return 1;
}
return 0;
}
static int ingress_accept(struct tcphdr *tcph)
{
switch (g_sock_state) {
case INIT:
if (!tcph->syn || tcph->fin || tcph->ack)
g_unexpected++;
else
g_sock_state = SYN_RECV_SENDING_SYN_ACK;
break;
case SYN_RECV:
if (tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = ESTABLISHED;
break;
default:
return 0;
}
return 1;
}
/* Run connect() on a socket in the cgroup to start a new connection. */
static int egress_connect(struct tcphdr *tcph)
{
if (g_sock_state == INIT) {
if (!tcph->syn || tcph->fin || tcph->ack)
g_unexpected++;
else
g_sock_state = SYN_SENT;
return 1;
}
return 0;
}
static int ingress_connect(struct tcphdr *tcph)
{
if (g_sock_state == SYN_SENT) {
if (tcph->fin || !tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = ESTABLISHED;
return 1;
}
return 0;
}
/* The connection is closed by the peer outside the cgroup. */
static int egress_close_remote(struct tcphdr *tcph)
{
switch (g_sock_state) {
case ESTABLISHED:
break;
case CLOSE_WAIT_SENDING_ACK:
if (tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = CLOSE_WAIT;
break;
case CLOSE_WAIT:
if (!tcph->fin)
g_unexpected++;
else
g_sock_state = LAST_ACK;
break;
default:
return 0;
}
return 1;
}
static int ingress_close_remote(struct tcphdr *tcph)
{
switch (g_sock_state) {
case ESTABLISHED:
if (tcph->fin)
g_sock_state = CLOSE_WAIT_SENDING_ACK;
break;
case LAST_ACK:
if (tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = CLOSED;
break;
default:
return 0;
}
return 1;
}
/* The connection is closed by the endpoint inside the cgroup. */
static int egress_close_local(struct tcphdr *tcph)
{
switch (g_sock_state) {
case ESTABLISHED:
if (tcph->fin)
g_sock_state = FIN_WAIT1;
break;
case TIME_WAIT_SENDING_ACK:
if (tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = TIME_WAIT;
break;
default:
return 0;
}
return 1;
}
static int ingress_close_local(struct tcphdr *tcph)
{
switch (g_sock_state) {
case ESTABLISHED:
break;
case FIN_WAIT1:
if (tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = FIN_WAIT2;
break;
case FIN_WAIT2:
if (!tcph->fin || tcph->syn || !tcph->ack)
g_unexpected++;
else
g_sock_state = TIME_WAIT_SENDING_ACK;
break;
default:
return 0;
}
return 1;
}
/* Check the types of outgoing packets of a server socket to make sure they
* are consistent with the state of the server socket.
*
* The connection is closed by the client side.
*/
SEC("cgroup_skb/egress")
int server_egress(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
/* Egress of the server socket. */
if (egress_accept(&tcph) || egress_close_remote(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of incoming packets of a server socket to make sure they
* are consistent with the state of the server socket.
*
* The connection is closed by the client side.
*/
SEC("cgroup_skb/ingress")
int server_ingress(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
/* Ingress of the server socket. */
if (ingress_accept(&tcph) || ingress_close_remote(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of outgoing packets of a server socket to make sure they
* are consistent with the state of the server socket.
*
* The connection is closed by the server side.
*/
SEC("cgroup_skb/egress")
int server_egress_srv(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
/* Egress of the server socket. */
if (egress_accept(&tcph) || egress_close_local(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of incoming packets of a server socket to make sure they
* are consistent with the state of the server socket.
*
* The connection is closed by the server side.
*/
SEC("cgroup_skb/ingress")
int server_ingress_srv(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
/* Ingress of the server socket. */
if (ingress_accept(&tcph) || ingress_close_local(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of outgoing packets of a client socket to make sure they
* are consistent with the state of the client socket.
*
* The connection is closed by the server side.
*/
SEC("cgroup_skb/egress")
int client_egress_srv(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
	/* Egress of the client socket. */
if (egress_connect(&tcph) || egress_close_remote(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of incoming packets of a client socket to make sure they
* are consistent with the state of the client socket.
*
* The connection is closed by the server side.
*/
SEC("cgroup_skb/ingress")
int client_ingress_srv(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
	/* Ingress of the client socket. */
if (ingress_connect(&tcph) || ingress_close_remote(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of outgoing packets of a client socket to make sure they
* are consistent with the state of the client socket.
*
* The connection is closed by the client side.
*/
SEC("cgroup_skb/egress")
int client_egress(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
	/* Egress of the client socket. */
if (egress_connect(&tcph) || egress_close_local(&tcph))
return 1;
g_unexpected++;
return 1;
}
/* Check the types of incoming packets of a client socket to make sure they
* are consistent with the state of the client socket.
*
* The connection is closed by the client side.
*/
SEC("cgroup_skb/ingress")
int client_ingress(struct __sk_buff *skb)
{
struct tcphdr tcph;
if (!needed_tcp_pkt(skb, &tcph))
return 1;
g_packet_count++;
	/* Ingress of the client socket. */
if (ingress_connect(&tcph) || ingress_close_local(&tcph))
return 1;
g_unexpected++;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
int my_tid;
__u64 kprobe_res;
__u64 kprobe_multi_res;
__u64 kretprobe_res;
__u64 uprobe_res;
__u64 uretprobe_res;
__u64 tp_res;
__u64 pe_res;
__u64 fentry_res;
__u64 fexit_res;
__u64 fmod_ret_res;
__u64 lsm_res;
static void update(void *ctx, __u64 *res)
{
if (my_tid != (u32)bpf_get_current_pid_tgid())
return;
*res |= bpf_get_attach_cookie(ctx);
}
SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
update(ctx, &kprobe_res);
return 0;
}
SEC("kretprobe")
int handle_kretprobe(struct pt_regs *ctx)
{
update(ctx, &kretprobe_res);
return 0;
}
SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
update(ctx, &uprobe_res);
return 0;
}
SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
update(ctx, &uretprobe_res);
return 0;
}
/* bpf_prog_array, used by kernel internally to keep track of attached BPF
* programs to a given BPF hook (e.g., for tracepoints) doesn't allow the same
* BPF program to be attached multiple times. So have three identical copies
* ready to attach to the same tracepoint.
*/
SEC("tp/syscalls/sys_enter_nanosleep")
int handle_tp1(struct pt_regs *ctx)
{
update(ctx, &tp_res);
return 0;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int handle_tp2(struct pt_regs *ctx)
{
update(ctx, &tp_res);
return 0;
}
SEC("tp/syscalls/sys_enter_nanosleep")
int handle_tp3(void *ctx)
{
update(ctx, &tp_res);
return 1;
}
SEC("perf_event")
int handle_pe(struct pt_regs *ctx)
{
update(ctx, &pe_res);
return 0;
}
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(fentry_test1, int a)
{
update(ctx, &fentry_res);
return 0;
}
SEC("fexit/bpf_fentry_test1")
int BPF_PROG(fexit_test1, int a, int ret)
{
update(ctx, &fexit_res);
return 0;
}
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
{
update(ctx, &fmod_ret_res);
return 1234;
}
SEC("lsm/file_mprotect")
int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot, int ret)
{
if (my_tid != (u32)bpf_get_current_pid_tgid())
return ret;
update(ctx, &lsm_res);
return -EPERM;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_bpf_cookie.c |
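Each program above ORs whatever bpf_get_attach_cookie() returns into its result variable, so the interesting part is supplying the cookie at attach time. A sketch for the kprobe case via libbpf's opts-based attach; the skeleton header and the kprobe target name are assumptions for illustration:

#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_bpf_cookie.skel.h"	/* assumed generated skeleton header */

int main(void)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, .bpf_cookie = 0x1);
	struct test_bpf_cookie *skel;
	struct bpf_link *link;
	int err = 1;

	skel = test_bpf_cookie__open_and_load();
	if (!skel)
		return 1;

	/* Programs only record cookies for this thread (my_tid check above). */
	skel->bss->my_tid = syscall(SYS_gettid);

	/* Attach handle_kprobe with cookie 0x1; the program ORs it into
	 * kprobe_res when the probed function (an example name here) fires.
	 */
	link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
					       "schedule", &opts);
	if (!link)
		goto out;

	/* ... trigger the probe, then check skel->bss->kprobe_res == 0x1 ... */

	bpf_link__destroy(link);
	err = 0;
out:
	test_bpf_cookie__destroy(skel);
	return err;
}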
#include <linux/stddef.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("freplace/do_bind")
int new_do_bind(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {};
bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/freplace_connect4.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
__type(key, __u32);
__type(value, char);
} rdonly_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_MMAPABLE);
__type(key, __u32);
__type(value, __u64);
} data_map SEC(".maps");
__u64 in_val = 0;
__u64 out_val = 0;
SEC("raw_tracepoint/sys_enter")
int test_mmap(void *ctx)
{
int zero = 0, one = 1, two = 2, far = 1500;
__u64 val, *p;
out_val = in_val;
/* data_map[2] = in_val; */
bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0);
/* data_map[1] = data_map[0] * 2; */
p = bpf_map_lookup_elem(&data_map, &zero);
if (p) {
val = (*p) * 2;
bpf_map_update_elem(&data_map, &one, &val, 0);
}
/* data_map[far] = in_val * 3; */
val = in_val * 3;
bpf_map_update_elem(&data_map, &far, &val, 0);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_mmap.c |
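Since data_map is created with BPF_F_MMAPABLE, userspace can mmap() its value area directly instead of calling bpf_map_lookup_elem(). A sketch of that, assuming a generated test_mmap.skel.h and that max_entries is set before load (the program indexes entry 1500, and neither map declares a size of its own):

#include <sys/mman.h>
#include <bpf/libbpf.h>
#include "test_mmap.skel.h"	/* assumed generated skeleton header */

int main(void)
{
	const size_t nr_entries = 4096;	/* > 1500 entries, page-aligned total size */
	struct test_mmap *skel;
	__u64 *vals;
	int err = 1;

	skel = test_mmap__open();
	if (!skel)
		return 1;
	bpf_map__set_max_entries(skel->maps.rdonly_map, nr_entries);
	bpf_map__set_max_entries(skel->maps.data_map, nr_entries);
	if (test_mmap__load(skel))
		goto out;

	/* Map the array's values straight into this process's address space. */
	vals = mmap(NULL, nr_entries * sizeof(__u64), PROT_READ | PROT_WRITE,
		    MAP_SHARED, bpf_map__fd(skel->maps.data_map), 0);
	if (vals == MAP_FAILED)
		goto out;

	vals[0] = 21;			/* seen by the program as data_map[0] */
	skel->bss->in_val = 42;		/* consumed on the next sys_enter */

	/* ... attach, trigger a syscall, then read vals[1], vals[2], vals[1500] ... */

	munmap(vals, nr_entries * sizeof(__u64));
	err = 0;
out:
	test_mmap__destroy(skel);
	return err;
}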
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
extern const void bpf_fentry_test1 __ksym;
extern const void bpf_fentry_test2 __ksym;
extern const void bpf_fentry_test3 __ksym;
extern const void bpf_fentry_test4 __ksym;
extern const void bpf_modify_return_test __ksym;
extern const void bpf_fentry_test6 __ksym;
extern const void bpf_fentry_test7 __ksym;
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
/* This function is here to have CONFIG_X86_KERNEL_IBT
* used and added to object BTF.
*/
int unused(void)
{
return CONFIG_X86_KERNEL_IBT ? 0 : 1;
}
__u64 test1_result = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1, int a)
{
__u64 addr = bpf_get_func_ip(ctx);
test1_result = (const void *) addr == &bpf_fentry_test1;
return 0;
}
__u64 test2_result = 0;
SEC("fexit/bpf_fentry_test2")
int BPF_PROG(test2, int a)
{
__u64 addr = bpf_get_func_ip(ctx);
test2_result = (const void *) addr == &bpf_fentry_test2;
return 0;
}
__u64 test3_result = 0;
SEC("kprobe/bpf_fentry_test3")
int test3(struct pt_regs *ctx)
{
__u64 addr = bpf_get_func_ip(ctx);
test3_result = (const void *) addr == &bpf_fentry_test3;
return 0;
}
__u64 test4_result = 0;
SEC("kretprobe/bpf_fentry_test4")
int BPF_KRETPROBE(test4)
{
__u64 addr = bpf_get_func_ip(ctx);
test4_result = (const void *) addr == &bpf_fentry_test4;
return 0;
}
__u64 test5_result = 0;
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(test5, int a, int *b, int ret)
{
__u64 addr = bpf_get_func_ip(ctx);
test5_result = (const void *) addr == &bpf_modify_return_test;
return ret;
}
__u64 test6_result = 0;
SEC("?kprobe")
int test6(struct pt_regs *ctx)
{
__u64 addr = bpf_get_func_ip(ctx);
test6_result = (const void *) addr == 0;
return 0;
}
unsigned long uprobe_trigger;
__u64 test7_result = 0;
SEC("uprobe//proc/self/exe:uprobe_trigger")
int BPF_UPROBE(test7)
{
__u64 addr = bpf_get_func_ip(ctx);
test7_result = (const void *) addr == (const void *) uprobe_trigger;
return 0;
}
__u64 test8_result = 0;
SEC("uretprobe//proc/self/exe:uprobe_trigger")
int BPF_URETPROBE(test8, int ret)
{
__u64 addr = bpf_get_func_ip(ctx);
test8_result = (const void *) addr == (const void *) uprobe_trigger;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/get_func_ip_test.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define AF_INET6 10
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sockops_netns_cookies SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_msg_netns_cookies SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u64);
} sock_map SEC(".maps");
SEC("sockops")
int get_netns_cookie_sockops(struct bpf_sock_ops *ctx)
{
struct bpf_sock *sk = ctx->sk;
int *cookie;
__u32 key = 0;
if (ctx->family != AF_INET6)
return 1;
if (!sk)
return 1;
switch (ctx->op) {
case BPF_SOCK_OPS_TCP_CONNECT_CB:
cookie = bpf_sk_storage_get(&sockops_netns_cookies, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!cookie)
return 1;
*cookie = bpf_get_netns_cookie(ctx);
break;
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
bpf_sock_map_update(ctx, &sock_map, &key, BPF_NOEXIST);
break;
default:
break;
}
return 1;
}
SEC("sk_msg")
int get_netns_cookie_sk_msg(struct sk_msg_md *msg)
{
struct bpf_sock *sk = msg->sk;
int *cookie;
if (msg->family != AF_INET6)
return 1;
if (!sk)
return 1;
cookie = bpf_sk_storage_get(&sk_msg_netns_cookies, sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!cookie)
return 1;
*cookie = bpf_get_netns_cookie(msg);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/netns_cookie_prog.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
__noinline int foo(int (*arr)[10])
{
if (arr)
return (*arr)[9];
return 0;
}
SEC("cgroup_skb/ingress")
__failure __msg("invalid indirect read from stack")
int global_func16(struct __sk_buff *skb)
{
int array[10];
const int rv = foo(&array);
return rv ? 1 : 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_global_func16.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
/* copy-paste of jhash from the kernel sources to make sure llvm
 * can compile it into a valid sequence of bpf instructions
 */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
#define JHASH_INITVAL 0xdeadbeef
typedef unsigned int u32;
static __noinline
u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
a = b = c = JHASH_INITVAL + length + initval;
while (length > 12) {
a += *(u32 *)(k);
b += *(u32 *)(k + 4);
c += *(u32 *)(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
__noinline
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
__noinline
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
struct flow_key {
union {
__be32 src;
__be32 srcv6[4];
};
union {
__be32 dst;
__be32 dstv6[4];
};
union {
__u32 ports;
__u16 port16[2];
};
__u8 proto;
};
struct packet_description {
struct flow_key flow;
__u8 flags;
};
struct ctl_value {
union {
__u64 value;
__u32 ifindex;
__u8 mac[6];
};
};
struct vip_definition {
union {
__be32 vip;
__be32 vipv6[4];
};
__u16 port;
__u16 family;
__u8 proto;
};
struct vip_meta {
__u32 flags;
__u32 vip_num;
};
struct real_pos_lru {
__u32 pos;
__u64 atime;
};
struct real_definition {
union {
__be32 dst;
__be32 dstv6[4];
};
__u8 flags;
};
struct lb_stats {
__u64 v2;
__u64 v1;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 512);
__type(key, struct vip_definition);
__type(value, struct vip_meta);
} vip_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 300);
__uint(map_flags, 1U << 1);
__type(key, struct flow_key);
__type(value, struct real_pos_lru);
} lru_cache SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 12 * 655);
__type(key, __u32);
__type(value, __u32);
} ch_rings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 40);
__type(key, __u32);
__type(value, struct real_definition);
} reals SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 515);
__type(key, __u32);
__type(value, struct lb_stats);
} stats SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16);
__type(key, __u32);
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
struct eth_hdr {
unsigned char eth_dest[6];
unsigned char eth_source[6];
unsigned short eth_proto;
};
static __noinline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
__u64 off = sizeof(struct eth_hdr);
if (is_ipv6) {
off += sizeof(struct ipv6hdr);
if (is_icmp)
off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
} else {
off += sizeof(struct iphdr);
if (is_icmp)
off += sizeof(struct icmphdr) + sizeof(struct iphdr);
}
return off;
}
static __attribute__ ((noinline))
bool parse_udp(void *data, void *data_end,
bool is_ipv6, struct packet_description *pckt)
{
bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
__u64 off = calc_offset(is_ipv6, is_icmp);
struct udphdr *udp;
udp = data + off;
if (udp + 1 > data_end)
return false;
if (!is_icmp) {
pckt->flow.port16[0] = udp->source;
pckt->flow.port16[1] = udp->dest;
} else {
pckt->flow.port16[0] = udp->dest;
pckt->flow.port16[1] = udp->source;
}
return true;
}
static __attribute__ ((noinline))
bool parse_tcp(void *data, void *data_end,
bool is_ipv6, struct packet_description *pckt)
{
bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
__u64 off = calc_offset(is_ipv6, is_icmp);
struct tcphdr *tcp;
tcp = data + off;
if (tcp + 1 > data_end)
return false;
if (tcp->syn)
pckt->flags |= (1 << 1);
if (!is_icmp) {
pckt->flow.port16[0] = tcp->source;
pckt->flow.port16[1] = tcp->dest;
} else {
pckt->flow.port16[0] = tcp->dest;
pckt->flow.port16[1] = tcp->source;
}
return true;
}
static __attribute__ ((noinline))
bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
struct packet_description *pckt,
struct real_definition *dst, __u32 pkt_bytes)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
struct ipv6hdr *ip6h;
__u32 ip_suffix;
void *data_end;
void *data;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
ip6h = data + sizeof(struct eth_hdr);
old_eth = data + sizeof(struct ipv6hdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || ip6h + 1 > data_end)
return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 56710;
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->nexthdr = IPPROTO_IPV6;
ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
ip6h->payload_len =
bpf_htons(pkt_bytes + sizeof(struct ipv6hdr));
ip6h->hop_limit = 4;
ip6h->saddr.in6_u.u6_addr32[0] = 1;
ip6h->saddr.in6_u.u6_addr32[1] = 2;
ip6h->saddr.in6_u.u6_addr32[2] = 3;
ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
return true;
}
static __attribute__ ((noinline))
bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
struct packet_description *pckt,
struct real_definition *dst, __u32 pkt_bytes)
{
__u32 ip_suffix = bpf_ntohs(pckt->flow.port16[0]);
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
__u16 *next_iph_u16;
struct iphdr *iph;
__u32 csum = 0;
void *data_end;
void *data;
ip_suffix <<= 15;
ip_suffix ^= pckt->flow.src;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
iph = data + sizeof(struct eth_hdr);
old_eth = data + sizeof(struct iphdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || iph + 1 > data_end)
return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
iph->version = 4;
iph->ihl = 5;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 1;
iph->tot_len = bpf_htons(pkt_bytes + sizeof(struct iphdr));
/* don't update iph->daddr, since it will overwrite old eth_proto
* and multiple iterations of bpf_prog_run() will fail
*/
iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
iph->ttl = 4;
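	/* Standard RFC 1071 style one's-complement checksum over the 5-word
	 * (20-byte) IPv4 header built above: sum it as 16-bit words, fold the
	 * carry back in and take the complement.
	 */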
next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
return false;
return true;
}
static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end)
{
unsigned char tmp_mac[6];
struct eth_hdr *eth;
eth = data;
memcpy(tmp_mac, eth->eth_source, 6);
memcpy(eth->eth_source, eth->eth_dest, 6);
memcpy(eth->eth_dest, tmp_mac, 6);
return XDP_TX;
}
static __attribute__ ((noinline))
int send_icmp_reply(void *data, void *data_end)
{
struct icmphdr *icmp_hdr;
__u16 *next_iph_u16;
__u32 tmp_addr = 0;
struct iphdr *iph;
__u32 csum = 0;
__u64 off = 0;
if (data + sizeof(struct eth_hdr)
+ sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
return XDP_DROP;
off += sizeof(struct eth_hdr);
iph = data + off;
off += sizeof(struct iphdr);
icmp_hdr = data + off;
icmp_hdr->type = 0;
icmp_hdr->checksum += 0x0007;
iph->ttl = 4;
tmp_addr = iph->daddr;
iph->daddr = iph->saddr;
iph->saddr = tmp_addr;
iph->check = 0;
next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
return swap_mac_and_send(data, data_end);
}
static __attribute__ ((noinline))
int send_icmp6_reply(void *data, void *data_end)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
__be32 tmp_addr[4];
__u64 off = 0;
if (data + sizeof(struct eth_hdr)
+ sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
return XDP_DROP;
off += sizeof(struct eth_hdr);
ip6h = data + off;
off += sizeof(struct ipv6hdr);
icmp_hdr = data + off;
icmp_hdr->icmp6_type = 129;
icmp_hdr->icmp6_cksum -= 0x0001;
ip6h->hop_limit = 4;
memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
return swap_mac_and_send(data, data_end);
}
static __attribute__ ((noinline))
int parse_icmpv6(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return XDP_DROP;
if (icmp_hdr->icmp6_type == 128)
return send_icmp6_reply(data, data_end);
if (icmp_hdr->icmp6_type != 3)
return XDP_PASS;
off += sizeof(struct icmp6hdr);
ip6h = data + off;
if (ip6h + 1 > data_end)
return XDP_DROP;
pckt->flow.proto = ip6h->nexthdr;
pckt->flags |= (1 << 0);
memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
return -1;
}
static __attribute__ ((noinline))
int parse_icmp(void *data, void *data_end, __u64 off,
struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
icmp_hdr = data + off;
if (icmp_hdr + 1 > data_end)
return XDP_DROP;
if (icmp_hdr->type == 8)
return send_icmp_reply(data, data_end);
if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
return XDP_PASS;
off += sizeof(struct icmphdr);
iph = data + off;
if (iph + 1 > data_end)
return XDP_DROP;
if (iph->ihl != 5)
return XDP_DROP;
pckt->flow.proto = iph->protocol;
pckt->flags |= (1 << 0);
pckt->flow.src = iph->daddr;
pckt->flow.dst = iph->saddr;
return -1;
}
static __attribute__ ((noinline))
__u32 get_packet_hash(struct packet_description *pckt,
bool hash_16bytes)
{
if (hash_16bytes)
return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
pckt->flow.ports, 24);
else
return jhash_2words(pckt->flow.src, pckt->flow.ports,
24);
}
__attribute__ ((noinline))
static bool get_packet_dst(struct real_definition **real,
struct packet_description *pckt,
struct vip_meta *vip_info,
bool is_ipv6, void *lru_map)
{
struct real_pos_lru new_dst_lru = { };
bool hash_16bytes = is_ipv6;
__u32 *real_pos, hash, key;
__u64 cur_time;
if (vip_info->flags & (1 << 2))
hash_16bytes = 1;
if (vip_info->flags & (1 << 3)) {
pckt->flow.port16[0] = pckt->flow.port16[1];
memset(pckt->flow.srcv6, 0, 16);
}
hash = get_packet_hash(pckt, hash_16bytes);
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
return false;
key = 2 * vip_info->vip_num + hash % 2;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
return false;
if (!(vip_info->flags & (1 << 1))) {
__u32 conn_rate_key = 512 + 2;
struct lb_stats *conn_rate_stats =
bpf_map_lookup_elem(&stats, &conn_rate_key);
if (!conn_rate_stats)
return true;
cur_time = bpf_ktime_get_ns();
if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
conn_rate_stats->v1 = 1;
conn_rate_stats->v2 = cur_time;
} else {
conn_rate_stats->v1 += 1;
if (conn_rate_stats->v1 >= 1)
return true;
}
if (pckt->flow.proto == IPPROTO_UDP)
new_dst_lru.atime = cur_time;
new_dst_lru.pos = key;
bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
}
return true;
}
__attribute__ ((noinline))
static void connection_table_lookup(struct real_definition **real,
struct packet_description *pckt,
void *lru_map)
{
struct real_pos_lru *dst_lru;
__u64 cur_time;
__u32 key;
dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
if (!dst_lru)
return;
if (pckt->flow.proto == IPPROTO_UDP) {
cur_time = bpf_ktime_get_ns();
if (cur_time - dst_lru->atime > 300000)
return;
dst_lru->atime = cur_time;
}
key = dst_lru->pos;
*real = bpf_map_lookup_elem(&reals, &key);
}
/* don't believe your eyes!
 * the function below has 6 arguments whereas bpf and llvm allow a maximum
 * of 5, but since it's _static_, llvm can optimize one argument away
 */
__attribute__ ((noinline))
static int process_l3_headers_v6(struct packet_description *pckt,
__u8 *protocol, __u64 off,
__u16 *pkt_bytes, void *data,
void *data_end)
{
struct ipv6hdr *ip6h;
__u64 iph_len;
int action;
ip6h = data + off;
if (ip6h + 1 > data_end)
return XDP_DROP;
iph_len = sizeof(struct ipv6hdr);
*protocol = ip6h->nexthdr;
pckt->flow.proto = *protocol;
*pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (*protocol == 45) {
return XDP_DROP;
} else if (*protocol == 59) {
action = parse_icmpv6(data, data_end, off, pckt);
if (action >= 0)
return action;
} else {
memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
}
return -1;
}
__attribute__ ((noinline))
static int process_l3_headers_v4(struct packet_description *pckt,
__u8 *protocol, __u64 off,
__u16 *pkt_bytes, void *data,
void *data_end)
{
struct iphdr *iph;
int action;
iph = data + off;
if (iph + 1 > data_end)
return XDP_DROP;
if (iph->ihl != 5)
return XDP_DROP;
*protocol = iph->protocol;
pckt->flow.proto = *protocol;
*pkt_bytes = bpf_ntohs(iph->tot_len);
off += 20;
if (iph->frag_off & 65343)
return XDP_DROP;
if (*protocol == IPPROTO_ICMP) {
action = parse_icmp(data, data_end, off, pckt);
if (action >= 0)
return action;
} else {
pckt->flow.src = iph->saddr;
pckt->flow.dst = iph->daddr;
}
return -1;
}
__attribute__ ((noinline))
static int process_packet(void *data, __u64 off, void *data_end,
bool is_ipv6, struct xdp_md *xdp)
{
struct real_definition *dst = NULL;
struct packet_description pckt = { };
struct vip_definition vip = { };
struct lb_stats *data_stats;
void *lru_map = &lru_cache;
struct vip_meta *vip_info;
__u32 lru_stats_key = 513;
__u32 mac_addr_pos = 0;
__u32 stats_key = 512;
struct ctl_value *cval;
__u16 pkt_bytes;
__u8 protocol;
__u32 vip_num;
int action;
if (is_ipv6)
action = process_l3_headers_v6(&pckt, &protocol, off,
&pkt_bytes, data, data_end);
else
action = process_l3_headers_v4(&pckt, &protocol, off,
&pkt_bytes, data, data_end);
if (action >= 0)
return action;
protocol = pckt.flow.proto;
if (protocol == IPPROTO_TCP) {
if (!parse_tcp(data, data_end, is_ipv6, &pckt))
return XDP_DROP;
} else if (protocol == IPPROTO_UDP) {
if (!parse_udp(data, data_end, is_ipv6, &pckt))
return XDP_DROP;
} else {
return XDP_TX;
}
if (is_ipv6)
memcpy(vip.vipv6, pckt.flow.dstv6, 16);
else
vip.vip = pckt.flow.dst;
vip.port = pckt.flow.port16[1];
vip.proto = pckt.flow.proto;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info) {
vip.port = 0;
vip_info = bpf_map_lookup_elem(&vip_map, &vip);
if (!vip_info)
return XDP_PASS;
if (!(vip_info->flags & (1 << 4)))
pckt.flow.port16[1] = 0;
}
if (data_end - data > 1400)
return XDP_DROP;
data_stats = bpf_map_lookup_elem(&stats, &stats_key);
if (!data_stats)
return XDP_DROP;
data_stats->v1 += 1;
if (!dst) {
if (vip_info->flags & (1 << 0))
pckt.flow.port16[0] = 0;
if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
connection_table_lookup(&dst, &pckt, lru_map);
if (dst)
goto out;
if (pckt.flow.proto == IPPROTO_TCP) {
struct lb_stats *lru_stats =
bpf_map_lookup_elem(&stats, &lru_stats_key);
if (!lru_stats)
return XDP_DROP;
if (pckt.flags & (1 << 1))
lru_stats->v1 += 1;
else
lru_stats->v2 += 1;
}
if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
return XDP_DROP;
data_stats->v2 += 1;
}
out:
cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
if (!cval)
return XDP_DROP;
if (dst->flags & (1 << 0)) {
if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
return XDP_DROP;
} else {
if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
return XDP_DROP;
}
vip_num = vip_info->vip_num;
data_stats = bpf_map_lookup_elem(&stats, &vip_num);
if (!data_stats)
return XDP_DROP;
data_stats->v1 += 1;
data_stats->v2 += pkt_bytes;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
if (data + 4 > data_end)
return XDP_DROP;
*(u32 *)data = dst->dst;
return XDP_DROP;
}
SEC("xdp")
int balancer_ingress_v4(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return XDP_DROP;
eth_proto = bpf_ntohs(eth->eth_proto);
if (eth_proto == ETH_P_IP)
return process_packet(data, nh_off, data_end, 0, ctx);
else
return XDP_DROP;
}
SEC("xdp")
int balancer_ingress_v6(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
struct eth_hdr *eth = data;
__u32 eth_proto;
__u32 nh_off;
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return XDP_DROP;
eth_proto = bpf_ntohs(eth->eth_proto);
if (eth_proto == ETH_P_IPV6)
return process_packet(data, nh_off, data_end, 1, ctx);
else
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_xdp_noinline.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
char tp_name[128];
SEC("lsm/bpf")
int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
{
switch (cmd) {
case BPF_RAW_TRACEPOINT_OPEN:
bpf_probe_read_user_str(tp_name, sizeof(tp_name) - 1,
(void *)attr->raw_tracepoint.name);
break;
default:
break;
}
return 0;
}
SEC("raw_tracepoint")
int BPF_PROG(raw_tp_run)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_ptr_untrusted.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <limits.h>
#include <linux/errno.h>
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
const volatile __s64 exp_empty_zero = 0 + 1;
__s64 res_empty_zero;
SEC("raw_tp/sys_enter")
int num_empty_zero(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, 0, 0) sum += i;
res_empty_zero = 1 + sum;
return 0;
}
const volatile __s64 exp_empty_int_min = 0 + 2;
__s64 res_empty_int_min;
SEC("raw_tp/sys_enter")
int num_empty_int_min(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MIN, INT_MIN) sum += i;
res_empty_int_min = 2 + sum;
return 0;
}
const volatile __s64 exp_empty_int_max = 0 + 3;
__s64 res_empty_int_max;
SEC("raw_tp/sys_enter")
int num_empty_int_max(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MAX, INT_MAX) sum += i;
res_empty_int_max = 3 + sum;
return 0;
}
const volatile __s64 exp_empty_minus_one = 0 + 4;
__s64 res_empty_minus_one;
SEC("raw_tp/sys_enter")
int num_empty_minus_one(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -1, -1) sum += i;
res_empty_minus_one = 4 + sum;
return 0;
}
const volatile __s64 exp_simple_sum = 9 * 10 / 2;
__s64 res_simple_sum;
SEC("raw_tp/sys_enter")
int num_simple_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, 0, 10) sum += i;
res_simple_sum = sum;
return 0;
}
const volatile __s64 exp_neg_sum = -11 * 10 / 2;
__s64 res_neg_sum;
SEC("raw_tp/sys_enter")
int num_neg_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -10, 0) sum += i;
res_neg_sum = sum;
return 0;
}
const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1);
__s64 res_very_neg_sum;
SEC("raw_tp/sys_enter")
int num_very_neg_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MIN, INT_MIN + 2) sum += i;
res_very_neg_sum = sum;
return 0;
}
const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2);
__s64 res_very_big_sum;
SEC("raw_tp/sys_enter")
int num_very_big_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, INT_MAX - 2, INT_MAX) sum += i;
res_very_big_sum = sum;
return 0;
}
const volatile __s64 exp_neg_pos_sum = -3;
__s64 res_neg_pos_sum;
SEC("raw_tp/sys_enter")
int num_neg_pos_sum(const void *ctx)
{
__s64 sum = 0, i;
bpf_for(i, -3, 3) sum += i;
res_neg_pos_sum = sum;
return 0;
}
const volatile __s64 exp_invalid_range = -EINVAL;
__s64 res_invalid_range;
SEC("raw_tp/sys_enter")
int num_invalid_range(const void *ctx)
{
struct bpf_iter_num it;
res_invalid_range = bpf_iter_num_new(&it, 1, 0);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_max_range = 0 + 10;
__s64 res_max_range;
SEC("raw_tp/sys_enter")
int num_max_range(const void *ctx)
{
struct bpf_iter_num it;
res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_e2big_range = -E2BIG;
__s64 res_e2big_range;
SEC("raw_tp/sys_enter")
int num_e2big_range(const void *ctx)
{
struct bpf_iter_num it;
res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS);
bpf_iter_num_destroy(&it);
return 0;
}
const volatile __s64 exp_succ_elem_cnt = 10;
__s64 res_succ_elem_cnt;
SEC("raw_tp/sys_enter")
int num_succ_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v;
bpf_iter_num_new(&it, 0, 10);
while ((v = bpf_iter_num_next(&it))) {
cnt++;
}
bpf_iter_num_destroy(&it);
res_succ_elem_cnt = cnt;
return 0;
}
const volatile __s64 exp_overfetched_elem_cnt = 5;
__s64 res_overfetched_elem_cnt;
SEC("raw_tp/sys_enter")
int num_overfetched_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v, i;
bpf_iter_num_new(&it, 0, 5);
for (i = 0; i < 10; i++) {
v = bpf_iter_num_next(&it);
if (v)
cnt++;
}
bpf_iter_num_destroy(&it);
res_overfetched_elem_cnt = cnt;
return 0;
}
const volatile __s64 exp_fail_elem_cnt = 20 + 0;
__s64 res_fail_elem_cnt;
SEC("raw_tp/sys_enter")
int num_fail_elem_cnt(const void *ctx)
{
struct bpf_iter_num it;
int cnt = 0, *v, i;
bpf_iter_num_new(&it, 100, 10);
for (i = 0; i < 10; i++) {
v = bpf_iter_num_next(&it);
if (v)
cnt++;
}
bpf_iter_num_destroy(&it);
res_fail_elem_cnt = 20 + cnt;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/iters_num.c |
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 8);
__type(key, __u32);
__type(value, __u64);
} test_array SEC(".maps");
unsigned int triggered;
static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
{
return 1;
}
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
{
*(volatile long *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
return 0;
}
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
{
*(volatile long *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
return 0;
}
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
{
*(volatile long *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_subprogs_extable.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Jesper Dangaard Brouer */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <linux/if_ether.h>
#include <stddef.h>
#include <stdint.h>
char _license[] SEC("license") = "GPL";
/* Userspace will update this with the MTU it can see on the device */
volatile const int GLOBAL_USER_MTU;
volatile const __u32 GLOBAL_USER_IFINDEX;
/* BPF-prog will update these with MTU values it can see */
__u32 global_bpf_mtu_xdp = 0;
__u32 global_bpf_mtu_tc = 0;
SEC("xdp")
int xdp_use_helper_basic(struct xdp_md *ctx)
{
__u32 mtu_len = 0;
if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
return XDP_ABORTED;
return XDP_PASS;
}
SEC("xdp")
int xdp_use_helper(struct xdp_md *ctx)
{
int retval = XDP_PASS; /* Expected retval on successful test */
__u32 mtu_len = 0;
__u32 ifindex = 0;
int delta = 0;
/* When ifindex is zero, save net_device lookup and use ctx netdev */
if (GLOBAL_USER_IFINDEX > 0)
ifindex = GLOBAL_USER_IFINDEX;
if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0)) {
		/* mtu_len is also valid when the check fails */
retval = XDP_ABORTED;
goto out;
}
if (mtu_len != GLOBAL_USER_MTU)
retval = XDP_DROP;
out:
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("xdp")
int xdp_exceed_mtu(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 data_len = data_end - data;
int retval = XDP_ABORTED; /* Fail */
__u32 mtu_len = 0;
int delta;
int err;
	/* Exceed MTU by 1 via delta adjust */
delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
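	/* i.e. (data_len - ETH_HLEN) + delta == GLOBAL_USER_MTU + 1, so the
	 * L3 length after the delta adjustment is exactly one byte over the
	 * MTU and bpf_check_mtu() is expected to flag it.
	 */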
err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
if (err) {
retval = XDP_PASS; /* Success in exceeding MTU check */
if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = XDP_DROP;
}
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("xdp")
int xdp_minus_delta(struct xdp_md *ctx)
{
int retval = XDP_PASS; /* Expected retval on successful test */
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 data_len = data_end - data;
__u32 mtu_len = 0;
int delta;
/* Borderline test case: Minus delta exceeding packet length allowed */
delta = -((data_len - ETH_HLEN) + 1);
	/* A minus length (adjusted via delta) still passes the MTU check; other
	 * helpers are responsible for catching this when doing the actual size
	 * adjust
	 */
if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
retval = XDP_ABORTED;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("xdp")
int xdp_input_len(struct xdp_md *ctx)
{
int retval = XDP_PASS; /* Expected retval on successful test */
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 data_len = data_end - data;
	/* The API allows the user to give a length to check as input via the
	 * mtu_len param; the resulting MTU value is still output in mtu_len
	 * after the call.
	 *
	 * The input len is L3, like MTU and iph->tot_len.
	 * Remember XDP data_len is L2.
	 */
__u32 mtu_len = data_len - ETH_HLEN;
if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
retval = XDP_ABORTED;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("xdp")
int xdp_input_len_exceed(struct xdp_md *ctx)
{
int retval = XDP_ABORTED; /* Fail */
__u32 ifindex = GLOBAL_USER_IFINDEX;
int err;
	/* The API allows the user to give a length to check as input via the
	 * mtu_len param; the resulting MTU value is still output in mtu_len
	 * after the call.
	 *
	 * The input length value is an L3 size, like MTU.
	 */
__u32 mtu_len = GLOBAL_USER_MTU;
	mtu_len += 1; /* Exceed by 1 */
err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
		retval = XDP_PASS; /* Success in exceeding MTU check */
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("tc")
int tc_use_helper(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
__u32 mtu_len = 0;
int delta = 0;
if (bpf_check_mtu(ctx, 0, &mtu_len, delta, 0)) {
retval = BPF_DROP;
goto out;
}
if (mtu_len != GLOBAL_USER_MTU)
retval = BPF_REDIRECT;
out:
global_bpf_mtu_tc = mtu_len;
return retval;
}
SEC("tc")
int tc_exceed_mtu(struct __sk_buff *ctx)
{
__u32 ifindex = GLOBAL_USER_IFINDEX;
int retval = BPF_DROP; /* Fail */
__u32 skb_len = ctx->len;
__u32 mtu_len = 0;
int delta;
int err;
	/* Exceed MTU by 1 via delta adjust */
delta = GLOBAL_USER_MTU - (skb_len - ETH_HLEN) + 1;
err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
if (err) {
retval = BPF_OK; /* Success in exceeding MTU check */
if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = BPF_DROP;
}
global_bpf_mtu_tc = mtu_len;
return retval;
}
SEC("tc")
int tc_exceed_mtu_da(struct __sk_buff *ctx)
{
/* SKB Direct-Access variant */
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 data_len = data_end - data;
int retval = BPF_DROP; /* Fail */
__u32 mtu_len = 0;
int delta;
int err;
	/* Exceed MTU by 1 via delta adjust */
delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
if (err) {
retval = BPF_OK; /* Success in exceeding MTU check */
if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = BPF_DROP;
}
global_bpf_mtu_tc = mtu_len;
return retval;
}
SEC("tc")
int tc_minus_delta(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
__u32 ifindex = GLOBAL_USER_IFINDEX;
__u32 skb_len = ctx->len;
__u32 mtu_len = 0;
int delta;
/* Borderline test case: Minus delta exceeding packet length allowed */
delta = -((skb_len - ETH_HLEN) + 1);
	/* A minus length (adjusted via delta) still passes the MTU check; other
	 * helpers are responsible for catching this when doing the actual size
	 * adjust
	 */
if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
retval = BPF_DROP;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("tc")
int tc_input_len(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
__u32 ifindex = GLOBAL_USER_IFINDEX;
	/* The API allows the user to give a length to check as input via the
	 * mtu_len param; the resulting MTU value is still output in mtu_len
	 * after the call.
	 *
	 * The input length value is an L3 size.
	 */
__u32 mtu_len = GLOBAL_USER_MTU;
if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
retval = BPF_DROP;
global_bpf_mtu_xdp = mtu_len;
return retval;
}
SEC("tc")
int tc_input_len_exceed(struct __sk_buff *ctx)
{
int retval = BPF_DROP; /* Fail */
__u32 ifindex = GLOBAL_USER_IFINDEX;
int err;
	/* The API allows the user to give a length to check as input via the
	 * mtu_len param; the resulting MTU value is still output in mtu_len
	 * after the call.
	 *
	 * The input length value is an L3 size, like MTU.
	 */
__u32 mtu_len = GLOBAL_USER_MTU;
	mtu_len += 1; /* Exceed by 1 */
err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
retval = BPF_OK; /* Success in exceeding MTU check */
global_bpf_mtu_xdp = mtu_len;
return retval;
}
| linux-master | tools/testing/selftests/bpf/progs/test_check_mtu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u32 invocations = 0;
__u32 assertion_error = 0;
__u32 retval_value = 0;
__u32 ctx_retval_value = 0;
__u32 page_size = 0;
SEC("cgroup/getsockopt")
int get_retval(struct bpf_sockopt *ctx)
{
retval_value = bpf_get_retval();
ctx_retval_value = ctx->retval;
__sync_fetch_and_add(&invocations, 1);
	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/getsockopt")
int set_eisconn(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
if (bpf_set_retval(-EISCONN))
assertion_error = 1;
	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
SEC("cgroup/getsockopt")
int clear_retval(struct bpf_sockopt *ctx)
{
__sync_fetch_and_add(&invocations, 1);
ctx->retval = 0;
	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
if (ctx->optlen > page_size)
ctx->optlen = 0;
return 1;
}
| linux-master | tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
/* shuffled layout for relocatable (CO-RE) reads */
struct callback_head___shuffled {
void (*func)(struct callback_head___shuffled *head);
struct callback_head___shuffled *next;
};
struct callback_head k_probe_in = {};
struct callback_head___shuffled k_core_in = {};
struct callback_head *u_probe_in = 0;
struct callback_head___shuffled *u_core_in = 0;
long k_probe_out = 0;
long u_probe_out = 0;
long k_core_out = 0;
long u_core_out = 0;
int my_pid = 0;
SEC("raw_tracepoint/sys_enter")
int handler(void *ctx)
{
int pid = bpf_get_current_pid_tgid() >> 32;
if (my_pid != pid)
return 0;
	/* next pointers for the kernel address space have to be initialized
	 * from the BPF side; user-space mmapped addresses are still user-space
	 * addresses
	 */
k_probe_in.next = &k_probe_in;
__builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
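	/* Each chain below is equivalent to in->next->next->func, performed
	 * with probe reads under the hood. The _CORE_ variants additionally
	 * apply CO-RE field-offset relocations, which is what makes the read
	 * work against the shuffled struct layout; the _USER variants read
	 * from user-space memory.
	 */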
k_probe_out = (long)BPF_PROBE_READ(&k_probe_in, next, next, func);
k_core_out = (long)BPF_CORE_READ(&k_core_in, next, next, func);
u_probe_out = (long)BPF_PROBE_READ_USER(u_probe_in, next, next, func);
u_core_out = (long)BPF_CORE_READ_USER(u_core_in, next, next, func);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_core_read_macros.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
__u64 test1_result = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1, int a)
{
test1_result = a == 1;
return 0;
}
__u64 test2_result = 0;
SEC("fentry/bpf_fentry_test2")
int BPF_PROG(test2, int a, __u64 b)
{
test2_result = a == 2 && b == 3;
return 0;
}
__u64 test3_result = 0;
SEC("fentry/bpf_fentry_test3")
int BPF_PROG(test3, char a, int b, __u64 c)
{
test3_result = a == 4 && b == 5 && c == 6;
return 0;
}
__u64 test4_result = 0;
SEC("fentry/bpf_fentry_test4")
int BPF_PROG(test4, void *a, char b, int c, __u64 d)
{
test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10;
return 0;
}
__u64 test5_result = 0;
SEC("fentry/bpf_fentry_test5")
int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e)
{
test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
e == 15;
return 0;
}
__u64 test6_result = 0;
SEC("fentry/bpf_fentry_test6")
int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f)
{
test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21;
return 0;
}
struct bpf_fentry_test_t {
struct bpf_fentry_test_t *a;
};
__u64 test7_result = 0;
SEC("fentry/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
if (!arg)
test7_result = 1;
return 0;
}
__u64 test8_result = 0;
SEC("fentry/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
if (arg->a == 0)
test8_result = 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/fentry_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
struct map_elem {
struct bpf_timer timer;
struct bpf_spin_lock lock;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct map_elem);
} amap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct map_elem);
} hmap SEC(".maps");
int pid = 0;
int crash_map = 0; /* 0 for amap, 1 for hmap */
SEC("fentry/do_nanosleep")
int sys_enter(void *ctx)
{
struct map_elem *e, value = {};
void *map = crash_map ? (void *)&hmap : (void *)&amap;
if (bpf_get_current_task_btf()->tgid != pid)
return 0;
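	/* Clobber the start of the element, where the struct bpf_timer lives,
	 * with a bogus pointer value; the kernel must not treat it as a live
	 * timer pointer when the element is copied or freed below.
	 */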
*(void **)&value = (void *)0xdeadcaf3;
bpf_map_update_elem(map, &(int){0}, &value, 0);
	/* For the array map, doing bpf_map_update_elem will do a
	 * check_and_free_timer_in_array, which will trigger the crash if the
	 * timer pointer was overwritten; for the hmap we need to use
	 * bpf_timer_cancel.
	 */
if (crash_map == 1) {
e = bpf_map_lookup_elem(map, &(int){0});
if (!e)
return 0;
bpf_timer_cancel(&e->timer);
}
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/timer_crash.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
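/* The argument and return values checked below mirror the bpf_fentry_test*
 * stubs in the kernel's bpf test_run code, which essentially return the sum
 * of their arguments (bpf_fentry_test1() additionally adds 1), e.g.
 * bpf_fentry_test2(2, 3) returns 5.
 */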
__u64 test1_result = 0;
SEC("fexit/bpf_fentry_test1")
int BPF_PROG(test1, int a, int ret)
{
test1_result = a == 1 && ret == 2;
return 0;
}
__u64 test2_result = 0;
SEC("fexit/bpf_fentry_test2")
int BPF_PROG(test2, int a, __u64 b, int ret)
{
test2_result = a == 2 && b == 3 && ret == 5;
return 0;
}
__u64 test3_result = 0;
SEC("fexit/bpf_fentry_test3")
int BPF_PROG(test3, char a, int b, __u64 c, int ret)
{
test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
return 0;
}
__u64 test4_result = 0;
SEC("fexit/bpf_fentry_test4")
int BPF_PROG(test4, void *a, char b, int c, __u64 d, int ret)
{
test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10 &&
ret == 34;
return 0;
}
__u64 test5_result = 0;
SEC("fexit/bpf_fentry_test5")
int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e, int ret)
{
test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
e == 15 && ret == 65;
return 0;
}
__u64 test6_result = 0;
SEC("fexit/bpf_fentry_test6")
int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
{
test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
e == (void *)20 && f == 21 && ret == 111;
return 0;
}
struct bpf_fentry_test_t {
struct bpf_fentry_test *a;
};
__u64 test7_result = 0;
SEC("fexit/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
if (!arg)
test7_result = 1;
return 0;
}
__u64 test8_result = 0;
SEC("fexit/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
if (!arg->a)
test8_result = 1;
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/fexit_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(fentry_test, int a, int *b)
{
return 0;
}
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
return 0;
}
SEC("fexit/bpf_modify_return_test")
int BPF_PROG(fexit_test, int a, int *b, int ret)
{
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_trampoline_count.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define STACK_MAX_LEN 600
#define USE_BPF_LOOP
#include "pyperf.h"
| linux-master | tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
int kprobe2_res = 0;
int kretprobe2_res = 0;
int uprobe_byname_res = 0;
int uretprobe_byname_res = 0;
int uprobe_byname2_res = 0;
int uretprobe_byname2_res = 0;
int uprobe_byname3_sleepable_res = 0;
int uprobe_byname3_res = 0;
int uretprobe_byname3_sleepable_res = 0;
int uretprobe_byname3_res = 0;
void *user_ptr = 0;
SEC("ksyscall/nanosleep")
int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
{
kprobe2_res = 11;
return 0;
}
SEC("kretsyscall/nanosleep")
int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
{
kretprobe2_res = 22;
return ret;
}
SEC("uprobe")
int handle_uprobe_ref_ctr(struct pt_regs *ctx)
{
return 0;
}
SEC("uretprobe")
int handle_uretprobe_ref_ctr(struct pt_regs *ctx)
{
return 0;
}
SEC("uprobe")
int handle_uprobe_byname(struct pt_regs *ctx)
{
uprobe_byname_res = 5;
return 0;
}
/* use auto-attach format for section definition. */
SEC("uretprobe//proc/self/exe:trigger_func2")
int handle_uretprobe_byname(struct pt_regs *ctx)
{
uretprobe_byname_res = 6;
return 0;
}
SEC("uprobe")
int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
char mode_buf[2] = {};
/* verify fopen mode */
bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
if (mode_buf[0] == 'r' && mode_buf[1] == 0)
uprobe_byname2_res = 7;
return 0;
}
SEC("uretprobe")
int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
uretprobe_byname2_res = 8;
return 0;
}
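/* bpf_copy_from_user() may sleep, so it is only available to sleepable
 * programs; that is why the "uprobe.s"/"uretprobe.s" handlers below use it
 * while the non-sleepable variants only set their result values.
 */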
static __always_inline bool verify_sleepable_user_copy(void)
{
char data[9];
bpf_copy_from_user(data, sizeof(data), user_ptr);
return bpf_strncmp(data, sizeof(data), "test_data") == 0;
}
SEC("uprobe.s//proc/self/exe:trigger_func3")
int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
uprobe_byname3_sleepable_res = 9;
return 0;
}
/**
* same target as the uprobe.s above to force sleepable and non-sleepable
* programs in the same bpf_prog_array
*/
SEC("uprobe//proc/self/exe:trigger_func3")
int handle_uprobe_byname3(struct pt_regs *ctx)
{
uprobe_byname3_res = 10;
return 0;
}
SEC("uretprobe.s//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
uretprobe_byname3_sleepable_res = 11;
return 0;
}
SEC("uretprobe//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3(struct pt_regs *ctx)
{
uretprobe_byname3_res = 12;
return 0;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_attach_probe.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#define IP4(a, b, c, d) \
bpf_htonl((((__u32)(a) & 0xffU) << 24) | \
(((__u32)(b) & 0xffU) << 16) | \
(((__u32)(c) & 0xffU) << 8) | \
(((__u32)(d) & 0xffU) << 0))
#define IP6(aaaa, bbbb, cccc, dddd) \
{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
/* Macros for least-significant byte and word accesses. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define LSE_INDEX(index, size) (index)
#else
#define LSE_INDEX(index, size) ((size) - (index) - 1)
#endif
#define LSB(value, index) \
(((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))])
#define LSW(value, index) \
(((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])
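/* For example, with __u32 v = 0x11223344 these yield the same values on
 * either endianness: LSB(v, 0) == 0x44, LSB(v, 3) == 0x11,
 * LSW(v, 0) == 0x3344, LSW(v, 1) == 0x1122.
 */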
#define MAX_SOCKS 32
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, MAX_SOCKS);
__type(key, __u32);
__type(value, __u64);
} redir_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} run_map SEC(".maps");
enum {
PROG1 = 0,
PROG2,
};
enum {
SERVER_A = 0,
SERVER_B,
};
/* Addressable key/value constants for convenience */
static const int KEY_PROG1 = PROG1;
static const int KEY_PROG2 = PROG2;
static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
static const __u16 SRC_PORT = bpf_htons(8008);
static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
}
SEC("sk_lookup")
int check_ifindex(struct bpf_sk_lookup *ctx)
{
if (ctx->ingress_ifindex == 1)
return SK_DROP;
return SK_PASS;
}
SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
return SK_PASS;
}
SEC("sk_reuseport")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
return SK_DROP;
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->local_port != DST_PORT)
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->family != AF_INET)
return SK_PASS;
if (ctx->local_port != DST_PORT)
return SK_PASS;
if (ctx->local_ip4 != DST_IP4)
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->family != AF_INET6)
return SK_PASS;
if (ctx->local_port != DST_PORT)
return SK_PASS;
if (ctx->local_ip6[0] != DST_IP6[0] ||
ctx->local_ip6[1] != DST_IP6[1] ||
ctx->local_ip6[2] != DST_IP6[2] ||
ctx->local_ip6[3] != DST_IP6[3])
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_DROP;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_reuseport")
int select_sock_b(struct sk_reuseport_md *ctx)
{
__u32 key = KEY_SERVER_B;
int err;
err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0);
return err ? SK_DROP : SK_PASS;
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err)
goto out;
bpf_sk_release(sk);
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err != -EEXIST) {
bpf_printk("sk_assign returned %d, expected %d\n",
err, -EEXIST);
goto out;
}
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err)
goto out;
bpf_sk_release(sk);
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
int err, ret;
ret = SK_DROP;
err = bpf_sk_assign(ctx, NULL, 0);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
if (ctx->sk != sk)
goto out;
err = bpf_sk_assign(ctx, NULL, 0);
if (err != -EEXIST)
goto out;
err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that selected sk is accessible through context. */
SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
int err, ret;
ret = SK_DROP;
/* Try accessing unassigned (NULL) ctx->sk field */
if (ctx->sk && ctx->sk->family != AF_INET)
goto out;
/* Assign a value to ctx->sk */
sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk1)
goto out;
err = bpf_sk_assign(ctx, sk1, 0);
if (err)
goto out;
if (ctx->sk != sk1)
goto out;
/* Access ctx->sk fields */
if (ctx->sk->family != AF_INET ||
ctx->sk->type != SOCK_STREAM ||
ctx->sk->state != BPF_TCP_LISTEN)
goto out;
/* Reset selection */
err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
if (ctx->sk)
goto out;
/* Assign another socket */
sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk2)
goto out;
err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
if (ctx->sk != sk2)
goto out;
/* Access reassigned ctx->sk fields */
if (ctx->sk->family != AF_INET ||
ctx->sk->type != SOCK_STREAM ||
ctx->sk->state != BPF_TCP_LISTEN)
goto out;
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk1)
bpf_sk_release(sk1);
if (sk2)
bpf_sk_release(sk2);
return ret;
}
/* Check narrow loads from ctx fields that support them.
 *
 * Narrow loads of size >= target field size from a non-zero offset
 * are not covered because they give bogus results, that is, the
 * verifier ignores the offset.
 */
SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
__u32 val_u32;
bool v4;
v4 = (ctx->family == AF_INET);
/* Narrow loads from family field */
if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) ||
LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0)
return SK_DROP;
if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6))
return SK_DROP;
/* Narrow loads from protocol field */
if (LSB(ctx->protocol, 0) != IPPROTO_TCP ||
LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0)
return SK_DROP;
if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
/* Narrow loads from remote_port field. Expect SRC_PORT. */
if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
return SK_DROP;
if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
/*
* NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
* is quirky. It gets rewritten by the access converter to a
* 2-byte load for backward compatibility. Treating the load
* result as a be16 value makes the code portable across
* little- and big-endian platforms.
*/
val_u32 = *(__u32 *)&ctx->remote_port;
if (val_u32 != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) ||
LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) ||
LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0)
return SK_DROP;
if (LSW(ctx->local_port, 0) != DST_PORT)
return SK_DROP;
/* Narrow loads from IPv4 fields */
if (v4) {
/* Expect SRC_IP4 in remote_ip4 */
if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
return SK_DROP;
if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) ||
LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) ||
LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) ||
LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff))
return SK_DROP;
if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) ||
LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect 0.0.0.0 IPs when family != AF_INET */
if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 ||
LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0)
return SK_DROP;
if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0)
return SK_DROP;
if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 ||
LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0)
return SK_DROP;
if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0)
return SK_DROP;
}
/* Narrow loads from IPv6 fields */
if (!v4) {
/* Expect SRC_IP6 in remote_ip6 */
if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
return SK_DROP;
if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) ||
LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) ||
LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) ||
LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) ||
LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) ||
LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) ||
LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) ||
LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) ||
LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) ||
LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) ||
LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) ||
LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) ||
LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) ||
LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) ||
LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff))
return SK_DROP;
if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) ||
LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) ||
LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) ||
LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) ||
LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) ||
LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) ||
LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) ||
LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect :: IPs when family != AF_INET6 */
if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 ||
LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 ||
LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 ||
LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 ||
LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 ||
LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 ||
LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 ||
LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0)
return SK_DROP;
if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
return SK_DROP;
if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 ||
LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 ||
LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 ||
LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 ||
LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 ||
LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 ||
LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 ||
LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0)
return SK_DROP;
if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
return SK_DROP;
}
/* Success, redirect to KEY_SERVER_B */
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (sk) {
bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
}
return SK_PASS;
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */
SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err != -ESOCKTNOSUPPORT) {
bpf_printk("sk_assign returned %d, expected %d\n",
err, -ESOCKTNOSUPPORT);
goto out;
}
ret = SK_PASS; /* Success, pass to regular lookup */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
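/* The multi_prog_* programs below are minimal building blocks for the
 * multi-program attachment tests: each one records that it ran by storing
 * PROG_DONE under its own key in run_map and then returns SK_PASS or
 * SK_DROP, so user space can see which programs in the chain executed and
 * how their verdicts combined.
 */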
SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
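/* Helper: look up SERVER_A in redir_map and assign it to this lookup.
 * Returns SK_DROP if the lookup or the assignment fails, SK_PASS otherwise.
 */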
static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_DROP;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
if (err)
return SK_DROP;
return SK_PASS;
}
SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
(void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
(void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
char _license[] SEC("license") = "Dual BSD/GPL";
| linux-master | tools/testing/selftests/bpf/progs/test_sk_lookup.c |
#include "core_reloc_types.h"
void f(struct core_reloc_ptr_as_arr___diff_sz x) {}
| linux-master | tools/testing/selftests/bpf/progs/btf__core_reloc_ptr_as_arr___diff_sz.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
struct node_data {
long key;
long data;
struct bpf_rb_node node;
};
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};
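/* private() places the annotated variables in a dedicated, 8-byte-aligned,
 * hidden ".data.<name>" section, so glock and groot below end up together
 * in the same global-data map.
 */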
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
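/* Ordering callback passed to bpf_rbtree_add(): orders nodes by key and
 * records via less_callback_ran that it was invoked.
 */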
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct node_data *node_a;
struct node_data *node_b;
node_a = container_of(a, struct node_data, node);
node_b = container_of(b, struct node_data, node);
less_callback_ran = 1;
return node_a->key < node_b->key;
}
static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
{
struct node_data *n, *m;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
n->key = 5;
m = bpf_obj_new(typeof(*m));
if (!m) {
bpf_obj_drop(n);
return 2;
}
m->key = 1;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_rbtree_add(&groot, &m->node, less);
bpf_spin_unlock(&glock);
n = bpf_obj_new(typeof(*n));
if (!n)
return 3;
n->key = 3;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_spin_unlock(&glock);
return 0;
}
SEC("tc")
long rbtree_add_nodes(void *ctx)
{
return __add_three(&groot, &glock);
}
SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
struct node_data *n, *m = NULL;
n = bpf_obj_new(typeof(*n));
if (!n)
goto err_out;
n->key = 5;
m = bpf_obj_new(typeof(*m));
if (!m)
goto err_out;
m->key = 3;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_rbtree_add(&groot, &m->node, less);
res = bpf_rbtree_remove(&groot, &n->node);
bpf_spin_unlock(&glock);
if (!res)
return 1;
n = container_of(res, struct node_data, node);
removed_key = n->key;
bpf_obj_drop(n);
return 0;
err_out:
if (n)
bpf_obj_drop(n);
if (m)
bpf_obj_drop(m);
return 1;
}
SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
struct bpf_rb_node *res = NULL;
struct node_data *n, *m, *o;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
n->key = 3;
n->data = 4;
m = bpf_obj_new(typeof(*m));
if (!m)
goto err_out;
m->key = 5;
m->data = 6;
o = bpf_obj_new(typeof(*o));
if (!o)
goto err_out;
o->key = 1;
o->data = 2;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_rbtree_add(&groot, &m->node, less);
bpf_rbtree_add(&groot, &o->node, less);
res = bpf_rbtree_first(&groot);
if (!res) {
bpf_spin_unlock(&glock);
return 2;
}
o = container_of(res, struct node_data, node);
first_data[0] = o->data;
res = bpf_rbtree_remove(&groot, &o->node);
bpf_spin_unlock(&glock);
if (!res)
return 5;
o = container_of(res, struct node_data, node);
removed_key = o->key;
bpf_obj_drop(o);
bpf_spin_lock(&glock);
res = bpf_rbtree_first(&groot);
if (!res) {
bpf_spin_unlock(&glock);
return 3;
}
o = container_of(res, struct node_data, node);
first_data[1] = o->data;
bpf_spin_unlock(&glock);
return 0;
err_out:
if (n)
bpf_obj_drop(n);
if (m)
bpf_obj_drop(m);
return 1;
}
SEC("tc")
long rbtree_api_release_aliasing(void *ctx)
{
struct node_data *n, *m, *o;
struct bpf_rb_node *res, *res2;
n = bpf_obj_new(typeof(*n));
if (!n)
return 1;
n->key = 41;
n->data = 42;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less);
bpf_spin_unlock(&glock);
bpf_spin_lock(&glock);
/* m and o point to the same node,
* but the verifier doesn't know this
*/
res = bpf_rbtree_first(&groot);
if (!res)
goto err_out;
o = container_of(res, struct node_data, node);
res = bpf_rbtree_first(&groot);
if (!res)
goto err_out;
m = container_of(res, struct node_data, node);
res = bpf_rbtree_remove(&groot, &m->node);
/* Retval of previous remove returns an owning reference to m,
* which is the same node non-owning ref o is pointing at.
* We can safely try to remove o as the second rbtree_remove will
* return NULL since the node isn't in a tree.
*
* Previously we relied on the verifier type system + rbtree_remove
* invalidating non-owning refs to ensure that rbtree_remove couldn't
* fail, but now rbtree_remove does runtime checking so we no longer
* invalidate non-owning refs after remove.
*/
res2 = bpf_rbtree_remove(&groot, &o->node);
bpf_spin_unlock(&glock);
if (res) {
o = container_of(res, struct node_data, node);
first_data[0] = o->data;
bpf_obj_drop(o);
}
if (res2) {
/* The second remove fails, so res2 is null and this doesn't
* execute
*/
m = container_of(res2, struct node_data, node);
first_data[1] = m->data;
bpf_obj_drop(m);
}
return 0;
err_out:
bpf_spin_unlock(&glock);
return 1;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/rbtree.c |
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, long long);
} map_hash_8b SEC(".maps");
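/* Each test below is a __naked function whose body is the BPF program as
 * inline assembly; the __description/__success/__failure/__msg annotations
 * state the expected verifier outcome and are consumed by the selftests'
 * test loader.
 */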
SEC("tc")
__description("multiple registers share map_lookup_elem result")
__success __retval(0)
__naked void share_map_lookup_elem_result(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 1")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_1(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 += -2; \
r4 += 2; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 2")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_2(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 &= -1; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 3")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_3(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r4 <<= 1; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("invalid memory access with multiple map_lookup_elem calls")
__failure __msg("R4 !read_ok")
__naked void multiple_map_lookup_elem_calls(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
r8 = r1; \
r7 = r2; \
call %[bpf_map_lookup_elem]; \
r4 = r0; \
r1 = r8; \
r2 = r7; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("tc")
__description("valid indirect map_lookup_elem access with 2nd lookup in branch")
__success __retval(0)
__naked void with_2nd_lookup_in_branch(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
r8 = r1; \
r7 = r2; \
call %[bpf_map_lookup_elem]; \
r2 = 10; \
if r2 != 0 goto l0_%=; \
r1 = r8; \
r2 = r7; \
call %[bpf_map_lookup_elem]; \
l0_%=: r4 = r0; \
if r0 == 0 goto l1_%=; \
r1 = 0; \
*(u64*)(r4 + 0) = r1; \
l1_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("invalid map access from else condition")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void map_access_from_else_condition(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u32*)(r0 + 0); \
if r1 >= %[__imm_0] goto l1_%=; \
r1 += 1; \
l1_%=: r1 <<= 2; \
r0 += r1; \
r1 = %[test_val_foo]; \
*(u64*)(r0 + 0) = r1; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, MAX_ENTRIES-1),
__imm_const(test_val_foo, offsetof(struct test_val, foo))
: __clobber_all);
}
SEC("tc")
__description("map lookup and null branch prediction")
__success __retval(0)
__naked void lookup_and_null_branch_prediction(void)
{
asm volatile (" \
r1 = 10; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r6 = r0; \
if r6 == 0 goto l0_%=; \
if r6 != 0 goto l0_%=; \
r10 += 10; \
l0_%=: exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("cgroup/skb")
__description("MAP_VALUE_OR_NULL check_ids() in regsafe()")
__failure __msg("R8 invalid mem access 'map_value_or_null'")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void null_check_ids_in_regsafe(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
/* r9 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r9 = r0; \
/* r8 = map_lookup_elem(...) */ \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r8 = r0; \
/* r7 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r7 = r0; \
/* r6 = ktime_get_ns() */ \
call %[bpf_ktime_get_ns]; \
r6 = r0; \
/* if r6 > r7 goto +1 ; no new information about the state is derived from\
* ; this check, thus produced verifier states differ\
* ; only in 'insn_idx' \
* r9 = r8 ; optionally share ID between r9 and r8\
*/ \
if r6 > r7 goto l0_%=; \
r9 = r8; \
l0_%=: /* if r9 == 0 goto <exit> */ \
if r9 == 0 goto l1_%=; \
/* read map value via r8, this is not always \
* safe because r8 might be not equal to r9. \
*/ \
r0 = *(u64*)(r8 + 0); \
l1_%=: /* exit 0 */ \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/verifier_value_or_null.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} hash_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_STACK);
__uint(max_entries, 1);
__type(value, int);
} stack_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} array_map SEC(".maps");
const volatile pid_t pid;
long err = 0;
static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
{
return 0;
}
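/* Each handler below is attached to a distinct syscall tracepoint, filters
 * on the configured pid, performs one map operation and records the result
 * in err for the user-space test to check.
 */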
SEC("tp/syscalls/sys_enter_getpid")
int map_update(void *ctx)
{
const int key = 0;
const int val = 1;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST);
return 0;
}
SEC("tp/syscalls/sys_enter_getppid")
int map_delete(void *ctx)
{
const int key = 0;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_delete_elem(&hash_map, &key);
return 0;
}
SEC("tp/syscalls/sys_enter_getuid")
int map_push(void *ctx)
{
const int val = 1;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_push_elem(&stack_map, &val, 0);
return 0;
}
SEC("tp/syscalls/sys_enter_geteuid")
int map_pop(void *ctx)
{
int val;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_pop_elem(&stack_map, &val);
return 0;
}
SEC("tp/syscalls/sys_enter_getgid")
int map_peek(void *ctx)
{
int val;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_peek_elem(&stack_map, &val);
return 0;
}
SEC("tp/syscalls/sys_enter_gettid")
int map_for_each_pass(void *ctx)
{
const int key = 0;
const int val = 1;
const u64 flags = 0;
int callback_ctx;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
bpf_map_update_elem(&array_map, &key, &val, flags);
err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
return 0;
}
SEC("tp/syscalls/sys_enter_getpgid")
int map_for_each_fail(void *ctx)
{
const int key = 0;
const int val = 1;
const u64 flags = BPF_NOEXIST;
int callback_ctx;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
bpf_map_update_elem(&array_map, &key, &val, flags);
/* calling for_each with non-zero flags will return an error */
err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/test_map_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u64);
} hashmap1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, __u64);
__type(value, __u64);
} hashmap2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u32);
} hashmap3 SEC(".maps");
/* will be set before prog run */
bool in_test_mode = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
__u64 val_sum = 0;
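/* In test mode the iterator program below walks hashmap1 and accumulates
 * the key fields and values into the sums above, which the user-space side
 * of the selftest presumably compares against the data it inserted.
 */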
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
struct seq_file *seq = ctx->meta->seq;
__u32 seq_num = ctx->meta->seq_num;
struct bpf_map *map = ctx->map;
struct key_t *key = ctx->key;
struct key_t tmp_key;
__u64 *val = ctx->value;
__u64 tmp_val = 0;
int ret;
if (in_test_mode) {
/* test mode is used by selftests to
* test functionality of bpf_hash_map iter.
*
		 * the above hashmap1 has the correct key/value size
		 * and will be accepted; hashmap2 and hashmap3
		 * should be rejected due to their smaller key/value
		 * sizes.
*/
if (key == (void *)0 || val == (void *)0)
return 0;
/* update the value and then delete the <key, value> pair.
* it should not impact the existing 'val' which is still
* accessible under rcu.
*/
__builtin_memcpy(&tmp_key, key, sizeof(struct key_t));
ret = bpf_map_update_elem(&hashmap1, &tmp_key, &tmp_val, 0);
if (ret)
return 0;
ret = bpf_map_delete_elem(&hashmap1, &tmp_key);
if (ret)
return 0;
key_sum_a += key->a;
key_sum_b += key->b;
key_sum_c += key->c;
val_sum += *val;
return 0;
}
/* non-test mode, the map is prepared with the
* below bpftool command sequence:
* bpftool map create /sys/fs/bpf/m1 type hash \
* key 12 value 8 entries 3 name map1
* bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 1 \
* value 0 0 0 1 0 0 0 1
* bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 2 \
* value 0 0 0 1 0 0 0 2
* The bpftool iter command line:
* bpftool iter pin ./bpf_iter_bpf_hash_map.o /sys/fs/bpf/p1 \
* map id 77
* The below output will be:
* map dump starts
* 77: (1000000 0 2000000) (200000001000000)
* 77: (1000000 0 1000000) (100000001000000)
* map dump ends
*/
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, "map dump starts\n");
if (key == (void *)0 || val == (void *)0) {
BPF_SEQ_PRINTF(seq, "map dump ends\n");
return 0;
}
BPF_SEQ_PRINTF(seq, "%d: (%x %d %x) (%llx)\n", map->id,
key->a, key->b, key->c, *val);
return 0;
}
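/* Sleepable ("iter.s") variant that only prints the header line; it appears
 * to exist mainly to exercise loading a sleepable map-element iterator.
 */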
SEC("iter.s/bpf_map_elem")
int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx)
{
if (ctx->meta->seq_num == 0)
BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n");
return 0;
}
| linux-master | tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char LICENSE[] SEC("license") = "GPL";
bool seen_tc1;
bool seen_tc2;
bool seen_tc3;
bool seen_tc4;
bool seen_tc5;
bool seen_tc6;
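/* Each program below flips its seen_tc* flag so user space can tell which
 * entries of the tcx chain ran; TCX_NEXT continues to the next program in
 * the chain, while TCX_PASS ends the chain and hands the packet to the
 * stack.
 */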
SEC("tc/ingress")
int tc1(struct __sk_buff *skb)
{
seen_tc1 = true;
return TCX_NEXT;
}
SEC("tc/egress")
int tc2(struct __sk_buff *skb)
{
seen_tc2 = true;
return TCX_NEXT;
}
SEC("tc/egress")
int tc3(struct __sk_buff *skb)
{
seen_tc3 = true;
return TCX_NEXT;
}
SEC("tc/egress")
int tc4(struct __sk_buff *skb)
{
seen_tc4 = true;
return TCX_NEXT;
}
SEC("tc/egress")
int tc5(struct __sk_buff *skb)
{
seen_tc5 = true;
return TCX_PASS;
}
SEC("tc/egress")
int tc6(struct __sk_buff *skb)
{
seen_tc6 = true;
return TCX_PASS;
}
| linux-master | tools/testing/selftests/bpf/progs/test_tc_link.c |
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
struct prog_test_ref_kfunc __kptr *ref_ptr;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} array_map SEC(".maps");
struct pcpu_array_map {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} pcpu_array_map SEC(".maps");
struct hash_map {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} hash_map SEC(".maps");
struct pcpu_hash_map {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} pcpu_hash_map SEC(".maps");
struct hash_malloc_map {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_malloc_map SEC(".maps");
struct pcpu_hash_malloc_map {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
__uint(map_flags, BPF_F_NO_PREALLOC);
} pcpu_hash_malloc_map SEC(".maps");
struct lru_hash_map {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} lru_hash_map SEC(".maps");
struct lru_pcpu_hash_map {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__type(key, int);
__type(value, struct map_value);
__uint(max_entries, 1);
} lru_pcpu_hash_map SEC(".maps");
struct cgrp_ls_map {
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct map_value);
} cgrp_ls_map SEC(".maps");
struct task_ls_map {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct map_value);
} task_ls_map SEC(".maps");
struct inode_ls_map {
__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct map_value);
} inode_ls_map SEC(".maps");
struct sk_ls_map {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct map_value);
} sk_ls_map SEC(".maps");
#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
struct { \
__uint(type, map_type); \
__uint(max_entries, 1); \
__uint(key_size, sizeof(int)); \
__uint(value_size, sizeof(int)); \
__array(values, struct inner_map_type); \
} name SEC(".maps") = { \
.values = { [0] = &inner_map_type }, \
}
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
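/* WRITE_ONCE() performs a single volatile store so the compiler cannot tear
 * or elide the unreferenced-kptr stores below.
 */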
static void test_kptr_unref(struct map_value *v)
{
struct prog_test_ref_kfunc *p;
p = v->unref_ptr;
/* store untrusted_ptr_or_null_ */
WRITE_ONCE(v->unref_ptr, p);
if (!p)
return;
if (p->a + p->b > 100)
return;
/* store untrusted_ptr_ */
WRITE_ONCE(v->unref_ptr, p);
/* store NULL */
WRITE_ONCE(v->unref_ptr, NULL);
}
static void test_kptr_ref(struct map_value *v)
{
struct prog_test_ref_kfunc *p;
p = v->ref_ptr;
/* store ptr_or_null_ */
WRITE_ONCE(v->unref_ptr, p);
if (!p)
return;
/*
* p is rcu_ptr_prog_test_ref_kfunc,
* because bpf prog is non-sleepable and runs in RCU CS.
* p can be passed to kfunc that requires KF_RCU.
*/
bpf_kfunc_call_test_ref(p);
if (p->a + p->b > 100)
return;
/* store NULL */
p = bpf_kptr_xchg(&v->ref_ptr, NULL);
if (!p)
return;
/*
* p is trusted_ptr_prog_test_ref_kfunc.
* p can be passed to kfunc that requires KF_RCU.
*/
bpf_kfunc_call_test_ref(p);
if (p->a + p->b > 100) {
bpf_kfunc_call_test_release(p);
return;
}
/* store ptr_ */
WRITE_ONCE(v->unref_ptr, p);
bpf_kfunc_call_test_release(p);
p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
if (!p)
return;
/* store ptr_ */
p = bpf_kptr_xchg(&v->ref_ptr, p);
if (!p)
return;
if (p->a + p->b > 100) {
bpf_kfunc_call_test_release(p);
return;
}
bpf_kfunc_call_test_release(p);
}
static void test_kptr(struct map_value *v)
{
test_kptr_unref(v);
test_kptr_ref(v);
}
SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
#define TEST(map) \
v = bpf_map_lookup_elem(&map, &key); \
if (!v) \
return 0; \
test_kptr(v)
TEST(array_map);
TEST(hash_map);
TEST(hash_malloc_map);
TEST(lru_hash_map);
#undef TEST
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
{
struct map_value *v;
v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (v)
test_kptr(v);
return 0;
}
SEC("lsm/inode_unlink")
int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
{
struct task_struct *task;
struct map_value *v;
task = bpf_get_current_task_btf();
if (!task)
return 0;
v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (v)
test_kptr(v);
return 0;
}
SEC("lsm/inode_unlink")
int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
{
struct map_value *v;
v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (v)
test_kptr(v);
return 0;
}
SEC("tc")
int test_sk_map_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
struct bpf_sock *sk;
sk = ctx->sk;
if (!sk)
return 0;
v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (v)
test_kptr(v);
return 0;
}
SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0;
void *map;
#define TEST(map_in_map) \
map = bpf_map_lookup_elem(&map_in_map, &key); \
if (!map) \
return 0; \
v = bpf_map_lookup_elem(map, &key); \
if (!v) \
return 0; \
test_kptr(v)
TEST(array_of_array_maps);
TEST(array_of_hash_maps);
TEST(array_of_hash_malloc_maps);
TEST(array_of_lru_hash_maps);
TEST(hash_of_array_maps);
TEST(hash_of_hash_maps);
TEST(hash_of_hash_malloc_maps);
TEST(hash_of_lru_hash_maps);
#undef TEST
return 0;
}
int ref = 1;
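/* ref tracks the refcount the shared prog_test_ref_kfunc object is expected
 * to have at each step; the _pre/_post helpers below bump and check it
 * around acquire/release/kptr_xchg operations, leaving a reference stored in
 * the map between the two phases.
 */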
static __always_inline
int test_map_kptr_ref_pre(struct map_value *v)
{
struct prog_test_ref_kfunc *p, *p_st;
unsigned long arg = 0;
int ret;
p = bpf_kfunc_call_test_acquire(&arg);
if (!p)
return 1;
ref++;
p_st = p->next;
if (p_st->cnt.refs.counter != ref) {
ret = 2;
goto end;
}
p = bpf_kptr_xchg(&v->ref_ptr, p);
if (p) {
ret = 3;
goto end;
}
if (p_st->cnt.refs.counter != ref)
return 4;
p = bpf_kptr_xchg(&v->ref_ptr, NULL);
if (!p)
return 5;
bpf_kfunc_call_test_release(p);
ref--;
if (p_st->cnt.refs.counter != ref)
return 6;
p = bpf_kfunc_call_test_acquire(&arg);
if (!p)
return 7;
ref++;
p = bpf_kptr_xchg(&v->ref_ptr, p);
if (p) {
ret = 8;
goto end;
}
if (p_st->cnt.refs.counter != ref)
return 9;
/* Leave in map */
return 0;
end:
ref--;
bpf_kfunc_call_test_release(p);
return ret;
}
static __always_inline
int test_map_kptr_ref_post(struct map_value *v)
{
struct prog_test_ref_kfunc *p, *p_st;
p_st = v->ref_ptr;
if (!p_st || p_st->cnt.refs.counter != ref)
return 1;
p = bpf_kptr_xchg(&v->ref_ptr, NULL);
if (!p)
return 2;
if (p_st->cnt.refs.counter != ref) {
bpf_kfunc_call_test_release(p);
return 3;
}
p = bpf_kptr_xchg(&v->ref_ptr, p);
if (p) {
bpf_kfunc_call_test_release(p);
return 4;
}
if (p_st->cnt.refs.counter != ref)
return 5;
return 0;
}
#define TEST(map) \
v = bpf_map_lookup_elem(&map, &key); \
if (!v) \
return -1; \
ret = test_map_kptr_ref_pre(v); \
if (ret) \
return ret;
#define TEST_PCPU(map) \
v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
if (!v) \
return -1; \
ret = test_map_kptr_ref_pre(v); \
if (ret) \
return ret;
SEC("tc")
int test_map_kptr_ref1(struct __sk_buff *ctx)
{
struct map_value *v, val = {};
int key = 0, ret;
bpf_map_update_elem(&hash_map, &key, &val, 0);
bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
bpf_map_update_elem(&lru_hash_map, &key, &val, 0);
bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);
TEST(array_map);
TEST(hash_map);
TEST(hash_malloc_map);
TEST(lru_hash_map);
TEST_PCPU(pcpu_array_map);
TEST_PCPU(pcpu_hash_map);
TEST_PCPU(pcpu_hash_malloc_map);
TEST_PCPU(lru_pcpu_hash_map);
return 0;
}
#undef TEST
#undef TEST_PCPU
#define TEST(map) \
v = bpf_map_lookup_elem(&map, &key); \
if (!v) \
return -1; \
ret = test_map_kptr_ref_post(v); \
if (ret) \
return ret;
#define TEST_PCPU(map) \
v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
if (!v) \
return -1; \
ret = test_map_kptr_ref_post(v); \
if (ret) \
return ret;
SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
struct map_value *v;
int key = 0, ret;
TEST(array_map);
TEST(hash_map);
TEST(hash_malloc_map);
TEST(lru_hash_map);
TEST_PCPU(pcpu_array_map);
TEST_PCPU(pcpu_hash_map);
TEST_PCPU(pcpu_hash_malloc_map);
TEST_PCPU(lru_pcpu_hash_map);
return 0;
}
#undef TEST
#undef TEST_PCPU
SEC("tc")
int test_map_kptr_ref3(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
unsigned long sp = 0;
p = bpf_kfunc_call_test_acquire(&sp);
if (!p)
return 1;
ref++;
if (p->cnt.refs.counter != ref) {
bpf_kfunc_call_test_release(p);
return 2;
}
bpf_kfunc_call_test_release(p);
ref--;
return 0;
}
SEC("syscall")
int test_ls_map_kptr_ref1(void *ctx)
{
struct task_struct *current;
struct map_value *v;
current = bpf_get_current_task_btf();
if (!current)
return 100;
v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
if (v)
return 150;
v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!v)
return 200;
return test_map_kptr_ref_pre(v);
}
SEC("syscall")
int test_ls_map_kptr_ref2(void *ctx)
{
struct task_struct *current;
struct map_value *v;
current = bpf_get_current_task_btf();
if (!current)
return 100;
v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
if (!v)
return 200;
return test_map_kptr_ref_post(v);
}
SEC("syscall")
int test_ls_map_kptr_ref_del(void *ctx)
{
struct task_struct *current;
struct map_value *v;
current = bpf_get_current_task_btf();
if (!current)
return 100;
v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
if (!v)
return 200;
if (!v->ref_ptr)
return 300;
return bpf_task_storage_delete(&task_ls_map, current);
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/map_kptr.c |
// SPDX-License-Identifier: GPL-2.0
#define KBUILD_MODNAME "foo"
#include <string.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* One map uses a devmap, the other a devmap_hash, so both types get tested */
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__uint(max_entries, 1024);
} map_all SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(struct bpf_devmap_val));
__uint(max_entries, 128);
} map_egress SEC(".maps");
/* map to store the egress interfaces' MAC addresses */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __be64);
__uint(max_entries, 128);
} mac_map SEC(".maps");
SEC("xdp")
int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
int if_index = ctx->ingress_ifindex;
struct ethhdr *eth = data;
__u16 h_proto;
__u64 nh_off;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return XDP_DROP;
h_proto = eth->h_proto;
/* Using IPv4 for (BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) testing */
if (h_proto == bpf_htons(ETH_P_IP))
return bpf_redirect_map(&map_all, 0,
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
	/* Using IPv6 for no-flags testing */
else if (h_proto == bpf_htons(ETH_P_IPV6))
return bpf_redirect_map(&map_all, if_index, 0);
/* All others for BPF_F_BROADCAST testing */
else
return bpf_redirect_map(&map_all, 0, BPF_F_BROADCAST);
}
/* The following 2 progs are for 2nd devmap prog testing */
SEC("xdp")
int xdp_redirect_map_all_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&map_egress, 0,
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}
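/* Second-stage ("xdp/devmap") program: runs on the egress interface after
 * the redirect and rewrites the Ethernet source MAC to the address stored
 * for that interface in mac_map.
 */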
SEC("xdp/devmap")
int xdp_devmap_prog(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
__u32 key = ctx->egress_ifindex;
struct ethhdr *eth = data;
__u64 nh_off;
__be64 *mac;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return XDP_DROP;
mac = bpf_map_lookup_elem(&mac_map, &key);
if (mac)
__builtin_memcpy(eth->h_source, mac, ETH_ALEN);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
| linux-master | tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c |