python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0
/*
*
* A test for the patch "Allow compaction of unevictable pages".
* With this patch we should be able to allocate at least 1/4
* of RAM in huge pages. Without the patch much less is
* allocated.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include "../kselftest.h"
#define MAP_SIZE_MB 100
#define MAP_SIZE (MAP_SIZE_MB * 1024 * 1024)
/* Singly-linked list node tracking one MAP_SIZE-sized mmap() region. */
struct map_list {
	void *map;		/* start address of the mapped region */
	struct map_list *next;	/* next node, or NULL at end of list */
};
/*
 * Read MemFree and Hugepagesize (both in kB, as /proc/meminfo reports
 * them) into *memfree and *hugepagesize via a shell pipeline.
 * Returns 0 on success, -1 on failure.
 *
 * Fixes vs. original: popen() can return NULL (e.g. fork failure) and
 * was dereferenced unchecked; the stream was also leaked when fgets()
 * failed before pclose().
 */
int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
	char buffer[256] = {0};
	char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
	FILE *cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		perror("Failed to read meminfo\n");
		return -1;
	}
	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		perror("Failed to read meminfo\n");
		pclose(cmdfile);
		return -1;
	}
	pclose(cmdfile);
	*memfree = atoll(buffer);

	cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
	cmdfile = popen(cmd, "r");
	if (!cmdfile) {
		perror("Failed to read meminfo\n");
		return -1;
	}
	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		perror("Failed to read meminfo\n");
		pclose(cmdfile);
		return -1;
	}
	pclose(cmdfile);
	*hugepagesize = atoll(buffer);

	return 0;
}
/*
 * Verify the precondition for this test: the sysctl
 * /proc/sys/vm/compact_unevictable_allowed must read as '1'.
 * Returns 0 when satisfied, -1 otherwise (including open/read errors).
 */
int prereq(void)
{
	int fd;
	char val;

	fd = open("/proc/sys/vm/compact_unevictable_allowed",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("Failed to open\n"
		       "/proc/sys/vm/compact_unevictable_allowed\n");
		return -1;
	}

	if (read(fd, &val, sizeof(char)) != sizeof(char)) {
		perror("Failed to read from\n"
		       "/proc/sys/vm/compact_unevictable_allowed\n");
		close(fd);
		return -1;
	}
	close(fd);

	return val == '1' ? 0 : -1;
}
/*
 * Free all huge pages, request an unreasonably large number of them and
 * check how much of the estimated free memory the kernel managed to
 * back with huge pages (which requires compaction to succeed).
 * Passes (returns 0) when at least 1/3 of @mem_free was allocatable as
 * huge pages; the initial nr_hugepages value is restored on success.
 * Returns -1 on any failure.
 *
 * Fixes vs. original: guard the division when zero huge pages were
 * allocated (previously divided by zero); lseek before rewriting the
 * proc file after the initial read; "Less that" typo in error message.
 */
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
	int fd;
	int compaction_index = 0;
	unsigned long nr_allocated;
	char initial_nr_hugepages[10] = {0};
	char nr_hugepages[10] = {0};

	/* We want to test with 80% of available memory. Else, OOM killer comes
	   in to play */
	mem_free = mem_free * 0.8;

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("Failed to open /proc/sys/vm/nr_hugepages");
		return -1;
	}

	if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
		perror("Failed to read from /proc/sys/vm/nr_hugepages");
		goto close_fd;
	}

	/* Start with the initial condition of 0 huge pages*/
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
		perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);
	/* Request a large number of huge pages. The Kernel will allocate
	   as much as it can */
	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
		perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);
	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
		goto close_fd;
	}

	/* We should have been able to request at least 1/3 rd of the memory in
	   huge pages */
	nr_allocated = strtoul(nr_hugepages, NULL, 10);
	if (!nr_allocated || !hugepage_size) {
		fprintf(stderr, "ERROR: No huge pages were allocated\n");
		goto close_fd;
	}
	compaction_index = mem_free/(nr_allocated * hugepage_size);

	if (compaction_index > 3) {
		printf("No of huge pages allocated = %lu\n", nr_allocated);
		fprintf(stderr, "ERROR: Less than 1/%d of memory is available\n"
			"as huge pages\n", compaction_index);
		goto close_fd;
	}

	printf("No of huge pages allocated = %lu\n", nr_allocated);

	/* Restore the pre-test nr_hugepages value. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
	    != strlen(initial_nr_hugepages)) {
		perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
		goto close_fd;
	}

	close(fd);
	return 0;

 close_fd:
	close(fd);
	printf("Not OK. Compaction test failed.");
	return -1;
}
/*
 * Fragment 80% of free memory with mlocked 100 MB anonymous mappings,
 * release them, and then check whether compaction can still build huge
 * pages (see check_compaction()).
 *
 * Fix vs. original: the teardown loop advanced the cursor twice per
 * iteration (once in the body, once in the for-step), so it skipped
 * munmap() for every other mapping; list nodes were also never freed.
 */
int main(int argc, char **argv)
{
	struct rlimit lim;
	struct map_list *list, *entry, *next;
	size_t page_size, i;
	void *map = NULL;
	unsigned long mem_free = 0;
	unsigned long hugepage_size = 0;
	long mem_fragmentable_MB = 0;

	if (prereq() != 0) {
		printf("Either the sysctl compact_unevictable_allowed is not\n"
		       "set to 1 or couldn't read the proc file.\n"
		       "Skipping the test\n");
		return KSFT_SKIP;
	}

	lim.rlim_cur = RLIM_INFINITY;
	lim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
		perror("Failed to set rlimit:\n");
		return -1;
	}

	page_size = getpagesize();

	list = NULL;

	if (read_memory_info(&mem_free, &hugepage_size) != 0) {
		printf("ERROR: Cannot read meminfo\n");
		return -1;
	}

	/* Use at most 80% of free memory to keep the OOM killer away. */
	mem_fragmentable_MB = mem_free * 0.8 / 1024;

	while (mem_fragmentable_MB > 0) {
		map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
		if (map == MAP_FAILED)
			break;

		entry = malloc(sizeof(struct map_list));
		if (!entry) {
			munmap(map, MAP_SIZE);
			break;
		}
		entry->map = map;
		entry->next = list;
		list = entry;

		/* Write something (in this case the address of the map) to
		 * ensure that KSM can't merge the mapped pages
		 */
		for (i = 0; i < MAP_SIZE; i += page_size)
			*(unsigned long *)(map + i) = (unsigned long)map + i;

		mem_fragmentable_MB -= MAP_SIZE_MB;
	}

	/* Unmap every region and free every node exactly once. */
	for (entry = list; entry != NULL; entry = next) {
		next = entry->next;
		munmap(entry->map, MAP_SIZE);
		free(entry);
	}

	if (check_compaction(mem_free, hugepage_size) == 0)
		return 0;

	return -1;
}
| linux-master | tools/testing/selftests/mm/compaction_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "../kselftest.h"
#include "vm_util.h"
#define PMD_SIZE_FILE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
#define SMAP_FILE_PATH "/proc/self/smaps"
#define MAX_LINE_LENGTH 500
/* Cached page size/shift — presumably populated by helpers in vm_util.h
 * (e.g. a psize()/pshift() pair); TODO confirm against the header. */
unsigned int __page_size;
unsigned int __page_shift;
/*
 * Return the raw /proc/<pid>/pagemap entry for the virtual page that
 * contains @start, read from the already-open pagemap @fd.
 * Exits the test on a short read.
 */
uint64_t pagemap_get_entry(int fd, char *start)
{
	const unsigned long vpage = (unsigned long)start / getpagesize();
	uint64_t entry;

	/* One 8-byte entry per virtual page, indexed by page number. */
	if (pread(fd, &entry, sizeof(entry), vpage * sizeof(entry)) !=
	    sizeof(entry))
		ksft_exit_fail_msg("reading pagemap failed\n");
	return entry;
}
/* True if the pagemap soft-dirty bit is set for the page at @start. */
bool pagemap_is_softdirty(int fd, char *start)
{
	return pagemap_get_entry(fd, start) & PM_SOFT_DIRTY;
}
/* True if the page at @start is currently swapped out. */
bool pagemap_is_swapped(int fd, char *start)
{
	return pagemap_get_entry(fd, start) & PM_SWAP;
}
/* True if the page at @start is backed at all (present in RAM or swap). */
bool pagemap_is_populated(int fd, char *start)
{
	return pagemap_get_entry(fd, start) & (PM_PRESENT | PM_SWAP);
}
/*
 * Return the physical frame number for the page at @start, or -1ul when
 * the page is not present in RAM.
 */
unsigned long pagemap_get_pfn(int fd, char *start)
{
	const uint64_t ent = pagemap_get_entry(fd, start);

	/* PFN lives in bits 0-54, valid only when the present bit (63) is set. */
	return (ent & PM_PRESENT) ? (ent & 0x007fffffffffffffull) : -1ul;
}
/*
 * Clear the soft-dirty bits of the calling process by writing "4" to
 * /proc/self/clear_refs. Exits the test on open or write failure.
 */
void clear_softdirty(void)
{
	const char *ctrl = "4";
	int fd = open("/proc/self/clear_refs", O_WRONLY);
	int ret;

	if (fd < 0)
		ksft_exit_fail_msg("opening clear_refs failed\n");

	ret = write(fd, ctrl, strlen(ctrl));
	close(fd);
	if (ret != strlen(ctrl))
		ksft_exit_fail_msg("writing clear_refs failed\n");
}
/*
 * Scan forward through @fp one line at a time until a line starting
 * with @pattern is found; that line is left in @buf (at most @len
 * bytes). Returns true on a match, false when EOF is reached first.
 */
bool check_for_pattern(FILE *fp, const char *pattern, char *buf, size_t len)
{
	const size_t plen = strlen(pattern);

	while (fgets(buf, len, fp) != NULL) {
		if (strncmp(buf, pattern, plen) == 0)
			return true;
	}
	return false;
}
/*
 * Read the transparent-hugepage PMD size (bytes) from sysfs.
 * Returns 0 when the file is missing or unreadable (THP unavailable).
 */
uint64_t read_pmd_pagesize(void)
{
	char buf[20];
	ssize_t nread;
	int fd = open(PMD_SIZE_FILE_PATH, O_RDONLY);

	if (fd == -1)
		return 0;

	nread = read(fd, buf, 19);
	close(fd);
	if (nread < 1)
		return 0;

	buf[nread] = '\0';
	return strtoul(buf, NULL, 10);
}
/*
 * Check in /proc/self/smaps whether the VMA starting at @addr is backed
 * by exactly @nr_hpages huge pages of @hpage_size bytes, according to
 * the smaps field named by @pattern (e.g. "AnonHugePages: ").
 * Returns true on an exact match, false otherwise (including when the
 * VMA or the field cannot be found).
 */
bool __check_huge(void *addr, char *pattern, int nr_hpages,
		  uint64_t hpage_size)
{
	uint64_t thp = -1;
	int ret;
	FILE *fp;
	char buffer[MAX_LINE_LENGTH];
	char addr_pattern[MAX_LINE_LENGTH];

	/* smaps VMA headers begin with "<start>-<end>"; match on the start. */
	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
		       (unsigned long) addr);
	if (ret >= MAX_LINE_LENGTH)
		ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);

	fp = fopen(SMAP_FILE_PATH, "r");
	if (!fp)
		ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, SMAP_FILE_PATH);

	/* Seek to the VMA's header line first. */
	if (!check_for_pattern(fp, addr_pattern, buffer, sizeof(buffer)))
		goto err_out;

	/*
	 * Fetch the pattern in the same block and check the number of
	 * hugepages.
	 */
	if (!check_for_pattern(fp, pattern, buffer, sizeof(buffer)))
		goto err_out;

	/* Parse e.g. "AnonHugePages:     4096 kB" into @thp (kB). */
	snprintf(addr_pattern, MAX_LINE_LENGTH, "%s%%9ld kB", pattern);
	if (sscanf(buffer, addr_pattern, &thp) != 1)
		ksft_exit_fail_msg("Reading smap error\n");

err_out:
	fclose(fp);
	/* @hpage_size is in bytes, smaps reports kB, hence the >> 10. */
	return thp == (nr_hpages * (hpage_size >> 10));
}
/* Check anonymous-THP backing of @addr; see __check_huge(). */
bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size)
{
	return __check_huge(addr, "AnonHugePages: ", nr_hpages, hpage_size);
}
/* Check file-backed PMD mapping of @addr; see __check_huge(). */
bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size)
{
	return __check_huge(addr, "FilePmdMapped:", nr_hpages, hpage_size);
}
/* Check shmem PMD mapping of @addr; see __check_huge(). */
bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size)
{
	return __check_huge(addr, "ShmemPmdMapped:", nr_hpages, hpage_size);
}
/*
 * Remap HPAGE_SIZE at @ptr (MAP_FIXED over whatever was there), advise
 * MADV_HUGEPAGE and touch the first word to fault memory in, then read
 * the first two pagemap entries to decide whether the kernel backed the
 * range with a contiguous, aligned huge page.
 * Returns the PFN of the first subpage on success, -1 otherwise.
 */
int64_t allocate_transhuge(void *ptr, int pagemap_fd)
{
	uint64_t ent[2];

	/* drop pmd */
	if (mmap(ptr, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_FIXED | MAP_ANONYMOUS |
		 MAP_NORESERVE | MAP_PRIVATE, -1, 0) != ptr)
		errx(2, "mmap transhuge");

	if (madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE))
		err(2, "MADV_HUGEPAGE");

	/* allocate transparent huge page */
	*(volatile void **)ptr = ptr;

	/* Pagemap entries are 8 bytes each, hence the (pshift() - 3) shift. */
	if (pread(pagemap_fd, ent, sizeof(ent),
		  (uintptr_t)ptr >> (pshift() - 3)) != sizeof(ent))
		err(2, "read pagemap");

	/*
	 * THP test: both subpages present, physically adjacent, and the
	 * first PFN aligned to the huge page boundary.
	 */
	if (PAGEMAP_PRESENT(ent[0]) && PAGEMAP_PRESENT(ent[1]) &&
	    PAGEMAP_PFN(ent[0]) + 1 == PAGEMAP_PFN(ent[1]) &&
	    !(PAGEMAP_PFN(ent[0]) & ((1 << (HPAGE_SHIFT - pshift())) - 1)))
		return PAGEMAP_PFN(ent[0]);

	return -1;
}
/*
 * Return the default hugetlb page size in bytes, parsed from the
 * "Hugepagesize:" line of /proc/meminfo, or 0 if it cannot be read.
 */
unsigned long default_huge_page_size(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char *line = NULL;
	size_t linelen = 0;
	unsigned long hps = 0;

	if (!f)
		return 0;

	while (getline(&line, &linelen, f) > 0) {
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) != 1)
			continue;
		hps <<= 10;	/* kB -> bytes */
		break;
	}

	free(line);
	fclose(f);
	return hps;
}
/*
 * Enumerate the hugetlb page sizes supported by the kernel by listing
 * /sys/kernel/mm/hugepages/hugepages-<N>kB directories. Stores up to
 * @max sizes (in bytes) into @sizes and returns how many were found.
 */
int detect_hugetlb_page_sizes(size_t sizes[], int max)
{
	DIR *dir = opendir("/sys/kernel/mm/hugepages/");
	struct dirent *ent;
	int count = 0;
	size_t kb;

	if (!dir)
		return 0;

	while (count < max && (ent = readdir(dir)) != NULL) {
		if (ent->d_type != DT_DIR)
			continue;
		if (sscanf(ent->d_name, "hugepages-%zukB", &kb) != 1)
			continue;
		sizes[count++] = kb * 1024;
		ksft_print_msg("[INFO] detected hugetlb page size: %zu KiB\n",
			       kb);
	}

	closedir(dir);
	return count;
}
/* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor, uint64_t *ioctls)
{
struct uffdio_register uffdio_register = { 0 };
uint64_t mode = 0;
int ret = 0;
if (miss)
mode |= UFFDIO_REGISTER_MODE_MISSING;
if (wp)
mode |= UFFDIO_REGISTER_MODE_WP;
if (minor)
mode |= UFFDIO_REGISTER_MODE_MINOR;
uffdio_register.range.start = (unsigned long)addr;
uffdio_register.range.len = len;
uffdio_register.mode = mode;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1)
ret = -errno;
else if (ioctls)
*ioctls = uffdio_register.ioctls;
return ret;
}
/* Register @addr/@len with the given modes, discarding the ioctl mask. */
int uffd_register(int uffd, void *addr, uint64_t len,
		  bool miss, bool wp, bool minor)
{
	return uffd_register_with_ioctls(uffd, addr, len,
					 miss, wp, minor, NULL);
}
/*
 * Unregister the range @addr/@len from the userfaultfd @uffd.
 * Returns 0 on success or -errno on ioctl failure.
 */
int uffd_unregister(int uffd, void *addr, uint64_t len)
{
	struct uffdio_range range = { .start = (uintptr_t)addr, .len = len };

	return (ioctl(uffd, UFFDIO_UNREGISTER, &range) == -1) ? -errno : 0;
}
| linux-master | tools/testing/selftests/mm/vm_util.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Dmitry Safonov, Arista Networks
*
* MAP_POPULATE | MAP_PRIVATE should COW VMA pages.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define MMAP_SZ 4096
/*
 * Report "[FAIL] <func>:<line> <description>:<strerror(errno)>" on
 * stderr and exit(1) when @condition is true. Wrapped in do/while(0)
 * so it is safe as a single statement.
 */
#define BUG_ON(condition, description) \
	do { \
		if (condition) { \
			fprintf(stderr, "[FAIL]\t%s:%d\t%s:%s\n", __func__, \
				__LINE__, (description), strerror(errno)); \
			exit(1); \
		} \
	} while (0)
/*
 * Parent side: wait for the child's ping over @sock, rewrite the shared
 * mapping (and msync it into the backing file), ping the child so it can
 * re-check its private COWed copy, then reap the child and propagate its
 * exit status.
 */
static int parent_f(int sock, unsigned long *smap, int child)
{
	int status, ret;

	/* Child signals that it has mapped and verified the file. */
	ret = read(sock, &status, sizeof(int));
	BUG_ON(ret <= 0, "read(sock)");

	*smap = 0x22222BAD;
	ret = msync(smap, MMAP_SZ, MS_SYNC);
	BUG_ON(ret, "msync()");

	ret = write(sock, &status, sizeof(int));
	BUG_ON(ret <= 0, "write(sock)");

	waitpid(child, &status, 0);
	BUG_ON(!WIFEXITED(status), "child in unexpected state");

	return WEXITSTATUS(status);
}
/*
 * Child side: map the file MAP_PRIVATE | MAP_POPULATE (the inherited
 * @smap parameter is intentionally replaced by the new mapping), verify
 * the prefaulted content, signal the parent, and after the parent has
 * rewritten the file verify that the private copy was COWed and still
 * holds the original value.
 */
static int child_f(int sock, unsigned long *smap, int fd)
{
	int ret, buf = 0;

	smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_POPULATE, fd, 0);
	BUG_ON(smap == MAP_FAILED, "mmap()");

	BUG_ON(*smap != 0xdeadbabe, "MAP_PRIVATE | MAP_POPULATE changed file");

	ret = write(sock, &buf, sizeof(int));
	BUG_ON(ret <= 0, "write(sock)");

	/* Parent modifies the file while we wait here. */
	ret = read(sock, &buf, sizeof(int));
	BUG_ON(ret <= 0, "read(sock)");

	BUG_ON(*smap == 0x22222BAD, "MAP_POPULATE didn't COW private page");
	BUG_ON(*smap != 0xdeadbabe, "mapping was corrupted");

	return 0;
}
/*
 * Set up a temp file holding a known value, a shared parent mapping and
 * a socketpair for synchronization, then fork: the child maps the file
 * MAP_PRIVATE | MAP_POPULATE and checks COW semantics against the
 * parent's modification (see parent_f()/child_f()).
 */
int main(int argc, char **argv)
{
	int sock[2], child, ret;
	FILE *ftmp;
	unsigned long *smap;

	ftmp = tmpfile();
	BUG_ON(!ftmp, "tmpfile()");

	ret = ftruncate(fileno(ftmp), MMAP_SZ);
	BUG_ON(ret, "ftruncate()");

	/* Shared mapping: writes here reach the file the child maps. */
	smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fileno(ftmp), 0);
	BUG_ON(smap == MAP_FAILED, "mmap()");

	*smap = 0xdeadbabe;
	/* Probably unnecessary, but let it be. */
	ret = msync(smap, MMAP_SZ, MS_SYNC);
	BUG_ON(ret, "msync()");

	ret = socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sock);
	BUG_ON(ret, "socketpair()");

	child = fork();
	BUG_ON(child == -1, "fork()");

	if (child) {
		ret = close(sock[0]);
		BUG_ON(ret, "close()");

		return parent_f(sock[1], smap, child);
	}

	ret = close(sock[1]);
	BUG_ON(ret, "close()");

	return child_f(sock[0], smap, fileno(ftmp));
}
| linux-master | tools/testing/selftests/mm/map_populate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test handling of code that might set PTE/PMD dirty in read-only VMAs.
* Setting a PTE/PMD dirty must not accidentally set the PTE/PMD writable.
*
* Copyright 2023, Red Hat, Inc.
*
* Author(s): David Hildenbrand <[email protected]>
*/
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>
#include <setjmp.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>
#include <linux/mempolicy.h>
#include "../kselftest.h"
#include "vm_util.h"
static size_t pagesize;		/* system base page size, set in main() */
static size_t thpsize;		/* THP PMD size, 0 if THP unavailable */
static int mem_fd;		/* /proc/self/mem, for FOLL_FORCE writes */
static int pagemap_fd;		/* /proc/self/pagemap */
static sigjmp_buf env;		/* jump target for the SIGSEGV handler */
/* Jump back to the sigsetjmp() site: 1 for SIGSEGV, 2 for anything else. */
static void signal_handler(int sig)
{
	siglongjmp(env, sig == SIGSEGV ? 1 : 2);
}
/*
 * Try to write through @mem and record a test pass iff the write raised
 * SIGSEGV and left the byte unmodified. Uses sigsetjmp/siglongjmp so the
 * process survives the fault.
 */
static void do_test_write_sigsegv(char *mem)
{
	char orig = *mem;
	int ret;

	if (signal(SIGSEGV, signal_handler) == SIG_ERR) {
		ksft_test_result_fail("signal() failed\n");
		return;
	}

	/* Returns 0 on the direct call, 1 when re-entered from the handler. */
	ret = sigsetjmp(env, 1);
	if (!ret)
		*mem = orig + 1;

	if (signal(SIGSEGV, SIG_DFL) == SIG_ERR)
		ksft_test_result_fail("signal() failed\n");

	ksft_test_result(ret == 1 && *mem == orig,
			 "SIGSEGV generated, page not modified\n");
}
/*
 * Reserve 2 * thpsize with @prot and return a thpsize-aligned pointer
 * inside the reservation after advising MADV_HUGEPAGE on it. On success
 * *_mmap_mem/*_mmap_size describe the whole reservation for the caller
 * to munmap later; returns MAP_FAILED on error or when MADV_HUGEPAGE is
 * unavailable.
 */
static char *mmap_thp_range(int prot, char **_mmap_mem, size_t *_mmap_size)
{
	const size_t mmap_size = 2 * thpsize;
	char *mem, *mmap_mem;

	mmap_mem = mmap(NULL, mmap_size, prot, MAP_PRIVATE|MAP_ANON,
			-1, 0);
	if (mmap_mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return MAP_FAILED;
	}
	/* Round up to the first THP boundary inside the reservation. */
	mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1));

	if (madvise(mem, thpsize, MADV_HUGEPAGE)) {
		ksft_test_result_skip("MADV_HUGEPAGE failed\n");
		munmap(mmap_mem, mmap_size);
		return MAP_FAILED;
	}

	*_mmap_mem = mmap_mem;
	*_mmap_size = mmap_size;
	return mem;
}
/*
 * Use a /proc/self/mem write (ptrace-style FOLL_FORCE) to unshare the
 * zeropage in a read-only VMA, then verify a normal write still faults
 * and does not modify the page.
 */
static void test_ptrace_write(void)
{
	char data = 1;
	char *mem;
	int ret;

	ksft_print_msg("[INFO] PTRACE write access\n");

	mem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return;
	}

	/* Fault in the shared zeropage. */
	if (*mem != 0) {
		ksft_test_result_fail("Memory not zero\n");
		goto munmap;
	}

	/*
	 * Unshare the page (populating a fresh anon page that might be set
	 * dirty in the PTE) in the read-only VMA using ptrace (FOLL_FORCE).
	 */
	lseek(mem_fd, (uintptr_t) mem, SEEK_SET);
	ret = write(mem_fd, &data, 1);
	if (ret != 1 || *mem != data) {
		ksft_test_result_fail("write() failed\n");
		goto munmap;
	}

	do_test_write_sigsegv(mem);
munmap:
	munmap(mem, pagesize);
}
/*
 * Same as test_ptrace_write(), but targeting a THP-sized read-only
 * range: the FOLL_FORCE write may place a fresh THP with a dirty PMD,
 * which must still not become writable.
 */
static void test_ptrace_write_thp(void)
{
	char *mem, *mmap_mem;
	size_t mmap_size;
	char data = 1;
	int ret;

	ksft_print_msg("[INFO] PTRACE write access to THP\n");

	mem = mmap_thp_range(PROT_READ, &mmap_mem, &mmap_size);
	if (mem == MAP_FAILED)
		return;

	/*
	 * Write to the first subpage in the read-only VMA using
	 * ptrace(FOLL_FORCE), eventually placing a fresh THP that is marked
	 * dirty in the PMD.
	 */
	lseek(mem_fd, (uintptr_t) mem, SEEK_SET);
	ret = write(mem_fd, &data, 1);
	if (ret != 1 || *mem != data) {
		ksft_test_result_fail("write() failed\n");
		goto munmap;
	}

	/* MM populated a THP if we got the last subpage populated as well. */
	if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
		ksft_test_result_skip("Did not get a THP populated\n");
		goto munmap;
	}

	do_test_write_sigsegv(mem);
munmap:
	munmap(mmap_mem, mmap_size);
}
/*
 * Dirty an anon page, make the VMA read-only, then force page migration
 * via mbind(MPOL_MF_MOVE). The migrated page may have a dirty PTE, which
 * must not make it writable.
 */
static void test_page_migration(void)
{
	char *mem;

	ksft_print_msg("[INFO] Page migration\n");

	mem = mmap(NULL, pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
		   -1, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return;
	}

	/* Populate a fresh page and dirty it. */
	memset(mem, 1, pagesize);
	if (mprotect(mem, pagesize, PROT_READ)) {
		ksft_test_result_fail("mprotect() failed\n");
		goto munmap;
	}

	/* Trigger page migration. Might not be available or fail. */
	if (syscall(__NR_mbind, mem, pagesize, MPOL_LOCAL, NULL, 0x7fful,
		    MPOL_MF_MOVE)) {
		ksft_test_result_skip("mbind() failed\n");
		goto munmap;
	}

	do_test_write_sigsegv(mem);
munmap:
	munmap(mem, pagesize);
}
/*
 * THP variant of test_page_migration(): populate and dirty a THP, make
 * the range read-only, migrate it with mbind(), and verify the migrated
 * (possibly dirty-PMD) THP still faults on write.
 */
static void test_page_migration_thp(void)
{
	char *mem, *mmap_mem;
	size_t mmap_size;

	ksft_print_msg("[INFO] Page migration of THP\n");

	mem = mmap_thp_range(PROT_READ|PROT_WRITE, &mmap_mem, &mmap_size);
	if (mem == MAP_FAILED)
		return;

	/*
	 * Write to the first page, which might populate a fresh anon THP
	 * and dirty it.
	 */
	memset(mem, 1, pagesize);
	if (mprotect(mem, thpsize, PROT_READ)) {
		ksft_test_result_fail("mprotect() failed\n");
		goto munmap;
	}

	/* MM populated a THP if we got the last subpage populated as well. */
	if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
		ksft_test_result_skip("Did not get a THP populated\n");
		goto munmap;
	}

	/* Trigger page migration. Might not be available or fail. */
	if (syscall(__NR_mbind, mem, thpsize, MPOL_LOCAL, NULL, 0x7fful,
		    MPOL_MF_MOVE)) {
		ksft_test_result_skip("mbind() failed\n");
		goto munmap;
	}

	do_test_write_sigsegv(mem);
munmap:
	munmap(mmap_mem, mmap_size);
}
/*
 * Populate and dirty a THP, make it read-only, then PTE-map it by
 * mprotect'ing the last subpage. Splitting may propagate the dirty PMD
 * bit into the PTEs, which must not make the pages writable.
 */
static void test_pte_mapped_thp(void)
{
	char *mem, *mmap_mem;
	size_t mmap_size;

	ksft_print_msg("[INFO] PTE-mapping a THP\n");

	mem = mmap_thp_range(PROT_READ|PROT_WRITE, &mmap_mem, &mmap_size);
	if (mem == MAP_FAILED)
		return;

	/*
	 * Write to the first page, which might populate a fresh anon THP
	 * and dirty it.
	 */
	memset(mem, 1, pagesize);
	if (mprotect(mem, thpsize, PROT_READ)) {
		ksft_test_result_fail("mprotect() failed\n");
		goto munmap;
	}

	/* MM populated a THP if we got the last subpage populated as well. */
	if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
		ksft_test_result_skip("Did not get a THP populated\n");
		goto munmap;
	}

	/* Trigger PTE-mapping the THP by mprotect'ing the last subpage. */
	if (mprotect(mem + thpsize - pagesize, pagesize,
		     PROT_READ|PROT_WRITE)) {
		ksft_test_result_fail("mprotect() failed\n");
		goto munmap;
	}

	do_test_write_sigsegv(mem);
munmap:
	munmap(mmap_mem, mmap_size);
}
#ifdef __NR_userfaultfd
/*
 * Place a page into a read-only VMA via UFFDIO_COPY (which may set the
 * PTE dirty) and verify a subsequent write still faults.
 *
 * Fixes vs. original: malloc(src) was never checked, and src leaked
 * when the mmap() of dst failed.
 */
static void test_uffdio_copy(void)
{
	struct uffdio_register uffdio_register;
	struct uffdio_copy uffdio_copy;
	struct uffdio_api uffdio_api;
	char *dst, *src;
	int uffd;

	ksft_print_msg("[INFO] UFFDIO_COPY\n");

	src = malloc(pagesize);
	if (!src) {
		ksft_test_result_fail("malloc() failed\n");
		return;
	}
	memset(src, 1, pagesize);
	dst = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (dst == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		free(src);
		return;
	}

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		ksft_test_result_skip("__NR_userfaultfd failed\n");
		goto munmap;
	}

	uffdio_api.api = UFFD_API;
	uffdio_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
		ksft_test_result_fail("UFFDIO_API failed\n");
		goto close_uffd;
	}

	uffdio_register.range.start = (unsigned long) dst;
	uffdio_register.range.len = pagesize;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
		ksft_test_result_fail("UFFDIO_REGISTER failed\n");
		goto close_uffd;
	}

	/* Place a page in a read-only VMA, which might set the PTE dirty. */
	uffdio_copy.dst = (unsigned long) dst;
	uffdio_copy.src = (unsigned long) src;
	uffdio_copy.len = pagesize;
	uffdio_copy.mode = 0;
	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
		ksft_test_result_fail("UFFDIO_COPY failed\n");
		goto close_uffd;
	}

	do_test_write_sigsegv(dst);
close_uffd:
	close(uffd);
munmap:
	munmap(dst, pagesize);
	free(src);
}
#endif /* __NR_userfaultfd */
/*
 * Run all dirty-PTE/PMD-in-read-only-VMA scenarios, sizing the ksft
 * plan by whether THP is available and userfaultfd is compiled in.
 */
int main(void)
{
	int err, tests = 2;

	pagesize = getpagesize();
	thpsize = read_pmd_pagesize();
	if (thpsize) {
		ksft_print_msg("[INFO] detected THP size: %zu KiB\n",
			       thpsize / 1024);
		tests += 3;	/* the three THP variants below */
	}
#ifdef __NR_userfaultfd
	tests += 1;
#endif /* __NR_userfaultfd */

	ksft_print_header();
	ksft_set_plan(tests);

	mem_fd = open("/proc/self/mem", O_RDWR);
	if (mem_fd < 0)
		ksft_exit_fail_msg("opening /proc/self/mem failed\n");
	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	if (pagemap_fd < 0)
		ksft_exit_fail_msg("opening /proc/self/pagemap failed\n");

	/*
	 * On some ptrace(FOLL_FORCE) write access via /proc/self/mem in
	 * read-only VMAs, the kernel may set the PTE/PMD dirty.
	 */
	test_ptrace_write();
	if (thpsize)
		test_ptrace_write_thp();
	/*
	 * On page migration, the kernel may set the PTE/PMD dirty when
	 * remapping the page.
	 */
	test_page_migration();
	if (thpsize)
		test_page_migration_thp();
	/* PTE-mapping a THP might propagate the dirty PMD bit to the PTEs. */
	if (thpsize)
		test_pte_mapped_thp();
	/* Placing a fresh page via userfaultfd may set the PTE dirty. */
#ifdef __NR_userfaultfd
	test_uffdio_copy();
#endif /* __NR_userfaultfd */

	err = ksft_get_fail_cnt();
	if (err)
		ksft_exit_fail_msg("%d out of %d tests failed\n",
				   err, ksft_test_num());
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/mkdirty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Example of using hugepage memory in a user application using the mmap
* system call with MAP_HUGETLB flag. Before running this program make
* sure the administrator has allocated enough default sized huge pages
* to cover the 256 MB allocation.
*
* For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
* That means the addresses starting with 0x800000... will need to be
* specified. Specifying a fixed address is not required on ppc64, i386
* or x86_64.
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
/* Only ia64 requires this */
#ifdef __ia64__
#define ADDR (void *)(0x8000000000000000UL)
#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
#else
#define ADDR (void *)(0x0UL)
#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif
/* Print the first word of the mapping so a human can eyeball it. */
static void check_bytes(char *addr)
{
	unsigned int first = *(unsigned int *)addr;

	printf("First hex is %x\n", first);
}
/* Fill @length bytes at @addr with the repeating pattern 0,1,...,255. */
static void write_bytes(char *addr, size_t length)
{
	size_t off;

	for (off = 0; off < length; off++)
		addr[off] = (char)off;
}
/*
 * Verify the 0..255 pattern written by write_bytes(). Returns 0 when
 * everything matches, 1 on the first mismatch (after reporting it).
 */
static int read_bytes(char *addr, size_t length)
{
	unsigned long i;

	check_bytes(addr);
	for (i = 0; i < length; i++) {
		if (*(addr + i) == (char)i)
			continue;
		printf("Mismatch at %lu\n", i);
		return 1;
	}
	return 0;
}
/*
 * Map a hugepage-backed region (size in MB from argv[1], optional huge
 * page shift from argv[2]), fill it with a byte pattern, verify it and
 * unmap. Exit status is 0 on success, 1 on mmap/munmap failure or
 * non-zero on pattern mismatch.
 */
int main(int argc, char **argv)
{
	void *addr;
	int ret;
	size_t length = LENGTH;
	int flags = FLAGS;
	int shift = 0;

	if (argc > 1)
		length = atol(argv[1]) << 20;	/* MB -> bytes */
	if (argc > 2) {
		shift = atoi(argv[2]);
		if (shift)
			flags |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
	}

	if (shift)
		printf("%u kB hugepages\n", 1 << (shift - 10));
	else
		printf("Default size hugepages\n");
	printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);

	addr = mmap(ADDR, length, PROTECTION, flags, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	printf("Returned address is %p\n", addr);
	check_bytes(addr);
	write_bytes(addr, length);
	ret = read_bytes(addr, length);

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, length)) {
		perror("munmap");
		exit(1);
	}

	return ret;
}
| linux-master | tools/testing/selftests/mm/map_hugetlb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Userfaultfd tests util functions
*
* Copyright (C) 2015-2023 Red Hat, Inc.
*/
#include "uffd-common.h"
/* Fixed base for the shmem test mappings; see shmem_allocate_area(). */
#define BASE_PMD_ADDR ((void *)(1UL << 30))
volatile bool test_uffdio_copy_eexist = true;
/* Sizing of the test: set by the harness before uffd_test_ctx_init(). */
unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
/* The src/dst test areas plus their shared-mapping aliases. */
char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
int uffd = -1, uffd_flags, finished, *pipefd, test_type;
bool map_shared;
bool test_uffdio_wp = true;
/* Per-page expected counter values, checked against area contents. */
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
/*
 * Create a memfd (hugetlb-backed when @hugetlb) of @mem_size bytes,
 * truncated and hole-punched so it starts fully empty. Exits via err()
 * on any failure.
 */
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
	int fd = memfd_create("uffd-test", hugetlb ? MFD_HUGETLB : 0);

	if (fd < 0)
		err("memfd_create");
	if (ftruncate(fd, mem_size))
		err("ftruncate");
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
		      mem_size))
		err("fallocate");
	return fd;
}
/* Drop all pages of the anon test area; private anon memory is simply zapped. */
static void anon_release_pages(char *rel_area)
{
	const unsigned long total = nr_pages * page_size;

	if (madvise(rel_area, total, MADV_DONTNEED))
		err("madvise(MADV_DONTNEED) failed");
}
/*
 * Allocate the anonymous test area (@is_src is unused here: src and dst
 * are allocated identically). Returns 0 or -errno, leaving *alloc_area
 * NULL on failure.
 */
static int anon_allocate_area(void **alloc_area, bool is_src)
{
	void *area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (area == MAP_FAILED) {
		*alloc_area = NULL;
		return -errno;
	}
	*alloc_area = area;
	return 0;
}
/* Anonymous memory has no alias mapping; deliberately a no-op. */
static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
}
/*
 * Drop all pages of the hugetlb test area: private mappings are zapped
 * with MADV_DONTNEED, shared file-backed ones need MADV_REMOVE.
 */
static void hugetlb_release_pages(char *rel_area)
{
	const unsigned long total = nr_pages * page_size;

	if (map_shared) {
		if (madvise(rel_area, total, MADV_REMOVE))
			err("madvise(MADV_REMOVE) failed");
	} else {
		if (madvise(rel_area, total, MADV_DONTNEED))
			err("madvise(MADV_DONTNEED) failed");
	}
}
/*
 * Allocate a hugetlb-backed test area from a shared memfd: src uses the
 * first half of the file, dst the second. For shared mappings an alias
 * mapping of the same file range is stored in area_{src,dst}_alias.
 * Returns 0 or -errno.
 *
 * Fixes vs. original: the memfd was leaked on both mmap-failure paths,
 * the primary mapping was left mapped when the alias mmap failed, and
 * errno could be clobbered by cleanup before being returned.
 */
static int hugetlb_allocate_area(void **alloc_area, bool is_src)
{
	off_t size = nr_pages * page_size;
	off_t offset = is_src ? 0 : size;
	void *area_alias = NULL;
	char **alloc_area_alias;
	int ret;
	int mem_fd = uffd_mem_fd_create(size * 2, true);

	*alloc_area = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   (map_shared ? MAP_SHARED : MAP_PRIVATE) |
			   (is_src ? 0 : MAP_NORESERVE),
			   mem_fd, offset);
	if (*alloc_area == MAP_FAILED) {
		ret = -errno;	/* save before close() can clobber errno */
		*alloc_area = NULL;
		close(mem_fd);
		return ret;
	}

	if (map_shared) {
		area_alias = mmap(NULL, size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, mem_fd, offset);
		if (area_alias == MAP_FAILED) {
			ret = -errno;
			munmap(*alloc_area, size);
			*alloc_area = NULL;
			close(mem_fd);
			return ret;
		}
	}

	if (is_src) {
		alloc_area_alias = &area_src_alias;
	} else {
		alloc_area_alias = &area_dst_alias;
	}
	if (area_alias)
		*alloc_area_alias = area_alias;

	/* The mappings keep the pages alive; the fd is no longer needed. */
	close(mem_fd);
	return 0;
}
/*
 * Redirect @start to the shared alias of the dst area at @offset; only
 * meaningful for shared mappings (private ones have no alias).
 */
static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
	if (!map_shared)
		return;

	*start = (unsigned long) area_dst_alias + offset;
}
/* Drop all pages of the shmem test area (shared ⇒ MADV_REMOVE). */
static void shmem_release_pages(char *rel_area)
{
	const unsigned long total = nr_pages * page_size;

	if (madvise(rel_area, total, MADV_REMOVE))
		err("madvise(MADV_REMOVE) failed");
}
/*
 * Allocate a shmem test area plus an alias mapping of the same memfd
 * range at fixed addresses (BASE_PMD_ADDR), with a hugepage-sized gap
 * to keep src/dst VMAs from merging. Returns 0 or -errno.
 *
 * Fix vs. original: the memfd was leaked on both mmap-failure paths
 * (and errno could be clobbered by cleanup before being returned).
 */
static int shmem_allocate_area(void **alloc_area, bool is_src)
{
	void *area_alias = NULL;
	size_t bytes = nr_pages * page_size, hpage_size = read_pmd_pagesize();
	unsigned long offset = is_src ? 0 : bytes;
	char *p = NULL, *p_alias = NULL;
	int ret;
	int mem_fd = uffd_mem_fd_create(bytes * 2, false);

	/* TODO: clean this up. Use a static addr is ugly */
	p = BASE_PMD_ADDR;
	if (!is_src)
		/* src map + alias + interleaved hpages */
		p += 2 * (bytes + hpage_size);
	p_alias = p;
	p_alias += bytes;
	p_alias += hpage_size; /* Prevent src/dst VMA merge */

	*alloc_area = mmap(p, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
			   mem_fd, offset);
	if (*alloc_area == MAP_FAILED) {
		ret = -errno;	/* save before close() can clobber errno */
		*alloc_area = NULL;
		close(mem_fd);
		return ret;
	}
	if (*alloc_area != p)
		err("mmap of memfd failed at %p", p);

	area_alias = mmap(p_alias, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
			  mem_fd, offset);
	if (area_alias == MAP_FAILED) {
		ret = -errno;
		munmap(*alloc_area, bytes);
		*alloc_area = NULL;
		close(mem_fd);
		return ret;
	}
	if (area_alias != p_alias)
		err("mmap of anonymous memory failed at %p", p_alias);

	if (is_src)
		area_src_alias = area_alias;
	else
		area_dst_alias = area_alias;

	/* The mappings keep the pages alive; the fd is no longer needed. */
	close(mem_fd);
	return 0;
}
/* Redirect @start to the alias mapping of the dst area at @offset. */
static void shmem_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
	*start = (unsigned long)area_dst_alias + offset;
}
/*
 * Assert (via err()) that the dst alias area is backed by exactly
 * @expect_nr_hpages PMD-mapped shmem huge pages. Note: checks the
 * global area_dst_alias, not the @p argument.
 */
static void shmem_check_pmd_mapping(void *p, int expect_nr_hpages)
{
	if (!check_huge_shmem(area_dst_alias, expect_nr_hpages,
			      read_pmd_pagesize()))
		err("Did not find expected %d number of hugepages",
		    expect_nr_hpages);
}
/* Per-memory-type operation tables selected by the test harness. */
struct uffd_test_ops anon_uffd_test_ops = {
	.allocate_area = anon_allocate_area,
	.release_pages = anon_release_pages,
	.alias_mapping = noop_alias_mapping,	/* anon has no alias */
	.check_pmd_mapping = NULL,
};

struct uffd_test_ops shmem_uffd_test_ops = {
	.allocate_area = shmem_allocate_area,
	.release_pages = shmem_release_pages,
	.alias_mapping = shmem_alias_mapping,
	.check_pmd_mapping = shmem_check_pmd_mapping,
};

struct uffd_test_ops hugetlb_uffd_test_ops = {
	.allocate_area = hugetlb_allocate_area,
	.release_pages = hugetlb_release_pages,
	.alias_mapping = hugetlb_alias_mapping,
	.check_pmd_mapping = NULL,
};
/*
 * Print a one-line summary of the per-cpu fault counters in @args
 * (@n_cpus entries), e.g. "userfaults: 12 missing (4+4+4\b) ...".
 * The "\b" erases the trailing '+' of each per-cpu list.
 */
void uffd_stats_report(struct uffd_args *args, int n_cpus)
{
	int i;
	unsigned long long miss_total = 0, wp_total = 0, minor_total = 0;

	for (i = 0; i < n_cpus; i++) {
		miss_total += args[i].missing_faults;
		wp_total += args[i].wp_faults;
		minor_total += args[i].minor_faults;
	}

	printf("userfaults: ");
	if (miss_total) {
		printf("%llu missing (", miss_total);
		for (i = 0; i < n_cpus; i++)
			printf("%lu+", args[i].missing_faults);
		printf("\b) ");
	}
	if (wp_total) {
		printf("%llu wp (", wp_total);
		for (i = 0; i < n_cpus; i++)
			printf("%lu+", args[i].wp_faults);
		printf("\b) ");
	}
	if (minor_total) {
		printf("%llu minor (", minor_total);
		for (i = 0; i < n_cpus; i++)
			printf("%lu+", args[i].minor_faults);
		printf("\b)");
	}
	printf("\n");
}
/*
 * Open the global uffd with the requested *features and handshake via
 * UFFDIO_API. On success *features is updated to the features the
 * kernel actually granted. Returns 0 on success, -1 on failure (the fd,
 * if opened, is left for uffd_test_ctx_clear() to close).
 */
int userfaultfd_open(uint64_t *features)
{
	struct uffdio_api uffdio_api;

	uffd = uffd_open(UFFD_FLAGS);
	if (uffd < 0)
		return -1;
	uffd_flags = fcntl(uffd, F_GETFD, NULL);

	uffdio_api.api = UFFD_API;
	uffdio_api.features = *features;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api))
		/* Probably lack of CAP_PTRACE? */
		return -1;
	if (uffdio_api.api != UFFD_API)
		err("UFFDIO_API error: %" PRIu64, (uint64_t)uffdio_api.api);

	*features = uffdio_api.features;
	return 0;
}
/* Unmap the area pointed to by *area (if any) and reset it to NULL. */
static inline void munmap_area(void **area)
{
	if (*area && munmap(*area, nr_pages * page_size))
		err("munmap");
	*area = NULL;
}
/*
 * Tear down everything a previous uffd_test_ctx_init() set up: the
 * per-cpu pipes, the verification counters, the uffd itself and all
 * mapped areas. Safe to call on a partially-initialized context.
 */
static void uffd_test_ctx_clear(void)
{
	size_t i;

	if (pipefd) {
		/* Two fds (read+write end) per cpu. */
		for (i = 0; i < nr_cpus * 2; ++i) {
			if (close(pipefd[i]))
				err("close pipefd");
		}
		free(pipefd);
		pipefd = NULL;
	}

	if (count_verify) {
		free(count_verify);
		count_verify = NULL;
	}

	if (uffd != -1) {
		if (close(uffd))
			err("close uffd");
		uffd = -1;
	}

	munmap_area((void **)&area_src);
	munmap_area((void **)&area_src_alias);
	munmap_area((void **)&area_dst);
	munmap_area((void **)&area_dst_alias);
	munmap_area((void **)&area_remap);
}
/*
 * (Re-)initialize the global test context: allocate the src/dst areas
 * via the selected uffd_test_ops, open the userfaultfd with @features,
 * and set up the per-page verification counters and per-cpu wakeup
 * pipes. Returns 0 on success; on failure returns non-zero and, if
 * @errmsg is non-NULL, points it at a static description.
 *
 * Fix vs. original: spelling of the user-visible "possible lack of
 * privilege" error message.
 */
int uffd_test_ctx_init(uint64_t features, const char **errmsg)
{
	unsigned long nr, cpu;
	int ret;

	uffd_test_ctx_clear();

	ret = uffd_test_ops->allocate_area((void **)&area_src, true);
	ret |= uffd_test_ops->allocate_area((void **)&area_dst, false);
	if (ret) {
		if (errmsg)
			*errmsg = "memory allocation failed";
		return ret;
	}

	ret = userfaultfd_open(&features);
	if (ret) {
		if (errmsg)
			*errmsg = "possible lack of privilege";
		return ret;
	}

	count_verify = malloc(nr_pages * sizeof(unsigned long long));
	if (!count_verify)
		err("count_verify");

	for (nr = 0; nr < nr_pages; nr++) {
		*area_mutex(area_src, nr) =
			(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
		count_verify[nr] = *area_count(area_src, nr) = 1;
		/*
		 * In the transition between 255 to 256, powerpc will
		 * read out of order in my_bcmp and see both bytes as
		 * zero, so leave a placeholder below always non-zero
		 * after the count, to avoid my_bcmp to trigger false
		 * positives.
		 */
		*(area_count(area_src, nr) + 1) = 1;
	}

	/*
	 * After initialization of area_src, we must explicitly release pages
	 * for area_dst to make sure it's fully empty. Otherwise we could have
	 * some area_dst pages be erroneously initialized with zero pages,
	 * hence we could hit memory corruption later in the test.
	 *
	 * One example is when THP is globally enabled, above allocate_area()
	 * calls could have the two areas merged into a single VMA (as they
	 * will have the same VMA flags so they're mergeable). When we
	 * initialize the area_src above, it's possible that some part of
	 * area_dst could have been faulted in via one huge THP that will be
	 * shared between area_src and area_dst. It could cause some of the
	 * area_dst won't be trapped by missing userfaults.
	 *
	 * This release_pages() will guarantee even if that happened, we'll
	 * proactively split the thp and drop any accidentally initialized
	 * pages within area_dst.
	 */
	uffd_test_ops->release_pages(area_dst);

	pipefd = malloc(sizeof(int) * nr_cpus * 2);
	if (!pipefd)
		err("pipefd");
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
			err("pipe");

	return 0;
}
/*
 * Set (wp == true) or clear (wp == false) userfaultfd write-protection on
 * [start, start + len).  Clearing also wakes any blocked faulters.
 * Aborts via err() on ioctl failure.
 */
void wp_range(int ufd, __u64 start, __u64 len, bool wp)
{
	struct uffdio_writeprotect prms = {
		.range = {
			.start = start,
			.len = len,
		},
		.mode = wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	if (ioctl(ufd, UFFDIO_WRITEPROTECT, &prms))
		err("clear WP failed: address=0x%"PRIx64, (uint64_t)start);
}
/*
 * Resolve a minor fault on [start, start + len) with UFFDIO_CONTINUE,
 * optionally keeping the range write-protected, then deliberately repeat
 * the ioctl to exercise the kernel's -EEXIST error path.  Aborts via err()
 * on any unexpected outcome.
 */
static void continue_range(int ufd, __u64 start, __u64 len, bool wp)
{
	struct uffdio_continue req;
	int ret;

	req.range.start = start;
	req.range.len = len;
	req.mode = 0;
	if (wp)
		req.mode |= UFFDIO_CONTINUE_MODE_WP;

	if (ioctl(ufd, UFFDIO_CONTINUE, &req))
		err("UFFDIO_CONTINUE failed for address 0x%" PRIx64,
		    (uint64_t)start);

	/*
	 * Error handling within the kernel for continue is subtly different
	 * from copy or zeropage, so it may be a source of bugs. Trigger an
	 * error (-EEXIST) on purpose, to verify doing so doesn't cause a BUG.
	 */
	req.mapped = 0;
	ret = ioctl(ufd, UFFDIO_CONTINUE, &req);
	/* The retry must fail, and the kernel must report -EEXIST in .mapped */
	if (ret >= 0 || req.mapped != -EEXIST)
		err("failed to exercise UFFDIO_CONTINUE error handling, ret=%d, mapped=%" PRId64,
		    ret, (int64_t) req.mapped);
}
/*
 * Blocking-read one uffd_msg from 'ufd'.  Returns 0 on success, 1 when the
 * read should simply be retried (EAGAIN/EINTR); aborts via err() on real
 * errors or short reads.
 *
 * Fix: the function previously ignored its 'ufd' parameter and read from
 * the global 'uffd'.  All current callers pass the global, so behavior is
 * unchanged for them, but the parameter is now honored.
 */
int uffd_read_msg(int ufd, struct uffd_msg *msg)
{
	int ret = read(ufd, msg, sizeof(*msg));

	if (ret != sizeof(*msg)) {
		if (ret < 0) {
			/* Transient: caller should retry the read */
			if (errno == EAGAIN || errno == EINTR)
				return 1;
			err("blocking read error");
		} else {
			err("short read");
		}
	}

	return 0;
}
/*
 * Dispatch one userfaultfd page-fault message: clear write-protection for
 * WP faults, bit-flip + UFFDIO_CONTINUE for minor faults, UFFDIO_COPY for
 * missing faults.  Increments the matching counter in 'args'.  Aborts via
 * err() on any non-pagefault event or unexpected fault flags.
 */
void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
{
	unsigned long offset;

	if (msg->event != UFFD_EVENT_PAGEFAULT)
		err("unexpected msg event %u", msg->event);

	if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) {
		/* Write protect page faults */
		wp_range(uffd, msg->arg.pagefault.address, page_size, false);
		args->wp_faults++;
	} else if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR) {
		uint8_t *area;
		int b;

		/*
		 * Minor page faults
		 *
		 * To prove we can modify the original range for testing
		 * purposes, we're going to bit flip this range before
		 * continuing.
		 *
		 * Note that this requires all minor page fault tests operate on
		 * area_dst (non-UFFD-registered) and area_dst_alias
		 * (UFFD-registered).
		 */
		/* Translate the faulting alias address back into area_dst. */
		area = (uint8_t *)(area_dst +
				   ((char *)msg->arg.pagefault.address -
				    area_dst_alias));
		for (b = 0; b < page_size; ++b)
			area[b] = ~area[b];
		continue_range(uffd, msg->arg.pagefault.address, page_size,
			       args->apply_wp);
		args->minor_faults++;
	} else {
		/*
		 * Missing page faults.
		 *
		 * Here we force a write check for each of the missing mode
		 * faults.  It's guaranteed because the only threads that
		 * will trigger uffd faults are the locking threads, and
		 * their first instruction to touch the missing page will
		 * always be pthread_mutex_lock().
		 *
		 * Note that here we relied on an NPTL glibc impl detail to
		 * always read the lock type at the entry of the lock op
		 * (pthread_mutex_t.__data.__type, offset 0x10) before
		 * doing any locking operations to guarantee that.  It's
		 * actually not good to rely on this impl detail because
		 * logically a pthread-compatible lib can implement the
		 * locks without types and we can fail when linking with
		 * them.  However since we used to find bugs with this
		 * strict check we still keep it around.  Hopefully this
		 * could be a good hint when it fails again.  If one day
		 * it'll break on some other impl of glibc we'll revisit.
		 */
		if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
			err("unexpected write fault");

		/* Page-align the faulting offset before resolving it. */
		offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
		offset &= ~(page_size-1);

		if (copy_page(uffd, offset, args->apply_wp))
			args->missing_faults++;
	}
}
/*
 * Per-CPU fault-server thread: poll()s the global uffd and this CPU's
 * shutdown pipe, dispatching PAGEFAULT/FORK/REMOVE/REMAP events until a
 * byte arrives on the pipe.  'arg' is this CPU's struct uffd_args; a
 * custom fault handler may be pre-set in args->handle_fault.
 */
void *uffd_poll_thread(void *arg)
{
	struct uffd_args *args = (struct uffd_args *)arg;
	unsigned long cpu = args->cpu;
	struct pollfd pollfd[2];
	struct uffd_msg msg;
	struct uffdio_register uffd_reg;
	int ret;
	char tmp_chr;

	if (!args->handle_fault)
		args->handle_fault = uffd_handle_page_fault;

	pollfd[0].fd = uffd;
	pollfd[0].events = POLLIN;
	/* read end of this CPU's shutdown pipe */
	pollfd[1].fd = pipefd[cpu*2];
	pollfd[1].events = POLLIN;

	for (;;) {
		ret = poll(pollfd, 2, -1);
		if (ret <= 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;
			err("poll error: %d", ret);
		}
		if (pollfd[1].revents) {
			if (!(pollfd[1].revents & POLLIN))
				err("pollfd[1].revents %d", pollfd[1].revents);
			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
				err("read pipefd error");
			/* shutdown byte received: exit the thread */
			break;
		}
		if (!(pollfd[0].revents & POLLIN))
			err("pollfd[0].revents %d", pollfd[0].revents);
		if (uffd_read_msg(uffd, &msg))
			continue;
		switch (msg.event) {
		default:
			err("unexpected msg event %u\n", msg.event);
			break;
		case UFFD_EVENT_PAGEFAULT:
			args->handle_fault(&msg, args);
			break;
		case UFFD_EVENT_FORK:
			/* Switch to the child's uffd delivered by the kernel */
			close(uffd);
			uffd = msg.arg.fork.ufd;
			pollfd[0].fd = uffd;
			break;
		case UFFD_EVENT_REMOVE:
			uffd_reg.range.start = msg.arg.remove.start;
			uffd_reg.range.len = msg.arg.remove.end -
				msg.arg.remove.start;
			if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
				err("remove failure");
			break;
		case UFFD_EVENT_REMAP:
			area_remap = area_dst;	/* save for later unmap */
			area_dst = (char *)(unsigned long)msg.arg.remap.to;
			break;
		}
	}

	return NULL;
}
/*
 * Re-issue an already-successful UFFDIO_COPY through the alias mapping to
 * exercise the kernel's -EEXIST path.  Any other outcome (success, or a
 * different error) is fatal.
 */
static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
			    unsigned long offset)
{
	uffd_test_ops->alias_mapping(&uffdio_copy->dst,
				     uffdio_copy->len,
				     offset);

	/* A second copy of the same page must NOT succeed... */
	if (!ioctl(ufd, UFFDIO_COPY, uffdio_copy))
		err("UFFDIO_COPY retry unexpected: %"PRId64,
		    (int64_t)uffdio_copy->copy);

	/* ...and the real retval in ufdio_copy.copy must be -EEXIST. */
	if (uffdio_copy->copy != -EEXIST)
		err("UFFDIO_COPY retry error: %"PRId64,
		    (int64_t)uffdio_copy->copy);
}
static void wake_range(int ufd, unsigned long addr, unsigned long len)
{
struct uffdio_range uffdio_wake;
uffdio_wake.start = addr;
uffdio_wake.len = len;
if (ioctl(ufd, UFFDIO_WAKE, &uffdio_wake))
fprintf(stderr, "error waking %lu\n",
addr), exit(1);
}
/*
 * Resolve a missing fault by copying one page from area_src to area_dst at
 * 'offset', optionally write-protected.  Returns 1 if this call installed
 * the page, 0 if another thread raced us there (-EEXIST).  With 'retry',
 * periodically re-issues the copy through the alias mapping to exercise
 * the kernel's -EEXIST path (gated by the SIGALRM-driven
 * test_uffdio_copy_eexist flag).
 */
int __copy_page(int ufd, unsigned long offset, bool retry, bool wp)
{
	struct uffdio_copy uffdio_copy;

	if (offset >= nr_pages * page_size)
		err("unexpected offset %lu\n", offset);
	uffdio_copy.dst = (unsigned long) area_dst + offset;
	uffdio_copy.src = (unsigned long) area_src + offset;
	uffdio_copy.len = page_size;
	if (wp)
		uffdio_copy.mode = UFFDIO_COPY_MODE_WP;
	else
		uffdio_copy.mode = 0;
	uffdio_copy.copy = 0;
	if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
		/* real retval in ufdio_copy.copy */
		if (uffdio_copy.copy != -EEXIST)
			err("UFFDIO_COPY error: %"PRId64,
			    (int64_t)uffdio_copy.copy);
		/* Lost the race: wake whoever is waiting on the page anyway. */
		wake_range(ufd, uffdio_copy.dst, page_size);
	} else if (uffdio_copy.copy != page_size) {
		err("UFFDIO_COPY error: %"PRId64, (int64_t)uffdio_copy.copy);
	} else {
		if (test_uffdio_copy_eexist && retry) {
			/* One-shot flag, re-armed by the SIGALRM handler. */
			test_uffdio_copy_eexist = false;
			retry_copy_page(ufd, &uffdio_copy, offset);
		}
		return 1;
	}
	return 0;
}
/* Single-shot page copy: __copy_page() without the EEXIST-retry exercise. */
int copy_page(int ufd, unsigned long offset, bool wp)
{
	return __copy_page(ufd, offset, /*retry=*/false, wp);
}
/*
 * Create a userfaultfd through the /dev/userfaultfd character device.
 * Returns the new uffd, or a negative value on failure.
 */
int uffd_open_dev(unsigned int flags)
{
	int ctl_fd, new_uffd;

	ctl_fd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
	if (ctl_fd < 0)
		return ctl_fd;

	new_uffd = ioctl(ctl_fd, USERFAULTFD_IOC_NEW, flags);
	/* The control fd is only needed for the creation ioctl. */
	close(ctl_fd);
	return new_uffd;
}
/*
 * Create a userfaultfd via the userfaultfd(2) syscall.  Returns -1 when
 * the syscall number is not defined for this build.
 */
int uffd_open_sys(unsigned int flags)
{
#ifdef __NR_userfaultfd
	return syscall(__NR_userfaultfd, flags);
#else
	return -1;
#endif
}
/*
 * Create a userfaultfd, preferring the syscall and falling back to the
 * /dev/userfaultfd device when the syscall path fails.
 */
int uffd_open(unsigned int flags)
{
	int fd = uffd_open_sys(flags);

	return fd < 0 ? uffd_open_dev(flags) : fd;
}
/*
 * Query the kernel's supported userfaultfd feature bits into *features.
 * Returns 0 on success, a negative errno-style value on failure.
 *
 * Fix: the failure path used to "close(fd); return -errno;" — close() may
 * clobber errno before it is read.  Save the error code first.
 */
int uffd_get_features(uint64_t *features)
{
	struct uffdio_api uffdio_api = { .api = UFFD_API, .features = 0 };
	int fd, ret;

	/*
	 * This should by default work in most kernels; the feature list
	 * will be the same no matter what we pass in here.
	 */
	fd = uffd_open(UFFD_USER_MODE_ONLY);
	if (fd < 0)
		/* Maybe the kernel is older than user-only mode? */
		fd = uffd_open(0);
	if (fd < 0)
		return fd;

	if (ioctl(fd, UFFDIO_API, &uffdio_api)) {
		ret = -errno;	/* capture before close() can overwrite it */
		close(fd);
		return ret;
	}

	*features = uffdio_api.features;
	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/mm/uffd-common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A test of splitting PMD THPs and PTE-mapped THPs from a specified virtual
* address range in a process via <debugfs>/split_huge_pages interface.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <string.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <malloc.h>
#include <stdbool.h>
#include "vm_util.h"
uint64_t pagesize;
unsigned int pageshift;
uint64_t pmd_pagesize;
#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
#define INPUT_MAX 80
#define PID_FMT "%d,0x%lx,0x%lx"
#define PATH_FMT "%s,0x%lx,0x%lx"
#define PFN_MASK ((1UL<<55)-1)
#define KPF_THP (1UL<<22)
/*
 * Return 1 if the page containing 'vaddr' is backed by a THP according to
 * /proc/<pid>/pagemap + /proc/kpageflags, 0 otherwise (including when
 * either fd is 0/unavailable).
 *
 * Fix: the pread() results were previously unchecked, so a failed or short
 * read would test uninitialized stack data.  Treat any short read as
 * "not a THP".
 */
int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
{
	uint64_t paddr;
	uint64_t page_flags;

	if (pagemap_file) {
		/* One 64-bit pagemap entry per virtual page. */
		if (pread(pagemap_file, &paddr, sizeof(paddr),
			  ((long)vaddr >> pageshift) * sizeof(paddr)) !=
		    sizeof(paddr))
			return 0;

		if (kpageflags_file) {
			if (pread(kpageflags_file, &page_flags,
				  sizeof(page_flags),
				  (paddr & PFN_MASK) * sizeof(page_flags)) !=
			    sizeof(page_flags))
				return 0;
			return !!(page_flags & KPF_THP);
		}
	}
	return 0;
}
/*
 * Write buflen-1 bytes of 'buf' to 'path' (the trailing NUL is excluded).
 * Returns the number of bytes written, or 0 on open/write failure.
 */
static int write_file(const char *path, const char *buf, size_t buflen)
{
	ssize_t written;
	int fd = open(path, O_WRONLY);

	if (fd == -1)
		return 0;

	written = write(fd, buf, buflen - 1);
	close(fd);

	return written < 1 ? 0 : (unsigned int)written;
}
/*
 * Format a command with printf-style args and write it to the
 * split_huge_pages debugfs file.  Exits on truncation or write failure.
 */
static void write_debugfs(const char *fmt, ...)
{
	char input[INPUT_MAX];
	va_list argp;
	int len;

	va_start(argp, fmt);
	len = vsnprintf(input, INPUT_MAX, fmt, argp);
	va_end(argp);

	/* vsnprintf returns the would-be length; >= INPUT_MAX means truncated */
	if (len >= INPUT_MAX) {
		printf("%s: Debugfs input is too long\n", __func__);
		exit(EXIT_FAILURE);
	}

	if (!write_file(SPLIT_DEBUGFS, input, len + 1)) {
		perror(SPLIT_DEBUGFS);
		exit(EXIT_FAILURE);
	}
}
/*
 * Allocate 4 PMD-sized huge pages of anonymous memory, fill them with a
 * known pattern, split them via the debugfs interface, and verify both
 * that the data survived and that no AnonHugePages remain.  Exits on any
 * failure.
 */
void split_pmd_thp(void)
{
	char *one_page;
	size_t len = 4 * pmd_pagesize;
	size_t i;

	/* PMD-aligned so each pmd_pagesize chunk can be a single THP. */
	one_page = memalign(pmd_pagesize, len);
	if (!one_page) {
		printf("Fail to allocate memory\n");
		exit(EXIT_FAILURE);
	}

	madvise(one_page, len, MADV_HUGEPAGE);

	/* Touch every byte to fault the THPs in with a verifiable pattern. */
	for (i = 0; i < len; i++)
		one_page[i] = (char)i;

	if (!check_huge_anon(one_page, 4, pmd_pagesize)) {
		printf("No THP is allocated\n");
		exit(EXIT_FAILURE);
	}

	/* split all THPs */
	write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
		      (uint64_t)one_page + len);

	/* Splitting must not change page contents. */
	for (i = 0; i < len; i++)
		if (one_page[i] != (char)i) {
			printf("%ld byte corrupted\n", i);
			exit(EXIT_FAILURE);
		}

	if (!check_huge_anon(one_page, 0, pmd_pagesize)) {
		printf("Still AnonHugePages not split\n");
		exit(EXIT_FAILURE);
	}

	printf("Split huge pages successful\n");
	free(one_page);
}
/*
 * Build PTE-mapped THPs by mremap()ing one page out of each of 4 THPs to a
 * contiguous location, split them via debugfs, and verify via
 * /proc/kpageflags that no THP backing remains and the data is intact.
 * Exits on any failure.
 *
 * Fix: the mmap() of one_page and the first mremap() result were
 * previously unchecked; MAP_FAILED/(char *)-1 would have been dereferenced.
 */
void split_pte_mapped_thp(void)
{
	char *one_page, *pte_mapped, *pte_mapped2;
	size_t len = 4 * pmd_pagesize;
	uint64_t thp_size;
	size_t i;
	const char *pagemap_template = "/proc/%d/pagemap";
	const char *kpageflags_proc = "/proc/kpageflags";
	char pagemap_proc[255];
	int pagemap_fd;
	int kpageflags_fd;

	if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0) {
		perror("get pagemap proc error");
		exit(EXIT_FAILURE);
	}
	pagemap_fd = open(pagemap_proc, O_RDONLY);
	if (pagemap_fd == -1) {
		perror("read pagemap:");
		exit(EXIT_FAILURE);
	}

	kpageflags_fd = open(kpageflags_proc, O_RDONLY);
	if (kpageflags_fd == -1) {
		perror("read kpageflags:");
		exit(EXIT_FAILURE);
	}

	/* Hint at 1GiB so the later fixed-address mremaps have room. */
	one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (one_page == MAP_FAILED) {
		perror("mmap failed");
		exit(EXIT_FAILURE);
	}
	madvise(one_page, len, MADV_HUGEPAGE);

	for (i = 0; i < len; i++)
		one_page[i] = (char)i;

	if (!check_huge_anon(one_page, 4, pmd_pagesize)) {
		printf("No THP is allocated\n");
		exit(EXIT_FAILURE);
	}

	/* remap the first pagesize of first THP */
	pte_mapped = mremap(one_page, pagesize, pagesize, MREMAP_MAYMOVE);
	if (pte_mapped == (char *)-1) {
		perror("mremap failed");
		exit(EXIT_FAILURE);
	}

	/* remap the Nth pagesize of Nth THP */
	for (i = 1; i < 4; i++) {
		pte_mapped2 = mremap(one_page + pmd_pagesize * i + pagesize * i,
				     pagesize, pagesize,
				     MREMAP_MAYMOVE|MREMAP_FIXED,
				     pte_mapped + pagesize * i);
		if (pte_mapped2 == (char *)-1) {
			perror("mremap failed");
			exit(EXIT_FAILURE);
		}
	}

	/* smap does not show THPs after mremap, use kpageflags instead */
	thp_size = 0;
	for (i = 0; i < pagesize * 4; i++)
		if (i % pagesize == 0 &&
		    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
			thp_size++;

	if (thp_size != 4) {
		printf("Some THPs are missing during mremap\n");
		exit(EXIT_FAILURE);
	}

	/* split all remapped THPs */
	write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped,
		      (uint64_t)pte_mapped + pagesize * 4);

	/* smap does not show THPs after mremap, use kpageflags instead */
	thp_size = 0;
	for (i = 0; i < pagesize * 4; i++) {
		if (pte_mapped[i] != (char)i) {
			printf("%ld byte corrupted\n", i);
			exit(EXIT_FAILURE);
		}
		if (i % pagesize == 0 &&
		    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
			thp_size++;
	}

	if (thp_size) {
		printf("Still %ld THPs not split\n", thp_size);
		exit(EXIT_FAILURE);
	}

	printf("Split PTE-mapped huge pages successful\n");
	munmap(one_page, len);
	close(pagemap_fd);
	close(kpageflags_fd);
}
/*
 * Mount a huge=always tmpfs in a temp dir, create a small file on it so a
 * file-backed THP can be allocated, then split it via the debugfs
 * interface.  Cleans up the mount and directory on all paths.
 *
 * Fixes:
 *  - open() with O_CREAT requires a mode argument; it was missing, which
 *    is undefined behavior (garbage permissions on the new file).
 *  - mkdtemp() can fail and return NULL, which was passed to mount().
 */
void split_file_backed_thp(void)
{
	int status;
	int fd;
	ssize_t num_written;
	char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
	const char *tmpfs_loc = mkdtemp(tmpfs_template);
	char testfile[INPUT_MAX];
	uint64_t pgoff_start = 0, pgoff_end = 1024;

	printf("Please enable pr_debug in split_huge_pages_in_file() if you need more info.\n");

	if (!tmpfs_loc) {
		perror("cannot create temp dir");
		exit(EXIT_FAILURE);
	}

	status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
	if (status) {
		printf("Unable to create a tmpfs for testing\n");
		exit(EXIT_FAILURE);
	}

	status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
	if (status >= INPUT_MAX) {
		printf("Fail to create file-backed THP split testing file\n");
		goto cleanup;
	}

	fd = open(testfile, O_CREAT|O_WRONLY, 0664);
	if (fd == -1) {
		perror("Cannot open testing file\n");
		goto cleanup;
	}

	/* write something to the file, so a file-backed THP can be allocated */
	num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1);
	close(fd);

	if (num_written < 1) {
		printf("Fail to write data to testing file\n");
		goto cleanup;
	}

	/* split the file-backed THP */
	write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end);

	status = unlink(testfile);
	if (status)
		perror("Cannot remove testing file\n");

cleanup:
	status = umount(tmpfs_loc);
	if (status) {
		printf("Unable to umount %s\n", tmpfs_loc);
		exit(EXIT_FAILURE);
	}
	status = rmdir(tmpfs_loc);
	if (status) {
		perror("cannot remove tmp dir");
		exit(EXIT_FAILURE);
	}

	printf("file-backed THP split test done, please check dmesg for more information\n");
}
/*
 * Entry point: requires root (debugfs/mount access), initializes the
 * global page-size parameters, then runs the three THP split scenarios.
 */
int main(int argc, char **argv)
{
	if (geteuid() != 0) {
		printf("Please run the benchmark as root\n");
		exit(EXIT_FAILURE);
	}

	pagesize = getpagesize();
	/* log2(pagesize): pagesize is a power of two so ffs() - 1 works */
	pageshift = ffs(pagesize) - 1;
	pmd_pagesize = read_pmd_pagesize();
	if (!pmd_pagesize) {
		printf("Reading PMD pagesize failed\n");
		exit(EXIT_FAILURE);
	}

	split_pmd_thp();
	split_pte_mapped_thp();
	split_file_backed_thp();

	return 0;
}
| linux-master | tools/testing/selftests/mm/split_huge_page_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stress userfaultfd syscall.
*
* Copyright (C) 2015 Red Hat, Inc.
*
* This test allocates two virtual areas and bounces the physical
* memory across the two virtual areas (from area_src to area_dst)
* using userfaultfd.
*
* There are three threads running per CPU:
*
* 1) one per-CPU thread takes a per-page pthread_mutex in a random
* page of the area_dst (while the physical page may still be in
* area_src), and increments a per-page counter in the same page,
* and checks its value against a verification region.
*
* 2) another per-CPU thread handles the userfaults generated by
* thread 1 above. userfaultfd blocking reads or poll() modes are
* exercised interleaved.
*
* 3) one last per-CPU thread transfers the memory in the background
* at maximum bandwidth (if not already transferred by thread
* 2). Each cpu thread takes cares of transferring a portion of the
* area.
*
* When all threads of type 3 completed the transfer, one bounce is
* complete. area_src and area_dst are then swapped. All threads are
* respawned and so the bounce is immediately restarted in the
* opposite direction.
*
* per-CPU threads 1 by triggering userfaults inside
* pthread_mutex_lock will also verify the atomicity of the memory
* transfer (UFFDIO_COPY).
*/
#include "uffd-common.h"
#ifdef __NR_userfaultfd
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
#define BOUNCE_VERIFY (1<<2)
#define BOUNCE_POLL (1<<3)
static int bounces;
/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10
static char *zeropage;
pthread_attr_t attr;
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
const char *examples =
"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
"./uffd-stress anon 100 99999\n\n"
"# Run share memory test on 1GiB region with 99 bounces:\n"
"./uffd-stress shmem 1000 99\n\n"
"# Run hugetlb memory test on 256MiB region with 50 bounces:\n"
"./uffd-stress hugetlb 256 50\n\n"
"# Run the same hugetlb test but using private file:\n"
"./uffd-stress hugetlb-private 256 50\n\n"
"# 10MiB-~6GiB 999 bounces anonymous test, "
"continue forever unless an error triggers\n"
"while ./uffd-stress anon $[RANDOM % 6000 + 10] 999; do true; done\n\n";
/* Print command-line usage plus the canned examples, then exit(1). */
static void usage(void)
{
	fprintf(stderr, "\nUsage: ./uffd-stress <test type> <MiB> <bounces>\n\n");
	fprintf(stderr, "Supported <test type>: anon, hugetlb, "
			"hugetlb-private, shmem, shmem-private\n\n");
	fprintf(stderr, "Examples:\n\n");
	fputs(examples, stderr);
	exit(1);
}
/*
 * Reset the per-CPU fault counters and (re)assign each slot its CPU index
 * and the current write-protect mode before a bounce pass.
 */
static void uffd_stats_reset(struct uffd_args *args, unsigned long n_cpus)
{
	unsigned long cpu;

	for (cpu = 0; cpu < n_cpus; cpu++) {
		struct uffd_args *a = &args[cpu];

		a->cpu = cpu;
		a->apply_wp = test_uffdio_wp;
		a->missing_faults = 0;
		a->wp_faults = 0;
		a->minor_faults = 0;
	}
}
/*
 * Per-CPU mutator thread: repeatedly lock a page's mutex in area_dst
 * (possibly triggering a userfault), verify and increment its counter
 * against count_verify[], until 'finished' is set.  Page selection is
 * random or sequential depending on the BOUNCE_RANDOM/RACINGFAULTS mode.
 */
static void *locking_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	unsigned long page_nr;
	unsigned long long count;

	if (!(bounces & BOUNCE_RANDOM)) {
		/* Sequential mode: seed differs per bounce (and per CPU
		 * when each CPU owns its own page range). */
		page_nr = -bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			page_nr += cpu * nr_pages_per_cpu;
	}

	while (!finished) {
		if (bounces & BOUNCE_RANDOM) {
			if (getrandom(&page_nr, sizeof(page_nr), 0) != sizeof(page_nr))
				err("getrandom failed");
		} else
			page_nr += 1;
		page_nr %= nr_pages;
		/* First touch of a missing page happens inside lock() */
		pthread_mutex_lock(area_mutex(area_dst, page_nr));
		count = *area_count(area_dst, page_nr);
		if (count != count_verify[page_nr])
			err("page_nr %lu memory corruption %llu %llu",
			    page_nr, count, count_verify[page_nr]);
		count++;
		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
		pthread_mutex_unlock(area_mutex(area_dst, page_nr));
	}

	return NULL;
}
/* Page copy with the periodic EEXIST-retry exercise enabled. */
static int copy_page_retry(int ufd, unsigned long offset)
{
	return __copy_page(ufd, offset, /*retry=*/true, test_uffdio_wp);
}
pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Per-CPU fault-server thread for blocking-read mode: loops forever
 * reading and handling uffd messages.  It never exits on its own — the
 * main thread stops it with pthread_cancel() (read() is a cancellation
 * point).  uffd_read_mutex is held by the creator and released here to
 * signal that cancellation is safe from now on.
 */
static void *uffd_read_thread(void *arg)
{
	struct uffd_args *args = (struct uffd_args *)arg;
	struct uffd_msg msg;

	pthread_mutex_unlock(&uffd_read_mutex);
	/* from here cancellation is ok */

	for (;;) {
		if (uffd_read_msg(uffd, &msg))
			continue;
		uffd_handle_page_fault(&msg, args);
	}

	return NULL;	/* unreachable; thread exits via cancellation */
}
/*
 * Per-CPU bulk-transfer thread: copies this CPU's slice of pages from
 * area_src to area_dst at full speed, write-protecting the first half
 * midway when uffd-wp testing is enabled.
 */
static void *background_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	unsigned long page_nr, start_nr, mid_nr, end_nr;

	start_nr = cpu * nr_pages_per_cpu;
	end_nr = (cpu+1) * nr_pages_per_cpu;
	mid_nr = (start_nr + end_nr) / 2;

	/* Copy the first half of the pages */
	for (page_nr = start_nr; page_nr < mid_nr; page_nr++)
		copy_page_retry(uffd, page_nr * page_size);

	/*
	 * If we need to test uffd-wp, set it up now.  Then we'll have
	 * at least the first half of the pages mapped already which
	 * can be write-protected for testing
	 */
	if (test_uffdio_wp)
		wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
			nr_pages_per_cpu * page_size, true);

	/*
	 * Continue the 2nd half of the page copying, handling write
	 * protection faults if any
	 */
	for (page_nr = mid_nr; page_nr < end_nr; page_nr++)
		copy_page_retry(uffd, page_nr * page_size);

	return NULL;
}
/*
 * Run one bounce pass: spawn locking, fault-serving (poll or read mode),
 * and background-transfer threads per CPU, wait for the transfer to
 * finish, zap area_src, then stop and join all workers.  Returns 0 on
 * success, 1 on any thread failure.  The shutdown order below is
 * deliberate — see the in-body comments.
 */
static int stress(struct uffd_args *args)
{
	unsigned long cpu;
	pthread_t locking_threads[nr_cpus];
	pthread_t uffd_threads[nr_cpus];
	pthread_t background_threads[nr_cpus];

	finished = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pthread_create(&locking_threads[cpu], &attr,
				   locking_thread, (void *)cpu))
			return 1;
		if (bounces & BOUNCE_POLL) {
			if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
				err("uffd_poll_thread create");
		} else {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_read_thread,
					   (void *)&args[cpu]))
				return 1;
			/* Wait until the reader says cancellation is safe. */
			pthread_mutex_lock(&uffd_read_mutex);
		}
		if (pthread_create(&background_threads[cpu], &attr,
				   background_thread, (void *)cpu))
			return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(background_threads[cpu], NULL))
			return 1;

	/*
	 * Be strict and immediately zap area_src, the whole area has
	 * been transferred already by the background treads.  The
	 * area_src could then be faulted in a racy way by still
	 * running uffdio_threads reading zeropages after we zapped
	 * area_src (but they're guaranteed to get -EEXIST from
	 * UFFDIO_COPY without writing zero pages into area_dst
	 * because the background threads already completed).
	 */
	uffd_test_ops->release_pages(area_src);

	finished = 1;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(locking_threads[cpu], NULL))
			return 1;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		char c;
		if (bounces & BOUNCE_POLL) {
			/* Poll threads exit on a byte from their pipe. */
			if (write(pipefd[cpu*2+1], &c, 1) != 1)
				err("pipefd write error");
			if (pthread_join(uffd_threads[cpu],
					 (void *)&args[cpu]))
				return 1;
		} else {
			/* Read threads block in read() and must be cancelled. */
			if (pthread_cancel(uffd_threads[cpu]))
				return 1;
			if (pthread_join(uffd_threads[cpu], NULL))
				return 1;
		}
	}

	return 0;
}
/*
 * Top-level stress driver: initialize the context once, then for each
 * bounce register area_dst with uffd, run a stress() pass, optionally
 * verify the counters, unregister, and swap src/dst for the next pass.
 * Returns 0 on success, non-zero on failure.
 */
static int userfaultfd_stress(void)
{
	void *area;
	unsigned long nr;
	struct uffd_args args[nr_cpus];
	uint64_t mem_size = nr_pages * page_size;

	memset(args, 0, sizeof(struct uffd_args) * nr_cpus);

	if (uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED, NULL))
		err("context init failed");

	if (posix_memalign(&area, page_size, page_size))
		err("out of memory");
	zeropage = area;
	bzero(zeropage, page_size);

	/* Held so uffd_read_thread can signal cancellation-safety back. */
	pthread_mutex_lock(&uffd_read_mutex);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 16*1024*1024);

	while (bounces--) {
		printf("bounces: %d, mode:", bounces);
		if (bounces & BOUNCE_RANDOM)
			printf(" rnd");
		if (bounces & BOUNCE_RACINGFAULTS)
			printf(" racing");
		if (bounces & BOUNCE_VERIFY)
			printf(" ver");
		if (bounces & BOUNCE_POLL)
			printf(" poll");
		else
			printf(" read");
		printf(", ");
		fflush(stdout);

		/* Poll mode needs a non-blocking uffd; read mode a blocking one. */
		if (bounces & BOUNCE_POLL)
			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
		else
			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);

		/* register */
		if (uffd_register(uffd, area_dst, mem_size,
				  true, test_uffdio_wp, false))
			err("register failure");
		if (area_dst_alias) {
			if (uffd_register(uffd, area_dst_alias, mem_size,
					  true, test_uffdio_wp, false))
				err("register failure alias");
		}

		/*
		 * The madvise done previously isn't enough: some
		 * uffd_thread could have read userfaults (one of
		 * those already resolved by the background thread)
		 * and it may be in the process of calling
		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
		 * area_src and it would map a zero page in it (of
		 * course such a UFFDIO_COPY is perfectly safe as it'd
		 * return -EEXIST). The problem comes at the next
		 * bounce though: that racing UFFDIO_COPY would
		 * generate zeropages in the area_src, so invalidating
		 * the previous MADV_DONTNEED. Without this additional
		 * MADV_DONTNEED those zeropages leftovers in the
		 * area_src would lead to -EEXIST failure during the
		 * next bounce, effectively leaving a zeropage in the
		 * area_dst.
		 *
		 * Try to comment this out madvise to see the memory
		 * corruption being caught pretty quick.
		 *
		 * khugepaged is also inhibited to collapse THP after
		 * MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
		 * required to MADV_DONTNEED here.
		 */
		uffd_test_ops->release_pages(area_dst);

		uffd_stats_reset(args, nr_cpus);

		/* bounce pass */
		if (stress(args))
			return 1;

		/* Clear all the write protections if there is any */
		if (test_uffdio_wp)
			wp_range(uffd, (unsigned long)area_dst,
				 nr_pages * page_size, false);

		/* unregister */
		if (uffd_unregister(uffd, area_dst, mem_size))
			err("unregister failure");
		if (area_dst_alias) {
			if (uffd_unregister(uffd, area_dst_alias, mem_size))
				err("unregister failure alias");
		}

		/* verification */
		if (bounces & BOUNCE_VERIFY)
			for (nr = 0; nr < nr_pages; nr++)
				if (*area_count(area_dst, nr) != count_verify[nr])
					err("error area_count %llu %llu %lu\n",
					    *area_count(area_src, nr),
					    count_verify[nr], nr);

		/* prepare next bounce */
		swap(area_src, area_dst);
		swap(area_src_alias, area_dst_alias);

		uffd_stats_report(args, nr_cpus);
	}

	return 0;
}
/*
 * Map the command-line test-type string onto the globals test_type,
 * uffd_test_ops and map_shared.  Unknown strings leave test_type unset;
 * the caller (parse_test_type_arg) treats that as an error.
 */
static void set_test_type(const char *type)
{
	if (!strcmp(type, "anon")) {
		test_type = TEST_ANON;
		uffd_test_ops = &anon_uffd_test_ops;
		return;
	}
	if (!strcmp(type, "hugetlb")) {
		test_type = TEST_HUGETLB;
		uffd_test_ops = &hugetlb_uffd_test_ops;
		map_shared = true;
		return;
	}
	if (!strcmp(type, "hugetlb-private")) {
		test_type = TEST_HUGETLB;
		uffd_test_ops = &hugetlb_uffd_test_ops;
		return;
	}
	if (!strcmp(type, "shmem")) {
		map_shared = true;
		test_type = TEST_SHMEM;
		uffd_test_ops = &shmem_uffd_test_ops;
		return;
	}
	if (!strcmp(type, "shmem-private")) {
		test_type = TEST_SHMEM;
		uffd_test_ops = &shmem_uffd_test_ops;
	}
}
/*
 * Validate the test-type argument, derive page_size for it, sanity-check
 * that the per-page metadata fits in one page, and probe the kernel's
 * feature set to decide whether uffd-wp can actually be tested.  Fatal on
 * any error.
 */
static void parse_test_type_arg(const char *raw_type)
{
	uint64_t features = UFFD_API_FEATURES;

	set_test_type(raw_type);

	if (!test_type)
		err("failed to parse test type argument: '%s'", raw_type);

	if (test_type == TEST_HUGETLB)
		page_size = default_huge_page_size();
	else
		page_size = sysconf(_SC_PAGE_SIZE);

	if (!page_size)
		err("Unable to determine page size");
	/* mutex + count + anti-false-positive placeholder must fit a page */
	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
	    > page_size)
		err("Impossible to run this test");

	/*
	 * Whether we can test certain features depends not just on test type,
	 * but also on whether or not this particular kernel supports the
	 * feature.
	 */
	if (userfaultfd_open(&features))
		err("Userfaultfd open failed");

	test_uffdio_wp = test_uffdio_wp &&
		(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);

	/* Probe fd no longer needed; the real one is opened per run. */
	close(uffd);
	uffd = -1;
}
/*
 * SIGALRM handler: periodically arms the one-shot EEXIST-retry exercise
 * and re-schedules itself every ALARM_INTERVAL_SECS.
 */
static void sigalrm(int sig)
{
	if (sig != SIGALRM)
		abort();

	/* Re-arm first, then raise the one-shot retry flag. */
	alarm(ALARM_INTERVAL_SECS);
	test_uffdio_copy_eexist = true;
}
/*
 * Entry point: argv[1] = test type, argv[2] = total MiB, argv[3] = bounce
 * count.  Arms the periodic SIGALRM used for the EEXIST exercise, derives
 * the per-CPU page split, then runs the stress loop.
 */
int main(int argc, char **argv)
{
	size_t bytes;

	if (argc < 4)
		usage();

	if (signal(SIGALRM, sigalrm) == SIG_ERR)
		err("failed to arm SIGALRM");
	alarm(ALARM_INTERVAL_SECS);

	parse_test_type_arg(argv[1]);

	/* NOTE(review): atol() reports no errors — a non-numeric argv[2]
	 * yields 0 and is caught below only via nr_pages_per_cpu == 0. */
	bytes = atol(argv[2]) * 1024 * 1024;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);

	nr_pages_per_cpu = bytes / page_size / nr_cpus;
	if (!nr_pages_per_cpu) {
		_err("invalid MiB");
		usage();
	}

	bounces = atoi(argv[3]);
	if (bounces <= 0) {
		_err("invalid bounces");
		usage();
	}
	nr_pages = nr_pages_per_cpu * nr_cpus;

	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
	       nr_pages, nr_pages_per_cpu);
	return userfaultfd_stress();
}
#else /* __NR_userfaultfd */
#warning "missing __NR_userfaultfd definition"
int main(void)
{
printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
return KSFT_SKIP;
}
#endif /* __NR_userfaultfd */
| linux-master | tools/testing/selftests/mm/uffd-stress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hugepage-shm:
*
* Example of using huge page memory in a user application using Sys V shared
* memory system calls. In this example the app is requesting 256MB of
* memory that is backed by huge pages. The application uses the flag
* SHM_HUGETLB in the shmget system call to inform the kernel that it is
* requesting huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
* huge pages. That means that if one requires a fixed address, a huge page
* aligned address starting with 0x800000... will be required. If a fixed
* address is not required, the kernel will select an address in the proper
* range.
* Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*
* Note: The default shared memory limit is quite low on many kernels,
* you may need to increase it via:
*
* echo 268435456 > /proc/sys/kernel/shmmax
*
* This will increase the maximum size per shared memory segment to 256MB.
* The other limit that you will hit eventually is shmall which is the
* total amount of shared memory in pages. To set it to 16GB on a system
* with a 4kB pagesize do:
*
* echo 4194304 > /proc/sys/kernel/shmall
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#define LENGTH (256UL*1024*1024)
#define dprintf(x) printf(x)
/* Only ia64 requires this */
#ifdef __ia64__
#define ADDR (void *)(0x8000000000000000UL)
#define SHMAT_FLAGS (SHM_RND)
#else
#define ADDR (void *)(0x0UL)
#define SHMAT_FLAGS (0)
#endif
/*
 * Allocate a 256MB SysV shared-memory segment backed by huge pages
 * (SHM_HUGETLB), fill it with a byte pattern, verify it, then detach and
 * remove the segment.  Exit codes 1-4 identify the failing step.
 */
int main(void)
{
	int shmid;
	unsigned long idx;
	char *addr;

	shmid = shmget(2, LENGTH, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
	if (shmid < 0) {
		perror("shmget");
		exit(1);
	}
	printf("shmid: 0x%x\n", shmid);

	addr = shmat(shmid, ADDR, SHMAT_FLAGS);
	if (addr == (char *)-1) {
		perror("Shared memory attach failure");
		shmctl(shmid, IPC_RMID, NULL);
		exit(2);
	}
	printf("shmaddr: %p\n", addr);

	dprintf("Starting the writes:\n");
	for (idx = 0; idx < LENGTH; idx++) {
		addr[idx] = (char)(idx);
		/* progress dot once per MiB */
		if (!(idx % (1024 * 1024)))
			dprintf(".");
	}
	dprintf("\n");

	dprintf("Starting the Check...");
	for (idx = 0; idx < LENGTH; idx++) {
		if (addr[idx] != (char)idx) {
			printf("\nIndex %lu mismatched\n", idx);
			exit(3);
		}
	}
	dprintf("Done.\n");

	if (shmdt((const void *)addr) != 0) {
		perror("Detach failure");
		shmctl(shmid, IPC_RMID, NULL);
		exit(4);
	}

	shmctl(shmid, IPC_RMID, NULL);
	return 0;
}
| linux-master | tools/testing/selftests/mm/hugepage-shm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that MAP_FIXED_NOREPLACE works.
*
* Copyright 2018, Jann Horn <[email protected]>
* Copyright 2018, Michael Ellerman, IBM Corporation.
*/
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
/* Dump this process's /proc/<pid>/maps to stdout for debugging output. */
static void dump_maps(void)
{
	char shell_cmd[32];

	snprintf(shell_cmd, sizeof(shell_cmd), "cat /proc/%d/maps", getpid());
	/* best-effort diagnostic: result deliberately ignored */
	(void)system(shell_cmd);
}
/*
 * Find a currently-free address range of 'size' bytes by mapping and then
 * immediately unmapping an anonymous PROT_NONE region.  Returns the start
 * address (now unmapped, hence free to claim), or 0 on failure.
 *
 * Fix: the munmap() failure path printed the copy-pasted mmap error
 * message ("couldn't map"); report the actual failure.
 */
static unsigned long find_base_addr(unsigned long size)
{
	void *addr;
	unsigned long flags;

	flags = MAP_PRIVATE | MAP_ANONYMOUS;
	addr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
	if (addr == MAP_FAILED) {
		printf("Error: couldn't map the space we need for the test\n");
		return 0;
	}

	if (munmap(addr, size) != 0) {
		printf("Error: couldn't unmap the space we need for the test\n");
		return 0;
	}
	return (unsigned long)addr;
}
/*
 * Exercise MAP_FIXED_NOREPLACE semantics: set up a 5-page window with the
 * middle three pages mapped, then verify that mmap(MAP_FIXED_NOREPLACE)
 * fails whenever the request overlaps an existing mapping and succeeds
 * when the requested range is entirely free.  Returns 0 on success and
 * 1 on the first unexpected result (after dumping /proc/self/maps).
 */
int main(void)
{
	unsigned long base_addr;
	unsigned long flags, addr, size, page_size;
	char *p;

	page_size = sysconf(_SC_PAGE_SIZE);

	//let's find a base addr that is free before we start the tests
	size = 5 * page_size;
	base_addr = find_base_addr(size);
	if (!base_addr) {
		printf("Error: couldn't map the space we need for the test\n");
		return 1;
	}

	flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;

	// Check we can map all the areas we need below
	errno = 0;
	addr = base_addr;
	size = 5 * page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p == MAP_FAILED) {
		dump_maps();
		printf("Error: couldn't map the space we need for the test\n");
		return 1;
	}

	errno = 0;
	if (munmap((void *)addr, 5 * page_size) != 0) {
		dump_maps();
		printf("Error: munmap failed!?\n");
		return 1;
	}
	printf("unmap() successful\n");

	/* Establish the fixture: map pages +1..+3, leaving +0 and +4 free. */
	errno = 0;
	addr = base_addr + page_size;
	size = 3 * page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p == MAP_FAILED) {
		dump_maps();
		printf("Error: first mmap() failed unexpectedly\n");
		return 1;
	}

	/*
	 * Exact same mapping again (must fail, overlaps pages +1..+3):
	 * base | free | new
	 * +1 | mapped | new
	 * +2 | mapped | new
	 * +3 | mapped | new
	 * +4 | free | new
	 */
	errno = 0;
	addr = base_addr;
	size = 5 * page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p != MAP_FAILED) {
		dump_maps();
		printf("Error:1: mmap() succeeded when it shouldn't have\n");
		return 1;
	}

	/*
	 * Second mapping contained within first (must fail):
	 *
	 * base | free |
	 * +1 | mapped |
	 * +2 | mapped | new
	 * +3 | mapped |
	 * +4 | free |
	 */
	errno = 0;
	addr = base_addr + (2 * page_size);
	size = page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p != MAP_FAILED) {
		dump_maps();
		printf("Error:2: mmap() succeeded when it shouldn't have\n");
		return 1;
	}

	/*
	 * Overlap end of existing mapping (must fail):
	 * base | free |
	 * +1 | mapped |
	 * +2 | mapped |
	 * +3 | mapped | new
	 * +4 | free | new
	 */
	errno = 0;
	addr = base_addr + (3 * page_size);
	size = 2 * page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p != MAP_FAILED) {
		dump_maps();
		printf("Error:3: mmap() succeeded when it shouldn't have\n");
		return 1;
	}

	/*
	 * Overlap start of existing mapping (must fail):
	 * base | free | new
	 * +1 | mapped | new
	 * +2 | mapped |
	 * +3 | mapped |
	 * +4 | free |
	 */
	errno = 0;
	addr = base_addr;
	size = 2 * page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p != MAP_FAILED) {
		dump_maps();
		printf("Error:4: mmap() succeeded when it shouldn't have\n");
		return 1;
	}

	/*
	 * Adjacent to start of existing mapping (must succeed, no overlap):
	 * base | free | new
	 * +1 | mapped |
	 * +2 | mapped |
	 * +3 | mapped |
	 * +4 | free |
	 */
	errno = 0;
	addr = base_addr;
	size = page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p == MAP_FAILED) {
		dump_maps();
		printf("Error:5: mmap() failed when it shouldn't have\n");
		return 1;
	}

	/*
	 * Adjacent to end of existing mapping (must succeed, no overlap):
	 * base | free |
	 * +1 | mapped |
	 * +2 | mapped |
	 * +3 | mapped |
	 * +4 | free | new
	 */
	errno = 0;
	addr = base_addr + (4 * page_size);
	size = page_size;
	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
	if (p == MAP_FAILED) {
		dump_maps();
		printf("Error:6: mmap() failed when it shouldn't have\n");
		return 1;
	}

	/* Tear down the whole 5-page window in one go. */
	addr = base_addr;
	size = 5 * page_size;
	if (munmap((void *)addr, size) != 0) {
		dump_maps();
		printf("Error: munmap failed!?\n");
		return 1;
	}
	printf("unmap() successful\n");

	printf("OK\n");
	return 0;
}
| linux-master | tools/testing/selftests/mm/map_fixed_noreplace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 Google LLC
*/
#define _GNU_SOURCE
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <asm-generic/unistd.h>
#include "vm_util.h"
#include "../kselftest.h"
#define MB(x) (x << 20)
#define MAX_SIZE_MB 1024
/*
 * Child helper: map and fault in @nr_pages of anonymous memory, signal the
 * parent over @pipefd, then block until re-parented (i.e. the parent died)
 * or a 10s timeout expires.  Returns KSFT_PASS if the parent went away in
 * time, KSFT_FAIL on allocation/IPC failure or timeout.
 */
static int alloc_noexit(unsigned long nr_pages, int pipefd)
{
	int ppid = getppid();
	int timeout = 10; /* 10sec timeout to get killed */
	unsigned long i;
	char *buf;

	/* POSIX requires fd == -1 for anonymous mappings; 0 only works on Linux. */
	buf = mmap(NULL, nr_pages * psize(), PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap failed, halting the test");
		return KSFT_FAIL;
	}

	/* Touch every page so the memory is actually populated. */
	for (i = 0; i < nr_pages; i++)
		*((unsigned long *)(buf + (i * psize()))) = i;

	/* Signal the parent that the child is ready */
	if (write(pipefd, "", 1) < 0) {
		perror("write");
		return KSFT_FAIL;
	}

	/* Wait to be killed (when reparenting happens) */
	while (getppid() == ppid && timeout > 0) {
		sleep(1);
		timeout--;
	}

	munmap(buf, nr_pages * psize());
	return (timeout > 0) ? KSFT_PASS : KSFT_FAIL;
}
/*
 * The process_mrelease calls in this test are expected to fail.  On any
 * unexpected success (or wrong errno) the whole test run is aborted:
 * KSFT_SKIP when the syscall is missing (ENOSYS), KSFT_FAIL otherwise.
 * @pidfd must refer to a live child with no pending SIGKILL.
 */
static void run_negative_tests(int pidfd)
{
	int res;

	/* Test invalid flags. Expect to fail with EINVAL error code. */
	if (!syscall(__NR_process_mrelease, pidfd, (unsigned int)-1) ||
	    errno != EINVAL) {
		res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
		perror("process_mrelease with wrong flags");
		exit(res);
	}
	/*
	 * Test reaping while process is alive with no pending SIGKILL.
	 * Expect to fail with EINVAL error code.
	 */
	if (!syscall(__NR_process_mrelease, pidfd, 0) || errno != EINVAL) {
		res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
		perror("process_mrelease on a live process");
		exit(res);
	}
}
/*
 * Entry point for the forked child: close the unused read end of the pipe,
 * then allocate and fault in "size" MB and wait to be killed.  Returns the
 * KSFT_* status produced by alloc_noexit().
 */
static int child_main(int pipefd[], size_t size)
{
	int status;

	/* Allocate and fault-in memory and wait to be killed */
	close(pipefd[0]);
	status = alloc_noexit(MB(size) / psize(), pipefd[1]);
	close(pipefd[1]);
	return status;
}
/*
 * Parent logic: fork a child that allocates "size" MB, run negative tests
 * against the live child, SIGKILL it, and try to reap its memory with
 * process_mrelease().  If the child exits before we can reap it (ESRCH),
 * retry with twice the allocation — a bigger teardown gives us a larger
 * window — up to MAX_SIZE_MB.  Exits with a KSFT_* status.
 */
int main(void)
{
	int pipefd[2], pidfd;
	bool success, retry;
	size_t size;
	pid_t pid;
	char byte;
	int res;

	/* Test a wrong pidfd */
	if (!syscall(__NR_process_mrelease, -1, 0) || errno != EBADF) {
		res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
		perror("process_mrelease with wrong pidfd");
		exit(res);
	}

	/* Start the test with 1MB child memory allocation */
	size = 1;
retry:
	/*
	 * Pipe for the child to signal when it's done allocating
	 * memory
	 */
	if (pipe(pipefd)) {
		perror("pipe");
		exit(KSFT_FAIL);
	}

	pid = fork();
	if (pid < 0) {
		perror("fork");
		close(pipefd[0]);
		close(pipefd[1]);
		exit(KSFT_FAIL);
	}

	if (pid == 0) {
		/* Child main routine */
		res = child_main(pipefd, size);
		exit(res);
	}

	/*
	 * Parent main routine:
	 * Wait for the child to finish allocations, then kill and reap
	 */
	close(pipefd[1]);
	/* Block until the child is ready */
	res = read(pipefd[0], &byte, 1);
	close(pipefd[0]);
	if (res < 0) {
		perror("read");
		if (!kill(pid, SIGKILL))
			waitpid(pid, NULL, 0);
		exit(KSFT_FAIL);
	}

	pidfd = syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		if (!kill(pid, SIGKILL))
			waitpid(pid, NULL, 0);
		exit(KSFT_FAIL);
	}

	/* Run negative tests which require a live child */
	run_negative_tests(pidfd);

	if (kill(pid, SIGKILL)) {
		res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
		perror("kill");
		exit(res);
	}

	success = (syscall(__NR_process_mrelease, pidfd, 0) == 0);
	if (!success) {
		/*
		 * If we failed to reap because the child exited too soon,
		 * before we could call process_mrelease. Double child's memory
		 * which causes it to spend more time on cleanup and increases
		 * our chances of reaping its memory before it exits.
		 * Retry until we succeed or reach MAX_SIZE_MB.
		 */
		if (errno == ESRCH) {
			retry = (size <= MAX_SIZE_MB);
		} else {
			res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
			perror("process_mrelease");
			waitpid(pid, NULL, 0);
			exit(res);
		}
	}

	/* Cleanup to prevent zombies */
	if (waitpid(pid, NULL, 0) < 0) {
		perror("waitpid");
		exit(KSFT_FAIL);
	}
	close(pidfd);

	if (!success) {
		if (retry) {
			size *= 2;
			goto retry;
		}
		printf("All process_mrelease attempts failed!\n");
		exit(KSFT_FAIL);
	}

	printf("Success reaping a child with %zuMB of memory allocations\n",
	       size);
	return KSFT_PASS;
}
| linux-master | tools/testing/selftests/mm/mrelease_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A test case of using hugepage memory in a user application using the
* mmap system call with MAP_HUGETLB flag. Before running this program
* make sure the administrator has allocated enough default sized huge
* pages to cover the 2 MB allocation.
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#define MAP_LENGTH (2UL * 1024 * 1024)
#define PAGE_SIZE 4096
#define PAGE_COMPOUND_HEAD (1UL << 15)
#define PAGE_COMPOUND_TAIL (1UL << 16)
#define PAGE_HUGE (1UL << 17)
#define HEAD_PAGE_FLAGS (PAGE_COMPOUND_HEAD | PAGE_HUGE)
#define TAIL_PAGE_FLAGS (PAGE_COMPOUND_TAIL | PAGE_HUGE)
#define PM_PFRAME_BITS 55
#define PM_PFRAME_MASK ~((1UL << PM_PFRAME_BITS) - 1)
/*
* For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
* That means the addresses starting with 0x800000... will need to be
* specified. Specifying a fixed address is not required on ppc64, i386
* or x86_64.
*/
#ifdef __ia64__
#define MAP_ADDR (void *)(0x8000000000000000UL)
#define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
#else
#define MAP_ADDR NULL
#define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
#endif
/* Fill the buffer with a repeating byte pattern: addr[i] = (char)i. */
static void write_bytes(char *addr, size_t length)
{
	size_t idx;

	for (idx = 0; idx < length; idx++)
		addr[idx] = (char)idx;
}
/*
 * Look up the page frame number backing @addr via /proc/self/pagemap.
 * Returns the PFN field (low 55 bits) of the pagemap entry, or -1UL if the
 * pagemap cannot be opened or the entry cannot be read.  Note: without
 * CAP_SYS_ADMIN the kernel reports the PFN as 0.
 */
static unsigned long virt_to_pfn(void *addr)
{
	int fd;
	unsigned long pagemap;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return -1UL;

	/* Previously the seek/read results were ignored; report failure instead. */
	if (lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET) < 0 ||
	    read(fd, &pagemap, sizeof(pagemap)) != (ssize_t)sizeof(pagemap)) {
		close(fd);
		return -1UL;
	}
	close(fd);

	return pagemap & ~PM_PFRAME_MASK;
}
/*
 * Verify the kpageflags of the HugeTLB page starting at @pfn: the first
 * base page must carry the compound-head + huge flags, and every following
 * base page must be a compound tail (and not a head) — this also checks
 * that the fake page heads created when hugetlb_free_vmemmap is enabled
 * read back as tails.  Returns 0 on success, -1 on open/read failure or
 * unexpected flags.  Reading /proc/kpageflags requires root.
 */
static int check_page_flags(unsigned long pfn)
{
	int fd, i;
	unsigned long pageflags;

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return -1;

	/* Previously the seek/read results were ignored; fail explicitly. */
	if (lseek(fd, pfn * sizeof(pageflags), SEEK_SET) < 0 ||
	    read(fd, &pageflags, sizeof(pageflags)) != (ssize_t)sizeof(pageflags)) {
		close(fd);
		return -1;
	}

	if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
		close(fd);
		printf("Head page flags (%lx) is invalid\n", pageflags);
		return -1;
	}

	/*
	 * pages other than the first page must be tail and shouldn't be head;
	 * this also verifies kernel has correctly set the fake page_head to tail
	 * while hugetlb_free_vmemmap is enabled.
	 */
	for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
		if (read(fd, &pageflags, sizeof(pageflags)) != (ssize_t)sizeof(pageflags)) {
			close(fd);
			return -1;
		}
		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
			close(fd);
			printf("Tail page flags (%lx) is invalid\n", pageflags);
			return -1;
		}
	}

	close(fd);
	return 0;
}
/*
 * Map one default-sized HugeTLB page (2MB assumed via MAP_LENGTH), fault it
 * in, and check its kpageflags layout: one compound head followed by tails.
 * Exits non-zero on any failure.  Requires reserved hugepages; reading
 * /proc/kpageflags additionally requires root.
 */
int main(int argc, char **argv)
{
	void *addr;
	unsigned long pfn;

	addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* Trigger allocation of HugeTLB page. */
	write_bytes(addr, MAP_LENGTH);

	pfn = virt_to_pfn(addr);
	if (pfn == -1UL) {
		munmap(addr, MAP_LENGTH);
		perror("virt_to_pfn");
		exit(1);
	}

	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

	if (check_page_flags(pfn) < 0) {
		munmap(addr, MAP_LENGTH);
		perror("check_page_flags");
		exit(1);
	}

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, MAP_LENGTH)) {
		perror("munmap");
		exit(1);
	}

	return 0;
}
| linux-master | tools/testing/selftests/mm/hugepage-vmemmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
*
* Copyright 2021, Red Hat, Inc.
*
* Author(s): David Hildenbrand <[email protected]>
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/mman.h>
#include <sys/mman.h>
#include "../kselftest.h"
#include "vm_util.h"
/*
* For now, we're using 2 MiB of private anonymous memory for all tests.
*/
#define SIZE (2 * 1024 * 1024)
static size_t pagesize;
/*
 * Probe for MADV_POPULATE_READ/WRITE support on a throwaway page, skipping
 * the whole test run when the running kernel doesn't provide them.
 */
static void sense_support(void)
{
	char *addr;
	int ret;

	addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	/*
	 * mmap() reports failure as MAP_FAILED ((void *)-1), never NULL;
	 * the previous "!addr" check could never trigger, turning a mmap
	 * failure into a bogus "not available" skip below.
	 */
	if (addr == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	ret = madvise(addr, pagesize, MADV_POPULATE_READ);
	if (ret)
		ksft_exit_skip("MADV_POPULATE_READ is not available\n");

	ret = madvise(addr, pagesize, MADV_POPULATE_WRITE);
	if (ret)
		ksft_exit_skip("MADV_POPULATE_WRITE is not available\n");

	munmap(addr, pagesize);
}
/*
 * On a PROT_READ-only mapping, MADV_POPULATE_READ must succeed while
 * MADV_POPULATE_WRITE must be rejected with EINVAL.
 */
static void test_prot_read(void)
{
	char *map;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap(0, SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	ksft_test_result(madvise(map, SIZE, MADV_POPULATE_READ) == 0,
			 "MADV_POPULATE_READ with PROT_READ\n");

	ksft_test_result(madvise(map, SIZE, MADV_POPULATE_WRITE) == -1 &&
			 errno == EINVAL,
			 "MADV_POPULATE_WRITE with PROT_READ\n");

	munmap(map, SIZE);
}
/*
 * On a PROT_WRITE-only mapping, MADV_POPULATE_READ must be rejected with
 * EINVAL while MADV_POPULATE_WRITE must succeed.
 */
static void test_prot_write(void)
{
	char *map;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap(0, SIZE, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	ksft_test_result(madvise(map, SIZE, MADV_POPULATE_READ) == -1 &&
			 errno == EINVAL,
			 "MADV_POPULATE_READ with PROT_WRITE\n");

	ksft_test_result(madvise(map, SIZE, MADV_POPULATE_WRITE) == 0,
			 "MADV_POPULATE_WRITE with PROT_WRITE\n");

	munmap(map, SIZE);
}
/*
 * MADV_POPULATE_READ/WRITE must fail with ENOMEM if any part of the range
 * is not backed by a VMA.  Punch a one-page hole into a mapping and probe
 * ranges with the hole in the middle, at the end and at the beginning.
 */
static void test_holes(void)
{
	char *addr;
	int ret;

	ksft_print_msg("[RUN] %s\n", __func__);

	addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (addr == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");
	/* Punch a hole after the first page. */
	ret = munmap(addr + pagesize, pagesize);
	if (ret)
		ksft_exit_fail_msg("munmap failed\n");

	/* Hole in the middle */
	ret = madvise(addr, SIZE, MADV_POPULATE_READ);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_READ with holes in the middle\n");
	ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_WRITE with holes in the middle\n");

	/* Hole at end */
	ret = madvise(addr, 2 * pagesize, MADV_POPULATE_READ);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_READ with holes at the end\n");
	ret = madvise(addr, 2 * pagesize, MADV_POPULATE_WRITE);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_WRITE with holes at the end\n");

	/* Hole at beginning */
	ret = madvise(addr + pagesize, pagesize, MADV_POPULATE_READ);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_READ with holes at the beginning\n");
	ret = madvise(addr + pagesize, pagesize, MADV_POPULATE_WRITE);
	ksft_test_result(ret == -1 && errno == ENOMEM,
			 "MADV_POPULATE_WRITE with holes at the beginning\n");

	munmap(addr, SIZE);
}
/* Return true iff every page in [start, start + size) is populated. */
static bool range_is_populated(char *start, ssize_t size)
{
	bool all_populated = true;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		ksft_exit_fail_msg("opening pagemap failed\n");

	while (size > 0 && all_populated) {
		if (!pagemap_is_populated(fd, start))
			all_populated = false;
		size -= pagesize;
		start += pagesize;
	}

	close(fd);
	return all_populated;
}
/* Return true iff no page in [start, start + size) is populated. */
static bool range_is_not_populated(char *start, ssize_t size)
{
	bool none_populated = true;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		ksft_exit_fail_msg("opening pagemap failed\n");

	while (size > 0 && none_populated) {
		if (pagemap_is_populated(fd, start))
			none_populated = false;
		size -= pagesize;
		start += pagesize;
	}

	close(fd);
	return none_populated;
}
/*
 * MADV_POPULATE_READ on a fresh R/W anonymous mapping must populate the
 * whole (initially unpopulated) range.
 */
static void test_populate_read(void)
{
	char *map;
	int rc;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap(0, SIZE, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	ksft_test_result(range_is_not_populated(map, SIZE),
			 "range initially not populated\n");

	rc = madvise(map, SIZE, MADV_POPULATE_READ);
	ksft_test_result(!rc, "MADV_POPULATE_READ\n");
	ksft_test_result(range_is_populated(map, SIZE),
			 "range is populated\n");

	munmap(map, SIZE);
}
/*
 * MADV_POPULATE_WRITE on a fresh R/W anonymous mapping must populate the
 * whole (initially unpopulated) range.
 */
static void test_populate_write(void)
{
	char *map;
	int rc;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap(0, SIZE, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (map == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	ksft_test_result(range_is_not_populated(map, SIZE),
			 "range initially not populated\n");

	rc = madvise(map, SIZE, MADV_POPULATE_WRITE);
	ksft_test_result(!rc, "MADV_POPULATE_WRITE\n");
	ksft_test_result(range_is_populated(map, SIZE),
			 "range is populated\n");

	munmap(map, SIZE);
}
/* Return true iff every page in [start, start + size) is soft-dirty. */
static bool range_is_softdirty(char *start, ssize_t size)
{
	bool all_dirty = true;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		ksft_exit_fail_msg("opening pagemap failed\n");

	while (size > 0 && all_dirty) {
		if (!pagemap_is_softdirty(fd, start))
			all_dirty = false;
		size -= pagesize;
		start += pagesize;
	}

	close(fd);
	return all_dirty;
}
/* Return true iff no page in [start, start + size) is soft-dirty. */
static bool range_is_not_softdirty(char *start, ssize_t size)
{
	bool none_dirty = true;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		ksft_exit_fail_msg("opening pagemap failed\n");

	while (size > 0 && none_dirty) {
		if (pagemap_is_softdirty(fd, start))
			none_dirty = false;
		size -= pagesize;
		start += pagesize;
	}

	close(fd);
	return none_dirty;
}
/*
 * Check the interaction of the populate hints with the soft-dirty bit:
 * MADV_POPULATE_READ performs no writes and must leave the range clean,
 * while MADV_POPULATE_WRITE actually write-faults and must mark it dirty.
 */
static void test_softdirty(void)
{
	char *addr;
	int ret;

	ksft_print_msg("[RUN] %s\n", __func__);

	addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (addr == MAP_FAILED)
		ksft_exit_fail_msg("mmap failed\n");

	/* Clear any softdirty bits. */
	clear_softdirty();
	ksft_test_result(range_is_not_softdirty(addr, SIZE),
			 "range is not softdirty\n");

	/* Populating READ must not set softdirty — nothing was written. */
	ret = madvise(addr, SIZE, MADV_POPULATE_READ);
	ksft_test_result(!ret, "MADV_POPULATE_READ\n");
	ksft_test_result(range_is_not_softdirty(addr, SIZE),
			 "range is not softdirty\n");

	/* Populating WRITE should set softdirty. */
	ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
	ksft_test_result(!ret, "MADV_POPULATE_WRITE\n");
	ksft_test_result(range_is_softdirty(addr, SIZE),
			 "range is softdirty\n");

	munmap(addr, SIZE);
}
/*
 * Best-effort guess whether this system supports the soft-dirty bit:
 * returns 1 if it presumably does, 0 if it's known not to (arm64).
 */
static int system_has_softdirty(void)
{
	/*
	 * There is no way to check if the kernel supports soft-dirty, other
	 * than by writing to a page and seeing if the bit was set. But the
	 * tests are intended to check that the bit gets set when it should, so
	 * doing that check would turn a potentially legitimate fail into a
	 * skip. Fortunately, we know for sure that arm64 does not support
	 * soft-dirty. So for now, let's just use the arch as a coarse guide.
	 */
#if defined(__aarch64__)
	return 0;
#else
	return 1;
#endif
}
/*
 * Register the test plan (16 results, +5 when soft-dirty is expected to
 * work on this arch), verify kernel support for MADV_POPULATE_READ/WRITE,
 * and run all sub-tests.  Exits with a KSFT_* status.
 */
int main(int argc, char **argv)
{
	int nr_tests = 16;
	int err;

	pagesize = getpagesize();

	/* The soft-dirty sub-test contributes 5 additional test results. */
	if (system_has_softdirty())
		nr_tests += 5;

	ksft_print_header();
	ksft_set_plan(nr_tests);

	sense_support();
	test_prot_read();
	test_prot_write();
	test_holes();
	test_populate_read();
	test_populate_write();
	if (system_has_softdirty())
		test_softdirty();

	err = ksft_get_fail_cnt();
	if (err)
		ksft_exit_fail_msg("%d out of %d tests failed\n",
				   err, ksft_test_num());
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/madv_populate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KSM functional tests
*
* Copyright 2022, Red Hat, Inc.
*
* Author(s): David Hildenbrand <[email protected]>
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <linux/userfaultfd.h>
#include "../kselftest.h"
#include "vm_util.h"
#define KiB 1024u
#define MiB (1024 * KiB)
static int mem_fd;
static int ksm_fd;
static int ksm_full_scans_fd;
static int proc_self_ksm_stat_fd;
static int proc_self_ksm_merging_pages_fd;
static int ksm_use_zero_pages_fd;
static int pagemap_fd;
static size_t pagesize;
/*
 * Return true if any two pages in [addr, addr + size) are backed by the
 * same PFN, i.e. some pages are still KSM-merged.  Pages that are not
 * present, or whose PFN is hidden by the kernel (no privileges), are
 * skipped.  The pairwise comparison is O(n^2) in pages, which is fine
 * for the small ranges used by these tests.
 */
static bool range_maps_duplicates(char *addr, unsigned long size)
{
	unsigned long offs_a, offs_b, pfn_a, pfn_b;

	/*
	 * There is no easy way to check if there are KSM pages mapped into
	 * this range. We only check that the range does not map the same PFN
	 * twice by comparing each pair of mapped pages.
	 */
	for (offs_a = 0; offs_a < size; offs_a += pagesize) {
		pfn_a = pagemap_get_pfn(pagemap_fd, addr + offs_a);
		/* Page not present or PFN not exposed by the kernel. */
		if (pfn_a == -1ul || !pfn_a)
			continue;

		for (offs_b = offs_a + pagesize; offs_b < size;
		     offs_b += pagesize) {
			pfn_b = pagemap_get_pfn(pagemap_fd, addr + offs_b);
			if (pfn_b == -1ul || !pfn_b)
				continue;
			if (pfn_a == pfn_b)
				return true;
		}
	}
	return false;
}
/*
 * Return this process's "ksm_zero_pages" counter parsed out of
 * /proc/self/ksm_stat, 0 when the file or the field is unavailable,
 * or -errno on a read failure.
 */
static long get_my_ksm_zero_pages(void)
{
	char buf[200];
	char *substr_ksm_zero;
	size_t value_pos;
	ssize_t read_size;
	unsigned long my_ksm_zero_pages;

	/*
	 * open() reports failure as -1, not 0, so check for a negative fd —
	 * matching get_my_merging_pages().  The old "!fd" check let a failed
	 * open fall through to pread(-1) and return -EBADF instead of 0.
	 */
	if (proc_self_ksm_stat_fd < 0)
		return 0;

	read_size = pread(proc_self_ksm_stat_fd, buf, sizeof(buf) - 1, 0);
	if (read_size < 0)
		return -errno;

	buf[read_size] = 0;

	substr_ksm_zero = strstr(buf, "ksm_zero_pages");
	if (!substr_ksm_zero)
		return 0;

	value_pos = strcspn(substr_ksm_zero, "0123456789");
	my_ksm_zero_pages = strtol(substr_ksm_zero + value_pos, NULL, 10);

	return my_ksm_zero_pages;
}
/*
 * Return this process's "ksm_merging_pages" counter, or a negative value
 * when the proc file could not be opened or read.
 */
static long get_my_merging_pages(void)
{
	char buf[10];
	ssize_t nread;

	if (proc_self_ksm_merging_pages_fd < 0)
		return proc_self_ksm_merging_pages_fd;

	nread = pread(proc_self_ksm_merging_pages_fd, buf, sizeof(buf) - 1, 0);
	if (nread <= 0)
		return -errno;
	buf[nread] = 0;

	return strtol(buf, NULL, 10);
}
/* Read the number of completed KSM scan passes; negative errno on failure. */
static long ksm_get_full_scans(void)
{
	char buf[10];
	ssize_t nread;

	nread = pread(ksm_full_scans_fd, buf, sizeof(buf) - 1, 0);
	if (nread <= 0)
		return -errno;
	buf[nread] = 0;

	return strtol(buf, NULL, 10);
}
/*
 * Start the KSM scanner and wait until two more full passes completed, so
 * any possible merging has happened.  Returns 0 on success or a negative
 * errno-style value on failure.
 */
static int ksm_merge(void)
{
	long scans_before, scans_now;

	scans_before = ksm_get_full_scans();
	if (scans_before < 0)
		return scans_before;

	if (write(ksm_fd, "1", 1) != 1)
		return -errno;

	for (;;) {
		scans_now = ksm_get_full_scans();
		if (scans_now < 0)
			return scans_now;
		if (scans_now >= scans_before + 2)
			break;
	}
	return 0;
}
/* Tell KSM to unmerge all currently merged pages.  Returns 0 or -errno. */
static int ksm_unmerge(void)
{
	const char cmd = '2';

	if (write(ksm_fd, &cmd, 1) != 1)
		return -errno;
	return 0;
}
/*
 * Map an anonymous range of @size bytes, fill it with @val, apply @prot and
 * let KSM merge the identical pages — via prctl(PR_SET_MEMORY_MERGE) when
 * @use_prctl is set, via MADV_MERGEABLE otherwise.  Reports the test
 * failure/skip itself and returns the mapping or MAP_FAILED on error.
 */
static char *mmap_and_merge_range(char val, unsigned long size, int prot,
				  bool use_prctl)
{
	char *map;
	int ret;

	/*
	 * Stabilize accounting by disabling KSM completely.  No mapping
	 * exists yet at this point, so bail out directly: the old code
	 * jumped to the unmap label here and called munmap() on an
	 * uninitialized pointer.
	 */
	if (ksm_unmerge()) {
		ksft_test_result_fail("Disabling (unmerging) KSM failed\n");
		return MAP_FAILED;
	}
	if (get_my_merging_pages() > 0) {
		ksft_test_result_fail("Still pages merged\n");
		return MAP_FAILED;
	}

	map = mmap(NULL, size, PROT_READ|PROT_WRITE,
		   MAP_PRIVATE|MAP_ANON, -1, 0);
	if (map == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return MAP_FAILED;
	}

	/* Don't use THP. Ignore if THP are not around on a kernel. */
	if (madvise(map, size, MADV_NOHUGEPAGE) && errno != EINVAL) {
		ksft_test_result_fail("MADV_NOHUGEPAGE failed\n");
		goto unmap;
	}

	/* Make sure each page contains the same values to merge them. */
	memset(map, val, size);

	if (mprotect(map, size, prot)) {
		ksft_test_result_skip("mprotect() failed\n");
		goto unmap;
	}

	if (use_prctl) {
		ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
		if (ret < 0 && errno == EINVAL) {
			ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n");
			goto unmap;
		} else if (ret) {
			ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 failed\n");
			goto unmap;
		}
	} else if (madvise(map, size, MADV_MERGEABLE)) {
		ksft_test_result_fail("MADV_MERGEABLE failed\n");
		goto unmap;
	}

	/* Run KSM to trigger merging and wait. */
	if (ksm_merge()) {
		ksft_test_result_fail("Running KSM failed\n");
		goto unmap;
	}

	/*
	 * Check if anything was merged at all. Ignore the zero page that is
	 * accounted differently (depending on kernel support).
	 */
	if (val && !get_my_merging_pages()) {
		ksft_test_result_fail("No pages got merged\n");
		goto unmap;
	}

	return map;
unmap:
	munmap(map, size);
	return MAP_FAILED;
}
/* Merge a range, then verify MADV_UNMERGEABLE breaks all sharing again. */
static void test_unmerge(void)
{
	const unsigned int size = 2 * MiB;
	char *map;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false);
	if (map == MAP_FAILED)
		return;

	if (madvise(map, size, MADV_UNMERGEABLE))
		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
	else
		ksft_test_result(!range_maps_duplicates(map, size),
				 "Pages were unmerged\n");

	munmap(map, size);
}
/*
 * Verify that the per-process "ksm_zero_pages" counter tracks KSM-placed
 * zeropages: it must equal the page count after merging a zero-filled
 * range, halve after unmerging half the range, and drop to zero once the
 * remaining pages are unshared by write faults.
 */
static void test_unmerge_zero_pages(void)
{
	const unsigned int size = 2 * MiB;
	char *map;
	unsigned int offs;
	unsigned long pages_expected;

	ksft_print_msg("[RUN] %s\n", __func__);

	if (proc_self_ksm_stat_fd < 0) {
		ksft_test_result_skip("open(\"/proc/self/ksm_stat\") failed\n");
		return;
	}
	if (ksm_use_zero_pages_fd < 0) {
		ksft_test_result_skip("open \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
		return;
	}
	if (write(ksm_use_zero_pages_fd, "1", 1) != 1) {
		ksft_test_result_skip("write \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
		return;
	}

	/* Let KSM deduplicate zero pages. */
	map = mmap_and_merge_range(0x00, size, PROT_READ | PROT_WRITE, false);
	if (map == MAP_FAILED)
		return;

	/* Check if ksm_zero_pages is updated correctly after KSM merging */
	pages_expected = size / pagesize;
	if (pages_expected != get_my_ksm_zero_pages()) {
		ksft_test_result_fail("'ksm_zero_pages' updated after merging\n");
		goto unmap;
	}

	/* Try to unmerge half of the region */
	if (madvise(map, size / 2, MADV_UNMERGEABLE)) {
		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
		goto unmap;
	}

	/* Check if ksm_zero_pages is updated correctly after unmerging */
	pages_expected /= 2;
	if (pages_expected != get_my_ksm_zero_pages()) {
		ksft_test_result_fail("'ksm_zero_pages' updated after unmerging\n");
		goto unmap;
	}

	/* Trigger unmerging of the other half by writing to the pages. */
	for (offs = size / 2; offs < size; offs += pagesize)
		*((unsigned int *)&map[offs]) = offs;

	/* Now we should have no zeropages remaining. */
	if (get_my_ksm_zero_pages()) {
		ksft_test_result_fail("'ksm_zero_pages' updated after write fault\n");
		goto unmap;
	}

	/* Check if ksm zero pages are really unmerged */
	ksft_test_result(!range_maps_duplicates(map, size),
			 "KSM zero pages were unmerged\n");
unmap:
	munmap(map, size);
}
/*
 * Merge a range, discard half of it (creating pte_none() entries), then
 * verify MADV_UNMERGEABLE still unmerges everything that remains mapped.
 */
static void test_unmerge_discarded(void)
{
	const unsigned int size = 2 * MiB;
	char *map;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false);
	if (map == MAP_FAILED)
		return;

	/* Discard half of all mapped pages so we have pte_none() entries. */
	if (madvise(map, size / 2, MADV_DONTNEED))
		ksft_test_result_fail("MADV_DONTNEED failed\n");
	else if (madvise(map, size, MADV_UNMERGEABLE))
		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
	else
		ksft_test_result(!range_maps_duplicates(map, size),
				 "Pages were unmerged\n");

	munmap(map, size);
}
#ifdef __NR_userfaultfd
/*
 * Verify that MADV_UNMERGEABLE still breaks KSM sharing when the merged
 * range is write-protected via userfaultfd (UFFD-WP).  Skips when
 * userfaultfd or the WP feature is unavailable.
 */
static void test_unmerge_uffd_wp(void)
{
	struct uffdio_writeprotect uffd_writeprotect;
	const unsigned int size = 2 * MiB;
	struct uffdio_api uffdio_api;
	char *map;
	int uffd;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, false);
	if (map == MAP_FAILED)
		return;

	/* See if UFFD is around. */
	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		ksft_test_result_skip("__NR_userfaultfd failed\n");
		goto unmap;
	}

	/* See if UFFD-WP is around. */
	uffdio_api.api = UFFD_API;
	uffdio_api.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
		ksft_test_result_fail("UFFDIO_API failed\n");
		goto close_uffd;
	}
	if (!(uffdio_api.features & UFFD_FEATURE_PAGEFAULT_FLAG_WP)) {
		ksft_test_result_skip("UFFD_FEATURE_PAGEFAULT_FLAG_WP not available\n");
		goto close_uffd;
	}

	/* Register UFFD-WP, no need for an actual handler. */
	if (uffd_register(uffd, map, size, false, true, false)) {
		ksft_test_result_fail("UFFDIO_REGISTER_MODE_WP failed\n");
		goto close_uffd;
	}

	/* Write-protect the range using UFFD-WP. */
	uffd_writeprotect.range.start = (unsigned long) map;
	uffd_writeprotect.range.len = size;
	uffd_writeprotect.mode = UFFDIO_WRITEPROTECT_MODE_WP;
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &uffd_writeprotect)) {
		ksft_test_result_fail("UFFDIO_WRITEPROTECT failed\n");
		goto close_uffd;
	}

	if (madvise(map, size, MADV_UNMERGEABLE)) {
		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
		goto close_uffd;
	}

	ksft_test_result(!range_maps_duplicates(map, size),
			 "Pages were unmerged\n");
close_uffd:
	close(uffd);
unmap:
	munmap(map, size);
}
#endif
/* Verify that KSM can be enabled / queried with prctl. */
static void test_prctl(void)
{
	int ret;

	ksft_print_msg("[RUN] %s\n", __func__);

	/* Enable the per-process KSM flag; skip if the kernel lacks support. */
	ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
	if (ret < 0 && errno == EINVAL) {
		ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n");
		return;
	} else if (ret) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 failed\n");
		return;
	}

	/* The flag must read back as 1 while enabled ... */
	ret = prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0);
	if (ret < 0) {
		ksft_test_result_fail("PR_GET_MEMORY_MERGE failed\n");
		return;
	} else if (ret != 1) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 not effective\n");
		return;
	}

	ret = prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
	if (ret) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 failed\n");
		return;
	}

	/* ... and as 0 again after clearing it. */
	ret = prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0);
	if (ret < 0) {
		ksft_test_result_fail("PR_GET_MEMORY_MERGE failed\n");
		return;
	} else if (ret != 0) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 not effective\n");
		return;
	}

	ksft_test_result_pass("Setting/clearing PR_SET_MEMORY_MERGE works\n");
}
/* Verify that prctl ksm flag is inherited. */
static void test_prctl_fork(void)
{
	int ret, status;
	pid_t child_pid;

	ksft_print_msg("[RUN] %s\n", __func__);

	ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
	if (ret < 0 && errno == EINVAL) {
		ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n");
		return;
	} else if (ret) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 failed\n");
		return;
	}

	child_pid = fork();
	if (!child_pid) {
		/* Child: report the inherited flag value via the exit status. */
		exit(prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));
	} else if (child_pid < 0) {
		ksft_test_result_fail("fork() failed\n");
		return;
	}

	/* An exit status of 1 means the child saw the flag set. */
	if (waitpid(child_pid, &status, 0) < 0) {
		ksft_test_result_fail("waitpid() failed\n");
		return;
	} else if (WEXITSTATUS(status) != 1) {
		ksft_test_result_fail("unexpected PR_GET_MEMORY_MERGE result in child\n");
		return;
	}

	/* Restore the parent's state for subsequent tests. */
	if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0)) {
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 failed\n");
		return;
	}

	ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n");
}
/*
 * Merge a range via prctl(PR_SET_MEMORY_MERGE) and verify that clearing
 * the flag unmerges the pages again.
 */
static void test_prctl_unmerge(void)
{
	const unsigned int size = 2 * MiB;
	char *map;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, true);
	if (map == MAP_FAILED)
		return;

	if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
		ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 failed\n");
	else
		ksft_test_result(!range_maps_duplicates(map, size),
				 "Pages were unmerged\n");

	munmap(map, size);
}
/*
 * Merge a PROT_NONE range, unshare one half by writing unique values
 * through /proc/self/mem (ptrace-style access bypasses the protection)
 * and the other half via MADV_UNMERGEABLE, then verify no duplicated
 * PFNs remain.
 */
static void test_prot_none(void)
{
	const unsigned int size = 2 * MiB;
	char *map;
	int i;

	ksft_print_msg("[RUN] %s\n", __func__);

	map = mmap_and_merge_range(0x11, size, PROT_NONE, false);
	/*
	 * Nothing was mapped on failure: return directly.  The old code
	 * jumped to the unmap label here, calling munmap(MAP_FAILED, size).
	 */
	if (map == MAP_FAILED)
		return;

	/* Store a unique value in each page on one half using ptrace */
	for (i = 0; i < size / 2; i += pagesize) {
		lseek(mem_fd, (uintptr_t) map + i, SEEK_SET);
		if (write(mem_fd, &i, sizeof(i)) != sizeof(i)) {
			ksft_test_result_fail("ptrace write failed\n");
			goto unmap;
		}
	}

	/* Trigger unsharing on the other half. */
	if (madvise(map + size / 2, size / 2, MADV_UNMERGEABLE)) {
		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
		goto unmap;
	}

	ksft_test_result(!range_maps_duplicates(map, size),
			 "Pages were unmerged\n");
unmap:
	munmap(map, size);
}
/*
 * Open the procfs/sysfs handles the KSM tests need — failing hard only on
 * /proc/self/mem, skipping the run when KSM itself is unavailable — then
 * execute all sub-tests and exit with a KSFT_* status.
 */
int main(int argc, char **argv)
{
	unsigned int tests = 7;
	int err;

#ifdef __NR_userfaultfd
	tests++;
#endif

	ksft_print_header();
	ksft_set_plan(tests);

	pagesize = getpagesize();

	mem_fd = open("/proc/self/mem", O_RDWR);
	if (mem_fd < 0)
		ksft_exit_fail_msg("opening /proc/self/mem failed\n");
	ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
	if (ksm_fd < 0)
		ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
	ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
	if (ksm_full_scans_fd < 0)
		ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	if (pagemap_fd < 0)
		ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
	/* Optional handles: sub-tests that need them skip when they are < 0. */
	proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
	proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
					      O_RDONLY);
	ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);

	test_unmerge();
	test_unmerge_zero_pages();
	test_unmerge_discarded();
#ifdef __NR_userfaultfd
	test_unmerge_uffd_wp();
#endif

	test_prot_none();

	test_prctl();
	test_prctl_fork();
	test_prctl_unmerge();

	err = ksft_get_fail_cnt();
	if (err)
		ksft_exit_fail_msg("%d out of %d tests failed\n",
				   err, ksft_test_num());
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/mm/ksm_functional_tests.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef __aarch64__
#include <asm/hwcap.h>
#endif
#include <linux/mman.h>
#include <linux/prctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include "../kselftest_harness.h"
#ifndef __aarch64__
# define PROT_BTI 0
#endif
TEST(prctl_flags)
{
	int i;

	/*
	 * PR_SET_MDWE/PR_GET_MDWE must reject an invalid value (7) in any
	 * of the four argument positions; exercise each position in turn.
	 */
	for (i = 0; i < 4; i++) {
		long args[4] = {0L, 0L, 0L, 0L};

		args[i] = 7L;
		EXPECT_LT(prctl(PR_SET_MDWE, args[0], args[1], args[2],
				args[3]), 0);
		EXPECT_LT(prctl(PR_GET_MDWE, args[0], args[1], args[2],
				args[3]), 0);
	}
}
/* Per-test state: the mapping under test plus fork-mode bookkeeping. */
FIXTURE(mdwe)
{
	void *p;	/* last mapping made by the test; munmap'd in teardown */
	int flags;	/* mmap flags used by every test (MAP_SHARED|MAP_ANONYMOUS) */
	size_t size;	/* one page (set from getpagesize() in setup) */
	pid_t pid;	/* child pid when running the "forked" variant */
};

/* Variant axes: is MDWE enabled, and does the test body run in a fork()ed child? */
FIXTURE_VARIANT(mdwe)
{
	bool enabled;
	bool forked;
};

/* MDWE left disabled: every mapping/protection change must succeed. */
FIXTURE_VARIANT_ADD(mdwe, stock)
{
	.enabled = false,
	.forked = false,
};

/* MDWE enabled in this process. */
FIXTURE_VARIANT_ADD(mdwe, enabled)
{
	.enabled = true,
	.forked = false,
};

/* MDWE enabled, test body runs in a child to check inheritance across fork(). */
FIXTURE_VARIANT_ADD(mdwe, forked)
{
	.enabled = true,
	.forked = true,
};
FIXTURE_SETUP(mdwe)
{
	int ret, status;

	self->p = NULL;
	self->flags = MAP_SHARED | MAP_ANONYMOUS;
	self->size = getpagesize();

	/* The "stock" variant runs with MDWE left disabled. */
	if (!variant->enabled)
		return;

	ret = prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L);
	ASSERT_EQ(ret, 0) {
		TH_LOG("PR_SET_MDWE failed or unsupported");
	}

	/* PR_GET_MDWE must report back the flag we just set. */
	ret = prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L);
	ASSERT_EQ(ret, 1);

	if (variant->forked) {
		/*
		 * Run the test body in a child: the child returns from
		 * setup and executes the test, while the parent only waits
		 * and then exits with the child's status.  This checks
		 * that MDWE is inherited across fork().
		 */
		self->pid = fork();
		ASSERT_GE(self->pid, 0) {
			TH_LOG("fork failed\n");
		}

		if (self->pid > 0) {
			ret = waitpid(self->pid, &status, 0);
			ASSERT_TRUE(WIFEXITED(status));
			exit(WEXITSTATUS(status));
		}
	}
}
FIXTURE_TEARDOWN(mdwe)
{
if (self->p && self->p != MAP_FAILED)
munmap(self->p, self->size);
}
TEST_F(mdwe, mmap_READ_EXEC)
{
	/* A fresh PROT_READ|PROT_EXEC mapping is allowed even under MDWE. */
	const int prot = PROT_READ | PROT_EXEC;

	self->p = mmap(NULL, self->size, prot, self->flags, 0, 0);
	EXPECT_NE(self->p, MAP_FAILED);
}
TEST_F(mdwe, mmap_WRITE_EXEC)
{
	/* Mapping writable+executable memory must fail once MDWE is on. */
	const int prot = PROT_WRITE | PROT_EXEC;

	self->p = mmap(NULL, self->size, prot, self->flags, 0, 0);
	if (!variant->enabled) {
		EXPECT_NE(self->p, MAP_FAILED);
	} else {
		EXPECT_EQ(self->p, MAP_FAILED);
	}
}
TEST_F(mdwe, mprotect_stay_EXEC)
{
	/* Re-applying the same R|X protection is not an exec "gain". */
	const int prot = PROT_READ | PROT_EXEC;
	int rc;

	self->p = mmap(NULL, self->size, prot, self->flags, 0, 0);
	ASSERT_NE(self->p, MAP_FAILED);

	rc = mprotect(self->p, self->size, prot);
	EXPECT_EQ(rc, 0);
}
TEST_F(mdwe, mprotect_add_EXEC)
{
	/* Upgrading a read-only mapping to executable must fail under MDWE. */
	int rc;

	self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
	ASSERT_NE(self->p, MAP_FAILED);

	rc = mprotect(self->p, self->size, PROT_READ | PROT_EXEC);
	if (!variant->enabled) {
		EXPECT_EQ(rc, 0);
	} else {
		EXPECT_LT(rc, 0);
	}
}
TEST_F(mdwe, mprotect_WRITE_EXEC)
{
	/* Making a writable mapping executable as well must fail under MDWE. */
	int rc;

	self->p = mmap(NULL, self->size, PROT_WRITE, self->flags, 0, 0);
	ASSERT_NE(self->p, MAP_FAILED);

	rc = mprotect(self->p, self->size, PROT_WRITE | PROT_EXEC);
	if (!variant->enabled) {
		EXPECT_EQ(rc, 0);
	} else {
		EXPECT_LT(rc, 0);
	}
}
TEST_F(mdwe, mmap_FIXED)
{
	void *p;

	self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
	ASSERT_NE(self->p, MAP_FAILED);

	/*
	 * MAP_FIXED atomically unmaps the existing page before installing
	 * the new one, so this is a *fresh* PROT_READ|PROT_EXEC mapping
	 * (like mmap_READ_EXEC above) and must succeed in every variant,
	 * MDWE enabled or not.  The old expectation that this fails under
	 * MDWE was wrong, and the old success branch compared against the
	 * wrong address (self->p instead of the MAP_FIXED target).
	 */
	p = mmap(self->p, self->size, PROT_READ | PROT_EXEC,
		 self->flags | MAP_FIXED, 0, 0);
	EXPECT_EQ(p, self->p);
}
TEST_F(mdwe, arm64_BTI)
{
	int ret;

#ifdef __aarch64__
	/*
	 * On arm64, skip only when the CPU lacks BTI.  On every other
	 * architecture the "if" below is compiled out entirely, so the
	 * SKIP runs unconditionally (PROT_BTI is defined as 0 there).
	 */
	if (!(getauxval(AT_HWCAP2) & HWCAP2_BTI))
#endif
		SKIP(return, "HWCAP2_BTI not supported");

	/* Adding PROT_BTI to an already-executable mapping must be allowed. */
	self->p = mmap(NULL, self->size, PROT_EXEC, self->flags, 0, 0);
	ASSERT_NE(self->p, MAP_FAILED);

	ret = mprotect(self->p, self->size, PROT_EXEC | PROT_BTI);
	EXPECT_EQ(ret, 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/mm/mdwe_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tests Memory Protection Keys (see Documentation/core-api/protection-keys.rst)
*
* There are examples in here of:
* * how to set protection keys on memory
* * how to set/clear bits in pkey registers (the rights register)
* * how to handle SEGV_PKUERR signals and extract pkey-relevant
* information from the siginfo
*
* Things to add:
* make sure KSM and KSM COW breaking works
* prefault pages in at malloc, or not
* protect MPX bounds tables with protection keys?
* make sure VMA splitting/merging is working correctly
* OOMs can destroy mm->mmap (see exit_mmap()), so make sure it is immune to pkeys
* look for pkey "leaks" where it is still set on a VMA but "freed" back to the kernel
* do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
*
* Compile like this:
* gcc -mxsave -o protection_keys -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
* gcc -mxsave -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
*/
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__
#include <errno.h>
#include <linux/elf.h>
#include <linux/futex.h>
#include <time.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <ucontext.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <setjmp.h>
#include "pkey-helpers.h"
/* Global test-run counters. */
int iteration_nr = 1;
int test_nr;

/* Software mirror of the hardware pkey register, used for cross-checking. */
u64 shadow_pkey_reg;

/*
 * Non-zero while inside a signal handler.  Presumably consulted by the
 * dprintf*() helpers in pkey-helpers.h to stay signal-safe — confirm there.
 */
int dprint_in_signal;
char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
void cat_into_file(char *str, char *file)
{
int fd = open(file, O_RDWR);
int ret;
dprintf2("%s(): writing '%s' to '%s'\n", __func__, str, file);
/*
* these need to be raw because they are called under
* pkey_assert()
*/
if (fd < 0) {
fprintf(stderr, "error opening '%s'\n", str);
perror("error: ");
exit(__LINE__);
}
ret = write(fd, str, strlen(str));
if (ret != strlen(str)) {
perror("write to file failed");
fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
exit(__LINE__);
}
close(fd);
}
#if CONTROL_TRACING > 0
/* Warn about a missing-root condition only once per run. */
static int warned_tracing;

/* Return 1 when we are root and may poke the ftrace control files. */
int tracing_root_ok(void)
{
	if (geteuid() != 0) {
		if (!warned_tracing)
			fprintf(stderr, "WARNING: not run as root, "
					"can not do tracing control\n");
		warned_tracing = 1;
		return 0;
	}
	return 1;
}
#endif

/*
 * Enable function-graph tracing of this process via /sys/kernel/tracing.
 * No-op unless compiled with CONTROL_TRACING > 0 and running as root.
 */
void tracing_on(void)
{
#if CONTROL_TRACING > 0
#define TRACEDIR "/sys/kernel/tracing"
	char pidstr[32];

	if (!tracing_root_ok())
		return;

	sprintf(pidstr, "%d", getpid());
	/* Stop tracing and clear the old trace buffer first. */
	cat_into_file("0", TRACEDIR "/tracing_on");
	cat_into_file("\n", TRACEDIR "/trace");
	if (1) {
		cat_into_file("function_graph", TRACEDIR "/current_tracer");
		cat_into_file("1", TRACEDIR "/options/funcgraph-proc");
	} else {
		/* Dead branch kept for easy manual switching to "nop". */
		cat_into_file("nop", TRACEDIR "/current_tracer");
	}
	/* Restrict tracing to this pid, then start it. */
	cat_into_file(pidstr, TRACEDIR "/set_ftrace_pid");
	cat_into_file("1", TRACEDIR "/tracing_on");
	dprintf1("enabled tracing\n");
#endif
}

/* Counterpart of tracing_on(): stop ftrace output. */
void tracing_off(void)
{
#if CONTROL_TRACING > 0
	if (!tracing_root_ok())
		return;
	cat_into_file("0", "/sys/kernel/tracing/tracing_on");
#endif
}

/*
 * Run on test aborts: stop tracing and optionally sleep so the trace can
 * be inspected.  NOTE(review): presumably invoked by pkey_assert() — see
 * pkey-helpers.h.
 */
void abort_hooks(void)
{
	fprintf(stderr, "running %s()...\n", __func__);
	tracing_off();
#ifdef SLEEP_ON_ABORT
	sleep(SLEEP_ON_ABORT);
#endif
}
/*
 * This attempts to have roughly a page of instructions followed by a few
 * instructions that do a write, and another page of instructions.  That
 * way, we are pretty sure that the write is in the second page of
 * instructions and has at least a page of padding behind it.
 *
 * *That* lets us be sure to madvise() away the write instruction, which
 * will then fault, which makes sure that the fault code handles
 * execute-only memory properly.
 */
#ifdef __powerpc64__
/* This way, both 4K and 64K alignment are maintained */
__attribute__((__aligned__(65536)))
#else
__attribute__((__aligned__(PAGE_SIZE)))
#endif
void lots_o_noops_around_write(int *write_to_me)
{
	dprintf3("running %s()\n", __func__);
	__page_o_noops();
	/* Assume this happens in the second page of instructions: */
	*write_to_me = __LINE__;
	/* pad out by another page: */
	__page_o_noops();
	dprintf3("%s() done\n", __func__);
}
void dump_mem(void *dumpme, int len_bytes)
{
char *c = (void *)dumpme;
int i;
for (i = 0; i < len_bytes; i += sizeof(u64)) {
u64 *ptr = (u64 *)(c + i);
dprintf1("dump[%03d][@%p]: %016llx\n", i, ptr, *ptr);
}
}
/* Read the rights bits for @pkey straight out of the hardware register. */
static u32 hw_pkey_get(int pkey, unsigned long flags)
{
	u64 pkey_reg = __read_pkey_reg();

	dprintf1("%s(pkey=%d, flags=%lx) = %x / %d\n",
			__func__, pkey, flags, 0, 0);
	dprintf2("%s() raw pkey_reg: %016llx\n", __func__, pkey_reg);

	return (u32) get_pkey_bits(pkey_reg, pkey);
}

/* Write @rights (disable-access/disable-write bits only) for @pkey. */
static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
{
	u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
	u64 old_pkey_reg = __read_pkey_reg();
	u64 new_pkey_reg;

	/* make sure that 'rights' only contains the bits we expect: */
	assert(!(rights & ~mask));

	/* modify bits accordingly in old pkey_reg and assign it */
	new_pkey_reg = set_pkey_bits(old_pkey_reg, pkey, rights);

	__write_pkey_reg(new_pkey_reg);

	dprintf3("%s(pkey=%d, rights=%lx, flags=%lx) = %x"
		" pkey_reg now: %016llx old_pkey_reg: %016llx\n",
		__func__, pkey, rights, flags, 0, __read_pkey_reg(),
		old_pkey_reg);
	return 0;
}

/*
 * Set @flags (PKEY_DISABLE_ACCESS and/or PKEY_DISABLE_WRITE) for @pkey
 * in the hardware register and mirror the change in shadow_pkey_reg.
 */
void pkey_disable_set(int pkey, int flags)
{
	unsigned long syscall_flags = 0;
	int ret;
	int pkey_rights;
	u64 orig_pkey_reg = read_pkey_reg();

	dprintf1("START->%s(%d, 0x%x)\n", __func__,
		pkey, flags);
	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

	pkey_rights = hw_pkey_get(pkey, syscall_flags);

	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
			pkey, pkey, pkey_rights);

	pkey_assert(pkey_rights >= 0);

	pkey_rights |= flags;

	ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
	assert(!ret);
	/* pkey_reg and flags have the same format */
	shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, pkey, pkey_rights);
	dprintf1("%s(%d) shadow: 0x%016llx\n",
		__func__, pkey, shadow_pkey_reg);

	pkey_assert(ret >= 0);

	pkey_rights = hw_pkey_get(pkey, syscall_flags);
	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
			pkey, pkey, pkey_rights);

	dprintf1("%s(%d) pkey_reg: 0x%016llx\n",
		__func__, pkey, read_pkey_reg());
	if (flags)
		/* Setting disable bits can only raise (or keep) the value. */
		pkey_assert(read_pkey_reg() >= orig_pkey_reg);
	dprintf1("END<---%s(%d, 0x%x)\n", __func__,
		pkey, flags);
}
/*
 * Clear @flags (PKEY_DISABLE_ACCESS and/or PKEY_DISABLE_WRITE) for
 * @pkey in the hardware register and mirror the change in
 * shadow_pkey_reg.  Counterpart of pkey_disable_set().
 */
void pkey_disable_clear(int pkey, int flags)
{
	unsigned long syscall_flags = 0;
	int ret;
	int pkey_rights = hw_pkey_get(pkey, syscall_flags);
	u64 orig_pkey_reg = read_pkey_reg();

	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
			pkey, pkey, pkey_rights);
	pkey_assert(pkey_rights >= 0);

	pkey_rights &= ~flags;

	ret = hw_pkey_set(pkey, pkey_rights, 0);
	shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, pkey, pkey_rights);
	pkey_assert(ret >= 0);

	pkey_rights = hw_pkey_get(pkey, syscall_flags);
	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
			pkey, pkey, pkey_rights);

	dprintf1("%s(%d) pkey_reg: 0x%016llx\n", __func__,
			pkey, read_pkey_reg());
	if (flags)
		/*
		 * Clearing disable bits can only lower (or keep) the
		 * register value.  Use pkey_assert() for consistency with
		 * pkey_disable_set() (was a bare assert()).
		 */
		pkey_assert(read_pkey_reg() <= orig_pkey_reg);
}
/* Convenience wrappers: set or clear one disable bit for @pkey. */
void pkey_write_allow(int pkey)
{
	pkey_disable_clear(pkey, PKEY_DISABLE_WRITE);
}
void pkey_write_deny(int pkey)
{
	pkey_disable_set(pkey, PKEY_DISABLE_WRITE);
}
void pkey_access_allow(int pkey)
{
	pkey_disable_clear(pkey, PKEY_DISABLE_ACCESS);
}
void pkey_access_deny(int pkey)
{
	pkey_disable_set(pkey, PKEY_DISABLE_ACCESS);
}
/* Map a SIGSEGV si_code value to its symbolic name for debug output. */
static char *si_code_str(int si_code)
{
	switch (si_code) {
	case SEGV_MAPERR:
		return "SEGV_MAPERR";
	case SEGV_ACCERR:
		return "SEGV_ACCERR";
	case SEGV_BNDERR:
		return "SEGV_BNDERR";
	case SEGV_PKUERR:
		return "SEGV_PKUERR";
	default:
		return "UNKNOWN";
	}
}
/* Count of pkey faults handled, and the pkey from the last SEGV siginfo. */
int pkey_faults;
int last_si_pkey = -1;

/*
 * SIGSEGV handler: require the fault to be a pkey fault (exit(4) on any
 * other si_code), record the faulting pkey from siginfo, and re-enable
 * access so the faulting instruction can continue — on x86 by zeroing
 * the pkey register inside the signal frame's XSAVE area, on powerpc by
 * calling pkey_access_allow().
 */
void signal_handler(int signum, siginfo_t *si, void *vucontext)
{
	ucontext_t *uctxt = vucontext;
	int trapno;
	unsigned long ip;
	char *fpregs;
#if defined(__i386__) || defined(__x86_64__) /* arch */
	u32 *pkey_reg_ptr;
	int pkey_reg_offset;
#endif /* arch */
	u64 siginfo_pkey;
	u32 *si_pkey_ptr;

	dprint_in_signal = 1;
	dprintf1(">>>>===============SIGSEGV============================\n");
	dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
			__func__, __LINE__,
			__read_pkey_reg(), shadow_pkey_reg);

	trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
	ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];
	fpregs = (char *) uctxt->uc_mcontext.fpregs;

	dprintf2("%s() trapno: %d ip: 0x%016lx info->si_code: %s/%d\n",
			__func__, trapno, ip, si_code_str(si->si_code),
			si->si_code);

#if defined(__i386__) || defined(__x86_64__) /* arch */
#ifdef __i386__
	/*
	 * 32-bit has some extra padding so that userspace can tell whether
	 * the XSTATE header is present in addition to the "legacy" FPU
	 * state.  We just assume that it is here.
	 */
	fpregs += 0x70;
#endif /* i386 */
	pkey_reg_offset = pkey_reg_xstate_offset();
	pkey_reg_ptr = (void *)(&fpregs[pkey_reg_offset]);

	/*
	 * If we got a PKEY fault, we *HAVE* to have at least one bit set in
	 * here.
	 */
	dprintf1("pkey_reg_xstate_offset: %d\n", pkey_reg_xstate_offset());
	if (DEBUG_LEVEL > 4)
		dump_mem(pkey_reg_ptr - 128, 256);
	pkey_assert(*pkey_reg_ptr);
#endif /* arch */

	dprintf1("siginfo: %p\n", si);
	dprintf1(" fpregs: %p\n", fpregs);

	/* The tests only ever expect pkey faults (SEGV_PKUERR). */
	if ((si->si_code == SEGV_MAPERR) ||
	    (si->si_code == SEGV_ACCERR) ||
	    (si->si_code == SEGV_BNDERR)) {
		printf("non-PK si_code, exiting...\n");
		exit(4);
	}

	si_pkey_ptr = siginfo_get_pkey_ptr(si);
	dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
	dump_mem((u8 *)si_pkey_ptr - 8, 24);
	siginfo_pkey = *si_pkey_ptr;
	pkey_assert(siginfo_pkey < NR_PKEYS);
	last_si_pkey = siginfo_pkey;

	/*
	 * need __read_pkey_reg() version so we do not do shadow_pkey_reg
	 * checking
	 */
	dprintf1("signal pkey_reg from pkey_reg: %016llx\n",
			__read_pkey_reg());
	dprintf1("pkey from siginfo: %016llx\n", siginfo_pkey);
#if defined(__i386__) || defined(__x86_64__) /* arch */
	dprintf1("signal pkey_reg from xsave: %08x\n", *pkey_reg_ptr);
	*(u64 *)pkey_reg_ptr = 0x00000000;
	dprintf1("WARNING: set PKEY_REG=0 to allow faulting instruction to continue\n");
#elif defined(__powerpc64__) /* arch */
	/* restore access and let the faulting instruction continue */
	pkey_access_allow(siginfo_pkey);
#endif /* arch */
	pkey_faults++;
	dprintf1("<<<<==================================================\n");
	dprint_in_signal = 0;
}
/* Reap a single child (any pid); the exit status itself is discarded. */
int wait_all_children(void)
{
	int wstatus = 0;

	return waitpid(-1, &wstatus, 0);
}
/* SIGCHLD handler: only logs that the signal arrived. */
void sig_chld(int x)
{
	dprint_in_signal = 1;
	dprintf2("[%d] SIGCHLD: %d\n", getpid(), x);
	dprint_in_signal = 0;
}
/*
 * Install signal_handler() for SIGSEGV (and SIGALRM) with SA_SIGINFO so
 * the handler receives the pkey details in siginfo.
 */
void setup_sigsegv_handler(void)
{
	int r, rs;
	struct sigaction newact;
	struct sigaction oldact;

	/* #PF is mapped to sigsegv */
	int signum = SIGSEGV;

	newact.sa_handler = 0;
	newact.sa_sigaction = signal_handler;

	/*sigset_t - signals to block while in the handler */
	/* get the old signal mask. */
	rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
	pkey_assert(rs == 0);

	/* call sa_sigaction, not sa_handler*/
	newact.sa_flags = SA_SIGINFO;

	newact.sa_restorer = 0;  /* void(*)(), obsolete */
	r = sigaction(signum, &newact, &oldact);
	/*
	 * Check BOTH registrations: the first sigaction() result used to
	 * be overwritten by the second before it was ever tested.
	 */
	pkey_assert(r == 0);
	r = sigaction(SIGALRM, &newact, &oldact);
	pkey_assert(r == 0);
}
/* Install the SIGCHLD and SIGSEGV/SIGALRM handlers for the test run. */
void setup_handlers(void)
{
	signal(SIGCHLD, &sig_chld);
	setup_sigsegv_handler();
}

/* Fork a child that just sleeps forever; returns the child's pid. */
pid_t fork_lazy_child(void)
{
	pid_t forkret;

	forkret = fork();
	pkey_assert(forkret >= 0);
	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);

	if (!forkret) {
		/* in the child */
		while (1) {
			dprintf1("child sleeping...\n");
			sleep(30);
		}
	}
	return forkret;
}

/* Thin wrapper around the pkey_mprotect() syscall with debug output. */
int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
		unsigned long pkey)
{
	int sret;

	dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__,
			ptr, size, orig_prot, pkey);

	errno = 0;
	sret = syscall(__NR_pkey_mprotect, ptr, size, orig_prot, pkey);
	if (errno) {
		dprintf2("SYS_mprotect_key sret: %d\n", sret);
		dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot);
		dprintf2("SYS_mprotect_key failed, errno: %d\n", errno);
		if (DEBUG_LEVEL >= 2)
			perror("SYS_mprotect_pkey");
	}
	return sret;
}

/* Thin wrapper around the pkey_alloc() syscall with debug output. */
int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
{
	int ret = syscall(SYS_pkey_alloc, flags, init_val);

	dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n",
			__func__, flags, init_val, ret, errno);
	return ret;
}
/*
 * pkey_alloc() plus shadow bookkeeping: the kernel initializes the new
 * key's register bits, so mirror that in shadow_pkey_reg.
 */
int alloc_pkey(void)
{
	int ret;
	unsigned long init_val = 0x0;

	dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
			__func__, __LINE__, __read_pkey_reg(), shadow_pkey_reg);
	ret = sys_pkey_alloc(0, init_val);
	/*
	 * pkey_alloc() sets PKEY register, so we need to reflect it in
	 * shadow_pkey_reg:
	 */
	dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
			" shadow: 0x%016llx\n",
			__func__, __LINE__, ret, __read_pkey_reg(),
			shadow_pkey_reg);
	if (ret > 0) {
		/* clear both the bits: */
		shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
						~PKEY_MASK);
		dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
				" shadow: 0x%016llx\n",
				__func__,
				__LINE__, ret, __read_pkey_reg(),
				shadow_pkey_reg);
		/*
		 * move the new state in from init_val
		 * (remember, we cheated and init_val == pkey_reg format)
		 */
		shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
						init_val);
	}
	dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
			" shadow: 0x%016llx\n",
			__func__, __LINE__, ret, __read_pkey_reg(),
			shadow_pkey_reg);
	dprintf1("%s()::%d errno: %d\n", __func__, __LINE__, errno);
	/* for shadow checking: */
	read_pkey_reg();
	dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
			" shadow: 0x%016llx\n",
			__func__, __LINE__, ret, __read_pkey_reg(),
			shadow_pkey_reg);
	return ret;
}

/* Thin wrapper around the pkey_free() syscall with debug output. */
int sys_pkey_free(unsigned long pkey)
{
	int ret = syscall(SYS_pkey_free, pkey);

	dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret);
	return ret;
}

/*
 * I had a bug where pkey bits could be set by mprotect() but
 * not cleared.  This ensures we get lots of random bit sets
 * and clears on the vma and pte pkey bits.
 */
int alloc_random_pkey(void)
{
	int max_nr_pkey_allocs;
	int ret;
	int i;
	int alloced_pkeys[NR_PKEYS];
	int nr_alloced = 0;
	int random_index;

	memset(alloced_pkeys, 0, sizeof(alloced_pkeys));

	/* allocate every possible key and make a note of which ones we got */
	max_nr_pkey_allocs = NR_PKEYS;
	for (i = 0; i < max_nr_pkey_allocs; i++) {
		int new_pkey = alloc_pkey();

		if (new_pkey < 0)
			break;
		alloced_pkeys[nr_alloced++] = new_pkey;
	}

	pkey_assert(nr_alloced > 0);
	/* select a random one out of the allocated ones */
	random_index = rand() % nr_alloced;
	ret = alloced_pkeys[random_index];
	/* now zero it out so we don't free it next */
	alloced_pkeys[random_index] = 0;

	/* go through the allocated ones that we did not want and free them */
	for (i = 0; i < nr_alloced; i++) {
		int free_ret;

		if (!alloced_pkeys[i])
			continue;
		free_ret = sys_pkey_free(alloced_pkeys[i]);
		pkey_assert(!free_ret);
	}
	dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
			" shadow: 0x%016llx\n", __func__,
			__LINE__, ret, __read_pkey_reg(), shadow_pkey_reg);
	return ret;
}
/*
 * pkey_mprotect() the range and assert success.  The "while (0)" loop
 * below never executes; it is disabled stress code around the real call.
 */
int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
		unsigned long pkey)
{
	int nr_iterations = random() % 100;
	int ret;

	while (0) {
		int rpkey = alloc_random_pkey();

		ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
		dprintf1("sys_mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
				ptr, size, orig_prot, pkey, ret);
		if (nr_iterations-- < 0)
			break;

		dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
				" shadow: 0x%016llx\n",
				__func__, __LINE__, ret, __read_pkey_reg(),
				shadow_pkey_reg);
		sys_pkey_free(rpkey);
		dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
				" shadow: 0x%016llx\n",
				__func__, __LINE__, ret, __read_pkey_reg(),
				shadow_pkey_reg);
	}
	pkey_assert(pkey < NR_PKEYS);

	ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
	dprintf1("mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
			ptr, size, orig_prot, pkey, ret);
	pkey_assert(!ret);
	dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
			" shadow: 0x%016llx\n", __func__,
			__LINE__, ret, __read_pkey_reg(), shadow_pkey_reg);
	return ret;
}

/* Bookkeeping for every pkey-tagged allocation made by the tests. */
struct pkey_malloc_record {
	void *ptr;
	long size;
	int prot;
};
struct pkey_malloc_record *pkey_malloc_records;
struct pkey_malloc_record *pkey_last_malloc_record;
long nr_pkey_malloc_records;
/*
 * Remember an mmap()'d region in pkey_malloc_records[] so that
 * free_pkey_malloc() can find and munmap() it later.  Grows the record
 * array on demand.
 */
void record_pkey_malloc(void *ptr, long size, int prot)
{
	long i;
	struct pkey_malloc_record *rec = NULL;

	for (i = 0; i < nr_pkey_malloc_records; i++) {
		/*
		 * A free record has a NULL ptr (cleared by
		 * free_pkey_malloc()).  The old check here ("if (rec)")
		 * matched the first record unconditionally and kept
		 * overwriting it.
		 */
		if (!pkey_malloc_records[i].ptr) {
			rec = &pkey_malloc_records[i];
			break;
		}
	}
	if (!rec) {
		/* every record is full */
		size_t old_nr_records = nr_pkey_malloc_records;
		size_t new_nr_records = (nr_pkey_malloc_records * 2 + 1);
		size_t new_size = new_nr_records * sizeof(struct pkey_malloc_record);
		dprintf2("new_nr_records: %zd\n", new_nr_records);
		dprintf2("new_size: %zd\n", new_size);
		pkey_malloc_records = realloc(pkey_malloc_records, new_size);
		pkey_assert(pkey_malloc_records != NULL);
		rec = &pkey_malloc_records[nr_pkey_malloc_records];
		/*
		 * realloc() does not initialize memory, so zero it from
		 * the first new record all the way to the end.
		 */
		for (i = 0; i < new_nr_records - old_nr_records; i++)
			memset(rec + i, 0, sizeof(*rec));
	}
	dprintf3("filling malloc record[%d/%p]: {%p, %ld}\n",
			(int)(rec - pkey_malloc_records), rec, ptr, size);
	rec->ptr = ptr;
	rec->size = size;
	rec->prot = prot;
	pkey_last_malloc_record = rec;
	nr_pkey_malloc_records++;
}
/*
 * Find the record containing @ptr (which may point anywhere inside the
 * allocation), munmap() it and mark the record free (ptr = NULL).
 * Asserts if @ptr does not belong to any recorded allocation.
 */
void free_pkey_malloc(void *ptr)
{
	long i;
	int ret;
	dprintf3("%s(%p)\n", __func__, ptr);
	for (i = 0; i < nr_pkey_malloc_records; i++) {
		struct pkey_malloc_record *rec = &pkey_malloc_records[i];
		dprintf4("looking for ptr %p at record[%ld/%p]: {%p, %ld}\n",
				ptr, i, rec, rec->ptr, rec->size);
		if ((ptr < rec->ptr) ||
		    (ptr >= rec->ptr + rec->size))
			continue;
		dprintf3("found ptr %p at record[%ld/%p]: {%p, %ld}\n",
				ptr, i, rec, rec->ptr, rec->size);
		nr_pkey_malloc_records--;
		ret = munmap(rec->ptr, rec->size);
		dprintf3("munmap ret: %d\n", ret);
		pkey_assert(!ret);
		dprintf3("clearing rec->ptr, rec: %p\n", rec);
		rec->ptr = NULL;
		dprintf3("done clearing rec->ptr, rec: %p\n", rec);
		return;
	}
	pkey_assert(false);
}
/*
 * Allocate @size bytes with mmap() and assign @pkey to the whole region
 * via pkey_mprotect().
 */
void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
{
	void *ptr;
	int ret;

	read_pkey_reg();
	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
			size, prot, pkey);
	pkey_assert(pkey < NR_PKEYS);
	ptr = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	pkey_assert(ptr != (void *)-1);
	/*
	 * Cover the entire allocation, not just the first PAGE_SIZE
	 * bytes, so multi-page allocations carry the pkey everywhere.
	 */
	ret = mprotect_pkey((void *)ptr, size, prot, pkey);
	pkey_assert(!ret);
	record_pkey_malloc(ptr, size, prot);
	read_pkey_reg();

	dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
	return ptr;
}
/*
 * Allocate @size bytes rounded up so at least one huge page fits, ask
 * for THP via MADV_HUGEPAGE, and return a HPAGE_SIZE-aligned pointer
 * inside the mapping.
 */
void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
{
	int ret;
	void *ptr;

	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
			size, prot, pkey);
	/*
	 * Guarantee we can fit at least one huge page in the resulting
	 * allocation by allocating space for 2:
	 */
	size = ALIGN_UP(size, HPAGE_SIZE * 2);
	ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	pkey_assert(ptr != (void *)-1);
	record_pkey_malloc(ptr, size, prot);
	mprotect_pkey(ptr, size, prot, pkey);

	dprintf1("unaligned ptr: %p\n", ptr);
	ptr = ALIGN_PTR_UP(ptr, HPAGE_SIZE);
	dprintf1(" aligned ptr: %p\n", ptr);
	ret = madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE);
	dprintf1("MADV_HUGEPAGE ret: %d\n", ret);
	ret = madvise(ptr, HPAGE_SIZE, MADV_WILLNEED);
	dprintf1("MADV_WILLNEED ret: %d\n", ret);
	memset(ptr, 0, HPAGE_SIZE);

	dprintf1("mmap()'d thp for pkey %d @ %p\n", pkey, ptr);
	return ptr;
}

/* Set by setup_hugetlbfs() once hugetlb pages are confirmed available. */
int hugetlb_setup_ok;
#define SYSFS_FMT_NR_HUGE_PAGES "/sys/kernel/mm/hugepages/hugepages-%ldkB/nr_hugepages"
#define GET_NR_HUGE_PAGES 10
/*
 * Reserve GET_NR_HUGE_PAGES hugetlb pages and verify via sysfs that
 * PMD-sized pages were actually created.  Sets hugetlb_setup_ok on
 * success; requires root.
 */
void setup_hugetlbfs(void)
{
	int err;
	int fd;
	char buf[256];
	long hpagesz_kb;
	long hpagesz_mb;

	if (geteuid() != 0) {
		fprintf(stderr, "WARNING: not run as root, can not do hugetlb test\n");
		return;
	}

	cat_into_file(__stringify(GET_NR_HUGE_PAGES), "/proc/sys/vm/nr_hugepages");

	/*
	 * Now go make sure that we got the pages and that they
	 * are PMD-level pages. Someone might have made PUD-level
	 * pages the default.
	 */
	hpagesz_kb = HPAGE_SIZE / 1024;
	hpagesz_mb = hpagesz_kb / 1024;
	sprintf(buf, SYSFS_FMT_NR_HUGE_PAGES, hpagesz_kb);
	fd = open(buf, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "opening sysfs %ldM hugetlb config: %s\n",
			hpagesz_mb, strerror(errno));
		return;
	}

	/* -1 to guarantee leaving the trailing \0 */
	err = read(fd, buf, sizeof(buf)-1);
	close(fd);
	if (err <= 0) {
		fprintf(stderr, "reading sysfs %ldM hugetlb config: %s\n",
			hpagesz_mb, strerror(errno));
		return;
	}
	/*
	 * Actually write the promised terminator: buf still holds the
	 * sysfs path from the sprintf() above, so without this atoi()
	 * could run into stale bytes.
	 */
	buf[err] = '\0';

	if (atoi(buf) != GET_NR_HUGE_PAGES) {
		fprintf(stderr, "could not confirm %ldM pages, got: '%s' expected %d\n",
			hpagesz_mb, buf, GET_NR_HUGE_PAGES);
		return;
	}

	hugetlb_setup_ok = 1;
}
/* Allocate @size bytes from hugetlbfs; needs setup_hugetlbfs() to have run. */
void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
{
	void *ptr;
	int flags = MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB;

	if (!hugetlb_setup_ok)
		return PTR_ERR_ENOTSUP;

	dprintf1("doing %s(%ld, %x, %x)\n", __func__, size, prot, pkey);
	size = ALIGN_UP(size, HPAGE_SIZE * 2);
	pkey_assert(pkey < NR_PKEYS);
	ptr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
	pkey_assert(ptr != (void *)-1);
	mprotect_pkey(ptr, size, prot, pkey);

	record_pkey_malloc(ptr, size, prot);

	dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
	return ptr;
}

/* Map a file from a DAX filesystem (hard-coded /dax/foo) with @pkey. */
void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
{
	void *ptr;
	int fd;

	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
			size, prot, pkey);
	pkey_assert(pkey < NR_PKEYS);
	fd = open("/dax/foo", O_RDWR);
	pkey_assert(fd >= 0);

	ptr = mmap(0, size, prot, MAP_SHARED, fd, 0);
	pkey_assert(ptr != (void *)-1);

	mprotect_pkey(ptr, size, prot, pkey);

	record_pkey_malloc(ptr, size, prot);

	dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
	close(fd);
	return ptr;
}

/* All allocator flavors that malloc_pkey() rotates through. */
void *(*pkey_malloc[])(long size, int prot, u16 pkey) = {

	malloc_pkey_with_mprotect,
	malloc_pkey_with_mprotect_subpage,
	malloc_pkey_anon_huge,
	malloc_pkey_hugetlb
/* can not do direct with the pkey_mprotect() API:
	malloc_pkey_mmap_direct,
	malloc_pkey_mmap_dax,
*/
};
/*
 * Allocate @size bytes using the next allocator flavor from
 * pkey_malloc[], retrying when a flavor reports PTR_ERR_ENOTSUP.
 */
void *malloc_pkey(long size, int prot, u16 pkey)
{
	void *ret;
	static int malloc_type;
	int nr_malloc_types = ARRAY_SIZE(pkey_malloc);

	pkey_assert(pkey < NR_PKEYS);

	while (1) {
		pkey_assert(malloc_type < nr_malloc_types);

		ret = pkey_malloc[malloc_type](size, prot, pkey);
		pkey_assert(ret != (void *)-1);

		malloc_type++;
		/* Wrap around to a random flavor once all have been used. */
		if (malloc_type >= nr_malloc_types)
			malloc_type = (random()%nr_malloc_types);

		/* try again if the malloc_type we tried is unsupported */
		if (ret == PTR_ERR_ENOTSUP)
			continue;

		break;
	}

	dprintf3("%s(%ld, prot=%x, pkey=%x) returning: %p\n", __func__,
			size, prot, pkey, ret);
	return ret;
}
/* Fault count at the last check, and marker for "pkey not known in advance". */
int last_pkey_faults;
#define UNKNOWN_PKEY -2

/*
 * Assert that exactly one new pkey fault happened since the last check,
 * that it was on @pkey (unless UNKNOWN_PKEY), and restore the pkey
 * register value the signal handler nuked.
 */
void expected_pkey_fault(int pkey)
{
	dprintf2("%s(): last_pkey_faults: %d pkey_faults: %d\n",
			__func__, last_pkey_faults, pkey_faults);
	dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
	pkey_assert(last_pkey_faults + 1 == pkey_faults);

	/*
	 * For exec-only memory, we do not know the pkey in
	 * advance, so skip this check.
	 */
	if (pkey != UNKNOWN_PKEY)
		pkey_assert(last_si_pkey == pkey);

#if defined(__i386__) || defined(__x86_64__) /* arch */
	/*
	 * The signal handler should have cleared out PKEY register to let the
	 * test program continue.  We now have to restore it.
	 */
	if (__read_pkey_reg() != 0)
#else /* arch */
	if (__read_pkey_reg() != shadow_pkey_reg)
#endif /* arch */
		pkey_assert(0);

	__write_pkey_reg(shadow_pkey_reg);
	dprintf1("%s() set pkey_reg=%016llx to restore state after signal "
		"nuked it\n", __func__, shadow_pkey_reg);
	last_pkey_faults = pkey_faults;
	last_si_pkey = -1;
}

/* Assert that NO new pkey fault happened since the last check. */
#define do_not_expect_pkey_fault(msg)	do {		\
	if (last_pkey_faults != pkey_faults)		\
		dprintf0("unexpected PKey fault: %s\n", msg);	\
	pkey_assert(last_pkey_faults == pkey_faults);	\
} while (0)
/* Small pool of fds opened by tests, closed in bulk afterwards. */
int test_fds[10] = { -1 };
int nr_test_fds;

/* Stash @fd in test_fds[] so close_test_fds() can clean it up. */
void __save_test_fd(int fd)
{
	pkey_assert(fd >= 0);
	pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds));
	test_fds[nr_test_fds] = fd;
	nr_test_fds++;
}

/* Open a readable file (/etc/passwd) and remember the fd for cleanup. */
int get_test_read_fd(void)
{
	int test_fd = open("/etc/passwd", O_RDONLY);

	__save_test_fd(test_fd);
	return test_fd;
}

/* Close every fd recorded via __save_test_fd() and reset the pool. */
void close_test_fds(void)
{
	int i;

	for (i = 0; i < nr_test_fds; i++) {
		if (test_fds[i] < 0)
			continue;
		close(test_fds[i]);
		test_fds[i] = -1;
	}
	nr_test_fds = 0;
}

#define barrier() __asm__ __volatile__("": : :"memory")
/* Force a real load from *ptr that the compiler cannot optimize away. */
__attribute__((noinline)) int read_ptr(int *ptr)
{
	/*
	 * Keep GCC from optimizing this away somehow
	 */
	barrier();
	return *ptr;
}
/*
 * Verify that pkey 0 is never handed out by pkey_alloc() and that it
 * can still be attached to memory in every protection mode.
 */
void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey)
{
	int i, err;
	int max_nr_pkey_allocs;
	int alloced_pkeys[NR_PKEYS];
	int nr_alloced = 0;
	long size;

	pkey_assert(pkey_last_malloc_record);
	size = pkey_last_malloc_record->size;
	/*
	 * This is a bit of a hack.  But mprotect() requires
	 * huge-page-aligned sizes when operating on hugetlbfs.
	 * So, make sure that we use something that's a multiple
	 * of a huge page when we can.
	 */
	if (size >= HPAGE_SIZE)
		size = HPAGE_SIZE;

	/* allocate every possible key and make sure key-0 never got allocated */
	max_nr_pkey_allocs = NR_PKEYS;
	for (i = 0; i < max_nr_pkey_allocs; i++) {
		int new_pkey = alloc_pkey();
		pkey_assert(new_pkey != 0);
		if (new_pkey < 0)
			break;
		alloced_pkeys[nr_alloced++] = new_pkey;
	}
	/* free all the allocated keys */
	for (i = 0; i < nr_alloced; i++) {
		int free_ret;

		if (!alloced_pkeys[i])
			continue;
		free_ret = sys_pkey_free(alloced_pkeys[i]);
		pkey_assert(!free_ret);
	}

	/* attach key-0 in various modes */
	err = sys_mprotect_pkey(ptr, size, PROT_READ, 0);
	pkey_assert(!err);
	err = sys_mprotect_pkey(ptr, size, PROT_WRITE, 0);
	pkey_assert(!err);
	err = sys_mprotect_pkey(ptr, size, PROT_EXEC, 0);
	pkey_assert(!err);
	err = sys_mprotect_pkey(ptr, size, PROT_READ|PROT_WRITE, 0);
	pkey_assert(!err);
	err = sys_mprotect_pkey(ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, 0);
	pkey_assert(!err);
}
/* Reads must still work when only writes are disabled for the key. */
void test_read_of_write_disabled_region(int *ptr, u16 pkey)
{
	int ptr_contents;

	dprintf1("disabling write access to PKEY[1], doing read\n");
	pkey_write_deny(pkey);
	ptr_contents = read_ptr(ptr);
	dprintf1("*ptr: %d\n", ptr_contents);
	dprintf1("\n");
}

/* A read from access-disabled memory must raise a pkey fault. */
void test_read_of_access_disabled_region(int *ptr, u16 pkey)
{
	int ptr_contents;

	dprintf1("disabling access to PKEY[%02d], doing read @ %p\n", pkey, ptr);
	read_pkey_reg();
	pkey_access_deny(pkey);
	ptr_contents = read_ptr(ptr);
	dprintf1("*ptr: %d\n", ptr_contents);
	expected_pkey_fault(pkey);
}

/* Same, but the page is faulted in before access is disabled. */
void test_read_of_access_disabled_region_with_page_already_mapped(int *ptr,
		u16 pkey)
{
	int ptr_contents;

	dprintf1("disabling access to PKEY[%02d], doing read @ %p\n",
			pkey, ptr);
	ptr_contents = read_ptr(ptr);
	dprintf1("reading ptr before disabling the read : %d\n",
			ptr_contents);
	read_pkey_reg();
	pkey_access_deny(pkey);
	ptr_contents = read_ptr(ptr);
	dprintf1("*ptr: %d\n", ptr_contents);
	expected_pkey_fault(pkey);
}

/* A write must fault when writes are disabled, even on a mapped page. */
void test_write_of_write_disabled_region_with_page_already_mapped(int *ptr,
		u16 pkey)
{
	*ptr = __LINE__;
	dprintf1("disabling write access; after accessing the page, "
		"to PKEY[%02d], doing write\n", pkey);
	pkey_write_deny(pkey);
	*ptr = __LINE__;
	expected_pkey_fault(pkey);
}

/* A write to write-disabled memory must raise a pkey fault. */
void test_write_of_write_disabled_region(int *ptr, u16 pkey)
{
	dprintf1("disabling write access to PKEY[%02d], doing write\n", pkey);
	pkey_write_deny(pkey);
	*ptr = __LINE__;
	expected_pkey_fault(pkey);
}

/* A write to access-disabled memory must raise a pkey fault. */
void test_write_of_access_disabled_region(int *ptr, u16 pkey)
{
	dprintf1("disabling access to PKEY[%02d], doing write\n", pkey);
	pkey_access_deny(pkey);
	*ptr = __LINE__;
	expected_pkey_fault(pkey);
}

/* Same, but the page is faulted in before access is disabled. */
void test_write_of_access_disabled_region_with_page_already_mapped(int *ptr,
		u16 pkey)
{
	*ptr = __LINE__;
	dprintf1("disabling access; after accessing the page, "
		" to PKEY[%02d], doing write\n", pkey);
	pkey_access_deny(pkey);
	*ptr = __LINE__;
	expected_pkey_fault(pkey);
}
void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey)
{
int ret;
int test_fd = get_test_read_fd();
dprintf1("disabling access to PKEY[%02d], "
"having kernel read() to buffer\n", pkey);
pkey_access_deny(pkey);
ret = read(test_fd, ptr, 1);
dprintf1("read ret: %d\n", ret);
pkey_assert(ret);
}
void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey)
{
int ret;
int test_fd = get_test_read_fd();
pkey_write_deny(pkey);
ret = read(test_fd, ptr, 100);
dprintf1("read ret: %d\n", ret);
if (ret < 0 && (DEBUG_LEVEL > 0))
perror("verbose read result (OK for this to be bad)");
pkey_assert(ret);
}
/*
 * vmsplice() makes the kernel take references on the user pages
 * (get_user_pages).  With all access denied on the pkey, that GUP
 * must fail, so vmsplice() is expected to return -1.
 */
void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey)
{
	int fds[2];
	int ret;
	struct iovec vec;

	ret = pipe(fds);
	pkey_assert(ret == 0);

	dprintf1("disabling access to PKEY[%02d], "
		 "having kernel vmsplice from buffer\n", pkey);
	pkey_access_deny(pkey);

	vec.iov_base = ptr;
	vec.iov_len = PAGE_SIZE;
	ret = vmsplice(fds[1], &vec, 1, SPLICE_F_GIFT);
	dprintf1("vmsplice() ret: %d\n", ret);
	pkey_assert(ret == -1);

	close(fds[0]);
	close(fds[1]);
}
/*
 * Exercise a kernel GUP *write* path via futex(FUTEX_WAIT) on a
 * write-denied buffer.  FUTEX_WAIT is called with a value that does
 * not match *ptr, so it returns immediately; only the return value is
 * logged — nothing is asserted here beyond not crashing.
 */
void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey)
{
	int ignored = 0xdada;
	int futex_ret;
	int some_int = __LINE__;

	dprintf1("disabling write to PKEY[%02d], "
		 "doing futex gunk in buffer\n", pkey);
	*ptr = some_int;	/* seed the futex word before denying writes */
	pkey_write_deny(pkey);
	/* some_int-1 != *ptr, so FUTEX_WAIT should not block */
	futex_ret = syscall(SYS_futex, ptr, FUTEX_WAIT, some_int-1, NULL,
			&ignored, ignored);
	if (DEBUG_LEVEL > 0)
		perror("futex");
	dprintf1("futex() ret: %d\n", futex_ret);
}
/* Assumes that all pkeys other than 'pkey' are unallocated */
void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey)
{
	int i;
	int err;

	/* Note: 0 is the default pkey, so don't mess with it */
	for (i = 1; i < NR_PKEYS; i++) {
		if (i == pkey)
			continue;

		dprintf1("trying get/set/free to non-allocated pkey: %2d\n", i);
		/* freeing a never-allocated key must fail... twice */
		err = sys_pkey_free(i);
		pkey_assert(err);
		err = sys_pkey_free(i);
		pkey_assert(err);
		/* ...and so must attaching it to a mapping */
		err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, i);
		pkey_assert(err);
	}
}
/* Assumes that all pkeys other than 'pkey' are unallocated */
void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
{
	int invalid_pkey = NR_PKEYS + 99;
	int err;

	/* pass a known-invalid pkey in: */
	err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, invalid_pkey);
	pkey_assert(err);
}
/*
 * fork() and keep only the child running; the parent exits
 * immediately.  Used to verify pkey state survives fork().
 */
void become_child(void)
{
	pid_t ret;

	ret = fork();
	pkey_assert(ret >= 0);
	dprintf3("[%d] fork() ret: %d\n", getpid(), ret);

	if (ret)
		exit(0);	/* parent: done */
	/* child: continue running the test */
}
/* Assumes that all pkeys other than 'pkey' are unallocated */
void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
{
	int err;
	int allocated_pkeys[NR_PKEYS] = {0};
	int nr_allocated_pkeys = 0;
	int i;

	for (i = 0; i < NR_PKEYS*3; i++) {
		int new_pkey;
		dprintf1("%s() alloc loop: %d\n", __func__, i);
		new_pkey = alloc_pkey();
		/*
		 * Fix: this debug line used to print 'err', which is not
		 * assigned until the free loop at the bottom of this
		 * function (an uninitialized read).  Print the allocation
		 * result that was just returned instead.
		 */
		dprintf4("%s()::%d, new_pkey: %d pkey_reg: 0x%016llx"
				" shadow: 0x%016llx\n",
				__func__, __LINE__, new_pkey, __read_pkey_reg(),
				shadow_pkey_reg);
		read_pkey_reg(); /* for shadow checking */
		dprintf2("%s() errno: %d ENOSPC: %d\n", __func__, errno, ENOSPC);
		if ((new_pkey == -1) && (errno == ENOSPC)) {
			dprintf2("%s() failed to allocate pkey after %d tries\n",
				__func__, nr_allocated_pkeys);
		} else {
			/*
			 * Ensure the number of successes never
			 * exceeds the number of keys supported
			 * in the hardware.
			 */
			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
		}
		/*
		 * Make sure that allocation state is properly
		 * preserved across fork().
		 */
		if (i == NR_PKEYS*2)
			become_child();
	}

	dprintf3("%s()::%d\n", __func__, __LINE__);

	/*
	 * On x86:
	 * There are 16 pkeys supported in hardware.  Three are
	 * allocated by the time we get here:
	 *   1. The default key (0)
	 *   2. One possibly consumed by an execute-only mapping.
	 *   3. One allocated by the test code and passed in via
	 *      'pkey' to this function.
	 * Ensure that we can allocate at least another 13 (16-3).
	 *
	 * On powerpc:
	 * There are either 5, 28, 29 or 32 pkeys supported in
	 * hardware depending on the page size (4K or 64K) and
	 * platform (powernv or powervm). Four are allocated by
	 * the time we get here. These include pkey-0, pkey-1,
	 * exec-only pkey and the one allocated by the test code.
	 * Ensure that we can allocate the remaining.
	 */
	pkey_assert(i >= (NR_PKEYS - get_arch_reserved_keys() - 1));

	/* Free everything we allocated and re-verify the shadow. */
	for (i = 0; i < nr_allocated_pkeys; i++) {
		err = sys_pkey_free(allocated_pkeys[i]);
		pkey_assert(!err);
		read_pkey_reg(); /* for shadow checking */
	}
}
/*
 * Force the pkey register back to its hardware init state (x86 only)
 * by round-tripping through XSAVE/XRSTOR with the PKRU component
 * cleared from XSTATE_BV.  No-op on other architectures.
 */
void arch_force_pkey_reg_init(void)
{
#if defined(__i386__) || defined(__x86_64__) /* arch */
	u64 *buf;

	/*
	 * All keys should be allocated and set to allow reads and
	 * writes, so the register should be all 0.  If not, just
	 * skip the test.
	 */
	if (read_pkey_reg())
		return;

	/*
	 * Just allocate an absurd about of memory rather than
	 * doing the XSAVE size enumeration dance.
	 */
	buf = mmap(NULL, 1*MB, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	/* Fix: mmap() failure was previously unchecked before use */
	if (buf == MAP_FAILED)
		return;

	/* These __builtins require compiling with -mxsave */

	/* XSAVE to build a valid buffer: */
	__builtin_ia32_xsave(buf, XSTATE_PKEY);
	/* Clear XSTATE_BV[PKRU]: */
	buf[XSTATE_BV_OFFSET/sizeof(u64)] &= ~XSTATE_PKEY;
	/* XRSTOR will likely get PKRU back to the init state: */
	__builtin_ia32_xrstor(buf, XSTATE_PKEY);

	munmap(buf, 1*MB);
#endif
}
/*
 * This is mostly useless on ppc for now.  But it will not
 * hurt anything and should give some better coverage as
 * a long-running test that continually checks the pkey
 * register.
 */
void test_pkey_init_state(int *ptr, u16 pkey)
{
	int err;
	int allocated_pkeys[NR_PKEYS] = {0};
	int nr_allocated_pkeys = 0;
	int i;

	/* Allocate every pkey we can get (failures are fine). */
	for (i = 0; i < NR_PKEYS; i++) {
		int new_pkey = alloc_pkey();

		if (new_pkey < 0)
			continue;
		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
	}

	dprintf3("%s()::%d\n", __func__, __LINE__);

	arch_force_pkey_reg_init();

	/*
	 * Loop for a bit, hoping to get exercise the kernel
	 * context switch code.
	 */
	for (i = 0; i < 1000000; i++)
		read_pkey_reg();

	/* Release everything and re-verify the shadow each time. */
	for (i = 0; i < nr_allocated_pkeys; i++) {
		err = sys_pkey_free(allocated_pkeys[i]);
		pkey_assert(!err);
		read_pkey_reg(); /* for shadow checking */
	}
}
/*
 * pkey 0 is special.  It is allocated by default, so you do not
 * have to call pkey_alloc() to use it first.  Make sure that it
 * is usable.
 */
void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
{
	long size;
	int prot;

	/* reuse the size/prot of the most recent pkey allocation */
	assert(pkey_last_malloc_record);
	size = pkey_last_malloc_record->size;
	/*
	 * This is a bit of a hack.  But mprotect() requires
	 * huge-page-aligned sizes when operating on hugetlbfs.
	 * So, make sure that we use something that's a multiple
	 * of a huge page when we can.
	 */
	if (size >= HPAGE_SIZE)
		size = HPAGE_SIZE;
	prot = pkey_last_malloc_record->prot;

	/* Use pkey 0 */
	mprotect_pkey(ptr, size, prot, 0);

	/* Make sure that we can set it back to the original pkey. */
	mprotect_pkey(ptr, size, prot, pkey);
}
/*
 * Verify that ptrace(PTRACE_PEEKDATA) into a pkey-protected mapping
 * works (the tracer is not subject to the tracee's pkey restrictions)
 * while a direct read from the current task still faults.
 */
void test_ptrace_of_child(int *ptr, u16 pkey)
{
	__attribute__((__unused__)) int peek_result;
	pid_t child_pid;
	void *ignored = 0;
	long ret;
	int status;
	/*
	 * This is the "control" for our little expermient.  Make sure
	 * we can always access it when ptracing.
	 */
	int *plain_ptr_unaligned = malloc(HPAGE_SIZE);
	int *plain_ptr = ALIGN_PTR_UP(plain_ptr_unaligned, PAGE_SIZE);

	/*
	 * Fork a child which is an exact copy of this process, of course.
	 * That means we can do all of our tests via ptrace() and then plain
	 * memory access and ensure they work differently.
	 */
	child_pid = fork_lazy_child();
	dprintf1("[%d] child pid: %d\n", getpid(), child_pid);

	ret = ptrace(PTRACE_ATTACH, child_pid, ignored, ignored);
	if (ret)
		perror("attach");
	dprintf1("[%d] attach ret: %ld %d\n", getpid(), ret, __LINE__);
	pkey_assert(ret != -1);
	/* wait for the child to stop under ptrace */
	ret = waitpid(child_pid, &status, WUNTRACED);
	if ((ret != child_pid) || !(WIFSTOPPED(status))) {
		fprintf(stderr, "weird waitpid result %ld stat %x\n",
				ret, status);
		pkey_assert(0);
	}
	dprintf2("waitpid ret: %ld\n", ret);
	dprintf2("waitpid status: %d\n", status);

	pkey_access_deny(pkey);
	pkey_write_deny(pkey);

	/* Write access, untested for now:
	ret = ptrace(PTRACE_POKEDATA, child_pid, peek_at, data);
	pkey_assert(ret != -1);
	dprintf1("poke at %p: %ld\n", peek_at, ret);
	*/

	/*
	 * Try to access the pkey-protected "ptr" via ptrace:
	 */
	ret = ptrace(PTRACE_PEEKDATA, child_pid, ptr, ignored);
	/* expect it to work, without an error: */
	pkey_assert(ret != -1);
	/* Now access from the current task, and expect an exception: */
	peek_result = read_ptr(ptr);
	expected_pkey_fault(pkey);

	/*
	 * Try to access the NON-pkey-protected "plain_ptr" via ptrace:
	 */
	ret = ptrace(PTRACE_PEEKDATA, child_pid, plain_ptr, ignored);
	/* expect it to work, without an error: */
	pkey_assert(ret != -1);
	/* Now access from the current task, and expect NO exception: */
	peek_result = read_ptr(plain_ptr);
	do_not_expect_pkey_fault("read plain pointer after ptrace");

	/* detach and reap the child */
	ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
	pkey_assert(ret != -1);

	ret = kill(child_pid, SIGKILL);
	pkey_assert(ret != -1);

	wait(&status);

	free(plain_ptr_unaligned);
}
/*
 * Return a page-aligned pointer into the *second* page of the
 * lots_o_noops_around_write() function, with that page madvise()d
 * away so the next touch takes an instruction fault.
 */
void *get_pointer_to_instructions(void)
{
	void *p1;

	p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
	dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
	/* lots_o_noops_around_write should be page-aligned already */
	assert(p1 == &lots_o_noops_around_write);

	/* Point 'p1' at the *second* page of the function: */
	p1 += PAGE_SIZE;

	/*
	 * Try to ensure we fault this in on next touch to ensure
	 * we get an instruction fault as opposed to a data one
	 */
	madvise(p1, PAGE_SIZE, MADV_DONTNEED);

	return p1;
}
/*
 * Make a code page PROT_EXEC with an access-denied pkey: executing it
 * must still work (instruction fetch is not blocked by pkeys on the
 * paths tested here), while a data read must fault.
 */
void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
{
	void *p1;
	int scratch;
	int ptr_contents;
	int ret;

	p1 = get_pointer_to_instructions();
	lots_o_noops_around_write(&scratch);
	ptr_contents = read_ptr(p1);
	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);

	ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC, (u64)pkey);
	pkey_assert(!ret);
	pkey_access_deny(pkey);

	dprintf2("pkey_reg: %016llx\n", read_pkey_reg());

	/*
	 * Make sure this is an *instruction* fault
	 */
	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
	lots_o_noops_around_write(&scratch);
	do_not_expect_pkey_fault("executing on PROT_EXEC memory");
	/* ...but a plain data read must still fault: */
	expect_fault_on_read_execonly_key(p1, pkey);
}
/*
 * A plain mprotect(PROT_EXEC) may implicitly attach the kernel's
 * execute-only pkey.  Verify execution still works, reads fault, and
 * that switching back to readable protections clears the pkey again.
 */
void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
{
	void *p1;
	int scratch;
	int ptr_contents;
	int ret;

	dprintf1("%s() start\n", __func__);

	p1 = get_pointer_to_instructions();
	lots_o_noops_around_write(&scratch);
	ptr_contents = read_ptr(p1);
	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);

	/* Use a *normal* mprotect(), not mprotect_pkey(): */
	ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
	pkey_assert(!ret);

	/*
	 * Reset the shadow, assuming that the above mprotect()
	 * correctly changed PKRU, but to an unknown value since
	 * the actual allocated pkey is unknown.
	 */
	shadow_pkey_reg = __read_pkey_reg();

	dprintf2("pkey_reg: %016llx\n", read_pkey_reg());

	/* Make sure this is an *instruction* fault */
	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
	lots_o_noops_around_write(&scratch);
	do_not_expect_pkey_fault("executing on PROT_EXEC memory");
	/* the implicitly-attached pkey is unknown to us: */
	expect_fault_on_read_execonly_key(p1, UNKNOWN_PKEY);

	/*
	 * Put the memory back to non-PROT_EXEC.  Should clear the
	 * exec-only pkey off the VMA and allow it to be readable
	 * again.  Go to PROT_NONE first to check for a kernel bug
	 * that did not clear the pkey when doing PROT_NONE.
	 */
	ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
	pkey_assert(!ret);

	ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
	pkey_assert(!ret);
	ptr_contents = read_ptr(p1);
	do_not_expect_pkey_fault("plain read on recently PROT_EXEC area");
}
#if defined(__i386__) || defined(__x86_64__)
/*
 * Verify that a tracer can read and modify the tracee's PKRU via
 * PTRACE_GETREGSET/SETREGSET(NT_X86_XSTATE), and that clearing the
 * PKRU bit from XSTATE_BV resets PKRU to 0 in both the regset view
 * and the tracee itself.  The child stops three times (SIGSTOP) to
 * let the tracer poke at its state between checks.
 */
void test_ptrace_modifies_pkru(int *ptr, u16 pkey)
{
	u32 new_pkru;
	pid_t child;
	int status, ret;
	int pkey_offset = pkey_reg_xstate_offset();
	size_t xsave_size = cpu_max_xsave_size();
	void *xsave;
	u32 *pkey_register;
	u64 *xstate_bv;
	struct iovec iov;

	/* pick a PKRU value the child certainly does not have yet */
	new_pkru = ~read_pkey_reg();
	/* Don't make PROT_EXEC mappings inaccessible */
	new_pkru &= ~3;

	child = fork();
	pkey_assert(child >= 0);
	dprintf3("[%d] fork() ret: %d\n", getpid(), child);
	if (!child) {
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		/* Stop and allow the tracer to modify PKRU directly */
		raise(SIGSTOP);

		/*
		 * need __read_pkey_reg() version so we do not do shadow_pkey_reg
		 * checking
		 */
		if (__read_pkey_reg() != new_pkru)
			exit(1);

		/* Stop and allow the tracer to clear XSTATE_BV for PKRU */
		raise(SIGSTOP);

		if (__read_pkey_reg() != 0)
			exit(1);

		/* Stop and allow the tracer to examine PKRU */
		raise(SIGSTOP);

		exit(0);
	}

	pkey_assert(child == waitpid(child, &status, 0));
	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);

	xsave = (void *)malloc(xsave_size);
	pkey_assert(xsave > 0);

	/* Modify the PKRU register directly */
	iov.iov_base = xsave;
	iov.iov_len = xsave_size;
	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);

	/* the regset view must match what the child currently has */
	pkey_register = (u32 *)(xsave + pkey_offset);
	pkey_assert(*pkey_register == read_pkey_reg());

	*pkey_register = new_pkru;

	ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);

	/* Test that the modification is visible in ptrace before any execution */
	memset(xsave, 0xCC, xsave_size);
	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);
	pkey_assert(*pkey_register == new_pkru);

	/* Execute the tracee */
	ret = ptrace(PTRACE_CONT, child, 0, 0);
	pkey_assert(ret == 0);

	/* Test that the tracee saw the PKRU value change */
	pkey_assert(child == waitpid(child, &status, 0));
	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);

	/* Test that the modification is visible in ptrace after execution */
	memset(xsave, 0xCC, xsave_size);
	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);
	pkey_assert(*pkey_register == new_pkru);

	/*
	 * Clear the PKRU bit from XSTATE_BV.  XSTATE_BV is the first
	 * u64 of the XSAVE header, which follows the 512-byte legacy
	 * region; bit 9 is the PKRU state component (per the Intel
	 * SDM XSAVE layout).
	 */
	xstate_bv = (u64 *)(xsave + 512);
	*xstate_bv &= ~(1 << 9);

	ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);

	/* Test that the modification is visible in ptrace before any execution */
	memset(xsave, 0xCC, xsave_size);
	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);
	pkey_assert(*pkey_register == 0);

	ret = ptrace(PTRACE_CONT, child, 0, 0);
	pkey_assert(ret == 0);

	/* Test that the tracee saw the PKRU value go to 0 */
	pkey_assert(child == waitpid(child, &status, 0));
	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);

	/* Test that the modification is visible in ptrace after execution */
	memset(xsave, 0xCC, xsave_size);
	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
	pkey_assert(ret == 0);
	pkey_assert(*pkey_register == 0);

	/* let the child run to completion and check its exit status */
	ret = ptrace(PTRACE_CONT, child, 0, 0);
	pkey_assert(ret == 0);
	pkey_assert(child == waitpid(child, &status, 0));
	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
	pkey_assert(WIFEXITED(status));
	pkey_assert(WEXITSTATUS(status) == 0);

	free(xsave);
}
#endif
/*
 * On a CPU/kernel *without* pkey support, pkey_mprotect() must fail.
 * Skipped when pkeys are supported.
 */
void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
{
	int size = PAGE_SIZE;
	int sret;

	if (cpu_has_pkeys()) {
		/*
		 * Fix: the old message said "no CPU support", but this
		 * branch fires when the CPU *does* support pkeys.
		 */
		dprintf1("SKIP: %s: CPU has pkey support\n", __func__);
		return;
	}

	sret = syscall(__NR_pkey_mprotect, ptr, size, PROT_READ, pkey);
	pkey_assert(sret < 0);
}
/*
 * Table of all pkey tests; each is handed a fresh pkey-protected
 * mapping by run_tests_once().
 */
void (*pkey_tests[])(int *ptr, u16 pkey) = {
	test_read_of_write_disabled_region,
	test_read_of_access_disabled_region,
	test_read_of_access_disabled_region_with_page_already_mapped,
	test_write_of_write_disabled_region,
	test_write_of_write_disabled_region_with_page_already_mapped,
	test_write_of_access_disabled_region,
	test_write_of_access_disabled_region_with_page_already_mapped,
	test_kernel_write_of_access_disabled_region,
	test_kernel_write_of_write_disabled_region,
	test_kernel_gup_of_access_disabled_region,
	test_kernel_gup_write_to_write_disabled_region,
	test_executing_on_unreadable_memory,
	test_implicit_mprotect_exec_only_memory,
	test_mprotect_with_pkey_0,
	test_ptrace_of_child,
	test_pkey_init_state,
	test_pkey_syscalls_on_non_allocated_pkey,
	test_pkey_syscalls_bad_args,
	test_pkey_alloc_exhaust,
	test_pkey_alloc_free_attach_pkey0,
#if defined(__i386__) || defined(__x86_64__)
	test_ptrace_modifies_pkru,
#endif
};
/*
 * Run every entry in pkey_tests[] once, each with a freshly
 * allocated random pkey attached to a new PAGE_SIZE mapping.
 */
void run_tests_once(void)
{
	int *ptr;
	int prot = PROT_READ|PROT_WRITE;

	for (test_nr = 0; test_nr < ARRAY_SIZE(pkey_tests); test_nr++) {
		int pkey;
		int orig_pkey_faults = pkey_faults;

		dprintf1("======================\n");
		dprintf1("test %d preparing...\n", test_nr);

		tracing_on();
		pkey = alloc_random_pkey();
		dprintf1("test %d starting with pkey: %d\n", test_nr, pkey);
		ptr = malloc_pkey(PAGE_SIZE, prot, pkey);
		dprintf1("test %d starting...\n", test_nr);
		pkey_tests[test_nr](ptr, pkey);
		dprintf1("freeing test memory: %p\n", ptr);
		free_pkey_malloc(ptr);
		sys_pkey_free(pkey);

		dprintf1("pkey_faults: %d\n", pkey_faults);
		dprintf1("orig_pkey_faults: %d\n", orig_pkey_faults);

		tracing_off();
		close_test_fds();

		printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
		dprintf1("======================\n\n");
	}
	iteration_nr++;
}
/* Seed the software shadow with the current hardware pkey register. */
void pkey_setup_shadow(void)
{
	shadow_pkey_reg = __read_pkey_reg();
}
/*
 * Entry point: if pkeys are unsupported, only run the negative
 * pkey_mprotect test; otherwise run the full suite many times.
 */
int main(void)
{
	int nr_iterations = 22;
	int pkeys_supported = is_pkeys_supported();

	srand((unsigned int)time(NULL));

	setup_handlers();

	printf("has pkeys: %d\n", pkeys_supported);

	if (!pkeys_supported) {
		int size = PAGE_SIZE;
		int *ptr;

		printf("running PKEY tests for unsupported CPU/OS\n");

		ptr  = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
		assert(ptr != (void *)-1);
		test_mprotect_pkey_on_unsupported_cpu(ptr, 1);
		exit(0);
	}

	pkey_setup_shadow();
	printf("startup pkey_reg: %016llx\n", read_pkey_reg());
	setup_hugetlbfs();

	while (nr_iterations-- > 0)
		run_tests_once();

	printf("done (all tests OK)\n");
	return 0;
}
| linux-master | tools/testing/selftests/mm/protection_keys.c |
// SPDX-License-Identifier: GPL-2.0
/* Test selecting other page sizes for mmap/shmget.
Before running this huge pages for each huge page size must have been
reserved.
For large pages beyond MAX_ORDER (like 1GB on x86) boot options must be used.
Also shmmax must be increased.
And you need to run as root to work around some weird permissions in shm.
And nothing using huge pages should run in parallel.
When the program aborts you may need to clean up the shm segments with
ipcrm -m by hand, like this
sudo ipcs | awk '$1 == "0x00000000" {print $2}' | xargs -n1 sudo ipcrm -m
(warning this will remove all if someone else uses them) */
#define _GNU_SOURCE 1
#include <sys/mman.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <glob.h>
#include <assert.h>
#include <unistd.h>
#include <stdarg.h>
#include <string.h>
#include "vm_util.h"
#define err(x) perror(x), exit(1)
#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
#if !defined(MAP_HUGETLB)
#define MAP_HUGETLB 0x40000
#endif
#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
#define SHM_HUGE_SHIFT 26
#define SHM_HUGE_MASK 0x3f
#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
#define NUM_PAGESIZES 5
#define NUM_PAGES 4
#define Dprintf(fmt...) // printf(fmt)
/* Huge page sizes (bytes) discovered from sysfs, and how many found. */
unsigned long page_sizes[NUM_PAGESIZES];
int num_page_sizes;
/* Smallest l such that (1UL << l) >= v, i.e. ceil(log2(v)). */
int ilog2(unsigned long v)
{
	int bits = 0;

	while ((1UL << bits) < v)
		bits++;
	return bits;
}
/*
 * Populate page_sizes[]/num_page_sizes from the hugepages-*kB
 * directories in sysfs.  Sizes are converted from kB to bytes.
 */
void find_pagesizes(void)
{
	glob_t g;
	int i;
	glob("/sys/kernel/mm/hugepages/hugepages-*kB", 0, NULL, &g);
	assert(g.gl_pathc <= NUM_PAGESIZES);

	for (i = 0; i < g.gl_pathc; i++) {
		sscanf(g.gl_pathv[i], "/sys/kernel/mm/hugepages/hugepages-%lukB",
				&page_sizes[i]);
		page_sizes[i] <<= 10;	/* kB -> bytes */
		printf("Found %luMB\n", page_sizes[i] >> 20);
	}
	num_page_sizes = g.gl_pathc;

	globfree(&g);
}
/* Print the free hugepage count for size 'ps'; base pages are skipped. */
void show(unsigned long ps)
{
	char cmd[100];

	if (ps == getpagesize())
		return;

	printf("%luMB: ", ps >> 20);
	fflush(stdout);

	snprintf(cmd, sizeof cmd,
		"cat /sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages",
		ps >> 10);
	system(cmd);
}
/*
 * Read one unsigned long from the file named by the printf-style
 * arguments.  Returns 0 if the file is missing (optionally warning)
 * or holds no parseable number.
 */
unsigned long read_sysfs(int warn, char *fmt, ...)
{
	char path[100];
	char *line = NULL;
	size_t linelen = 0;
	unsigned long val = 0;
	va_list ap;
	FILE *f;

	va_start(ap, fmt);
	vsnprintf(path, sizeof path, fmt, ap);
	va_end(ap);

	f = fopen(path, "r");
	if (!f) {
		if (warn)
			printf("missing %s\n", path);
		return 0;
	}

	if (getline(&line, &linelen, f) > 0)
		sscanf(line, "%lu", &val);

	fclose(f);
	free(line);
	return val;
}
/* Free hugepage count for size 'ps'; only warn for non-base pages. */
unsigned long read_free(unsigned long ps)
{
	int warn = (ps != getpagesize());

	return read_sysfs(warn,
			"/sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages",
			ps >> 10);
}
/*
 * mmap NUM_PAGES huge pages of 'size' with the given MAP_HUGE* flags,
 * touch them, and verify via sysfs that exactly NUM_PAGES free huge
 * pages were consumed (base-page runs skip the accounting check).
 */
void test_mmap(unsigned long size, unsigned flags)
{
	char *map;
	unsigned long before, after;
	int err;

	before = read_free(size);
	map = mmap(NULL, size*NUM_PAGES, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|flags, -1, 0);
	if (map == (char *)-1) err("mmap");
	/* fault every page in so the hugepages are actually allocated */
	memset(map, 0xff, size*NUM_PAGES);
	after = read_free(size);
	Dprintf("before %lu after %lu diff %ld size %lu\n",
		before, after, before - after, size);
	assert(size == getpagesize() || (before - after) == NUM_PAGES);
	show(size);
	err = munmap(map, size * NUM_PAGES);
	assert(!err);
}
/*
 * Same accounting check as test_mmap(), but through SysV shm with the
 * given SHM_HUGE* flags.  The segment is marked for removal right
 * after attach so it disappears on detach/exit.
 */
void test_shmget(unsigned long size, unsigned flags)
{
	int id;
	unsigned long before, after;
	int err;

	before = read_free(size);
	id = shmget(IPC_PRIVATE, size * NUM_PAGES, IPC_CREAT|0600|flags);
	if (id < 0) err("shmget");

	struct shm_info i;
	if (shmctl(id, SHM_INFO, (void *)&i) < 0) err("shmctl");
	Dprintf("alloc %lu res %lu\n", i.shm_tot, i.shm_rss);


	Dprintf("id %d\n", id);
	/*
	 * NOTE(review): 0600 here is passed as shmat() *flags*, not a
	 * mode; presumably harmless, but confirm it was intended.
	 */
	char *map = shmat(id, NULL, 0600);
	if (map == (char*)-1) err("shmat");

	/* mark for removal now; actual teardown happens at shmdt() */
	shmctl(id, IPC_RMID, NULL);

	memset(map, 0xff, size*NUM_PAGES);
	after = read_free(size);

	Dprintf("before %lu after %lu diff %ld size %lu\n",
		before, after, before - after, size);
	assert(size == getpagesize() || (before - after) == NUM_PAGES);
	show(size);
	err = shmdt(map);
	assert(!err);
}
/*
 * Verify the environment can run the tests: enough free huge pages of
 * every discovered size, shmmax large enough, and (on x86-64) 1GB
 * pages reserved.  Exits 0 (skip) when a precondition is missing.
 */
void sanity_checks(void)
{
	int i;
	unsigned long largest = getpagesize();

	for (i = 0; i < num_page_sizes; i++) {
		if (page_sizes[i] > largest)
			largest = page_sizes[i];

		if (read_free(page_sizes[i]) < NUM_PAGES) {
			printf("Not enough huge pages for page size %lu MB, need %u\n",
				page_sizes[i] >> 20,
				NUM_PAGES);
			exit(0);
		}
	}

	if (read_sysfs(0, "/proc/sys/kernel/shmmax") < NUM_PAGES * largest) {
		/* Fix: message previously lacked a trailing newline */
		printf("Please do echo %lu > /proc/sys/kernel/shmmax\n", largest * NUM_PAGES);
		exit(0);
	}

#if defined(__x86_64__)
	if (largest != 1U<<30) {
		printf("No GB pages available on x86-64\n"
		       "Please boot with hugepagesz=1G hugepages=%d\n", NUM_PAGES);
		exit(0);
	}
#endif
}
/*
 * Exercise mmap() and shmget() with every discovered huge page size,
 * both via explicit MAP_HUGE_*/
/* SHM_HUGE_* shift encodings and via the
 * default huge page size.
 */
int main(void)
{
	int i;
	unsigned default_hps = default_huge_page_size();

	find_pagesizes();

	sanity_checks();

	for (i = 0; i < num_page_sizes; i++) {
		unsigned long ps = page_sizes[i];
		int arg = ilog2(ps) << MAP_HUGE_SHIFT;
		printf("Testing %luMB mmap with shift %x\n", ps >> 20, arg);
		test_mmap(ps, MAP_HUGETLB | arg);
	}
	printf("Testing default huge mmap\n");
	test_mmap(default_hps, MAP_HUGETLB);

	puts("Testing non-huge shmget");
	test_shmget(getpagesize(), 0);

	for (i = 0; i < num_page_sizes; i++) {
		unsigned long ps = page_sizes[i];
		int arg = ilog2(ps) << SHM_HUGE_SHIFT;
		printf("Testing %luMB shmget with shift %x\n", ps >> 20, arg);
		test_shmget(ps, SHM_HUGETLB | arg);
	}
	puts("default huge shmget");
	test_shmget(default_hps, SHM_HUGETLB);

	return 0;
}
| linux-master | tools/testing/selftests/mm/thuge-gen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The main purpose of the tests here is to exercise the migration entry code
* paths in the kernel.
*/
#include "../kselftest_harness.h"
#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>
#define TWOMEG (2<<20)
#define RUNTIME (20)
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
/* Per-test state: worker threads/pids plus the two NUMA nodes used. */
FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;	/* numa_num_task_cpus() - 1 */
	int n1;		/* first available NUMA node, -1 if none */
	int n2;		/* second available NUMA node, -1 if none */
};
/*
 * Pick the first two online NUMA nodes and allocate the thread/pid
 * arrays.  Tests SKIP themselves when n1/n2 remain -1.
 * NOTE(review): the loop bound is '< numa_max_possible_node()', which
 * presumably never matters in practice but would miss the very last
 * possible node — confirm against libnuma semantics.
 */
FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};
/* Release the arrays allocated in FIXTURE_SETUP. */
FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}
/*
 * Ping-pong the page at 'ptr' between nodes n1 and n2 for RUNTIME
 * seconds via move_pages().  Returns 0 on success, -1 on clock
 * failure, -2 on a migration failure.
 */
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

		/* move the single page to the current target node n2 */
		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0)
				printf("Didn't migrate %d pages\n", ret);
			else
				perror("Couldn't migrate pages");
			return -2;
		}

		/* swap nodes so the next iteration moves it back */
		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}
	return 0;	/* unreachable; loop exits via return above */
}
/*
 * Worker body: hammer reads on *ptr forever so concurrent migrations
 * constantly hit the migration-entry wait path.  Cancellable via
 * pthread_testcancel().
 */
void *access_mem(void *ptr)
{
	volatile uint64_t y = 0;
	volatile uint64_t *x = ptr;

	while (1) {
		pthread_testcancel();
		y += *x;

		/* Prevent the compiler from optimizing out the writes to y: */
		asm volatile("" : "+r" (y));
	}

	return NULL;
}
/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try and access them triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* fault the pages in before starting the readers */
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}
/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	/* forked children (not threads) hammer the shared mapping */
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			/* die with the parent so children never leak */
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	/* over-allocate so we can align to a 2MB boundary for THP */
	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/mm/migration.c |
/*
* Copyright (c) 2019 Alexey Dobriyan <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Test that open(O_TMPFILE), linkat() doesn't screw accounting. */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <unistd.h>
/*
 * Regression test for tmpfs inode accounting with O_TMPFILE:
 * on a tmpfs limited to 3 inodes (root + 2), create an O_TMPFILE
 * inode, link it into place (consuming its "temporary" slot), and
 * verify a second O_TMPFILE can still be created afterwards.
 * Returns 0 on pass, 1 on failure, 4 to signal "skip" when
 * namespaces are unavailable.
 */
int main(void)
{
	int fd;

	/* private mount namespace so our tmpfs mount is invisible */
	if (unshare(CLONE_NEWNS) == -1) {
		if (errno == ENOSYS || errno == EPERM) {
			fprintf(stderr, "error: unshare, errno %d\n", errno);
			return 4;	/* kselftest skip */
		}
		fprintf(stderr, "error: unshare, errno %d\n", errno);
		return 1;
	}

	if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
		fprintf(stderr, "error: mount '/', errno %d\n", errno);
		return 1;
	}

	/* Our heroes: 1 root inode, 1 O_TMPFILE inode, 1 permanent inode. */
	if (mount(NULL, "/tmp", "tmpfs", 0, "nr_inodes=3") == -1) {
		fprintf(stderr, "error: mount tmpfs, errno %d\n", errno);
		return 1;
	}

	fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
	if (fd == -1) {
		fprintf(stderr, "error: open 1, errno %d\n", errno);
		return 1;
	}
	if (linkat(fd, "", AT_FDCWD, "/tmp/1", AT_EMPTY_PATH) == -1) {
		fprintf(stderr, "error: linkat, errno %d\n", errno);
		return 1;
	}
	close(fd);

	/* with correct accounting, one inode slot is free again */
	fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
	if (fd == -1) {
		fprintf(stderr, "error: open 2, errno %d\n", errno);
		return 1;
	}

	return 0;
}
| linux-master | tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/types.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include "pidfd.h"
#include "../clone3/clone3_selftests.h"
#include "../kselftest_harness.h"
/* Indices into ns_info[] and the per-fixture nsfds arrays. */
enum {
	PIDFD_NS_USER,
	PIDFD_NS_MNT,
	PIDFD_NS_PID,
	PIDFD_NS_UTS,
	PIDFD_NS_IPC,
	PIDFD_NS_NET,
	PIDFD_NS_CGROUP,
	PIDFD_NS_PIDCLD,
	PIDFD_NS_TIME,
	PIDFD_NS_MAX
};
/* /proc/<pid>/ns entry name and matching CLONE_* flag per namespace. */
const struct ns_info {
	const char *name;
	int flag;
} ns_info[] = {
	[PIDFD_NS_USER]   = { "user",             CLONE_NEWUSER,   },
	[PIDFD_NS_MNT]    = { "mnt",              CLONE_NEWNS,     },
	[PIDFD_NS_PID]    = { "pid",              CLONE_NEWPID,    },
	[PIDFD_NS_UTS]    = { "uts",              CLONE_NEWUTS,    },
	[PIDFD_NS_IPC]    = { "ipc",              CLONE_NEWIPC,    },
	[PIDFD_NS_NET]    = { "net",              CLONE_NEWNET,    },
	[PIDFD_NS_CGROUP] = { "cgroup",           CLONE_NEWCGROUP, },
	[PIDFD_NS_PIDCLD] = { "pid_for_children", 0,               },
	[PIDFD_NS_TIME]	  = { "time",             CLONE_NEWTIME,   },
};
/*
 * Fixture state: this process, one already-exited child, and two
 * stopped children, each with its pid, pidfd and namespace fds.
 */
FIXTURE(current_nsset)
{
	pid_t pid;
	int pidfd;
	int nsfds[PIDFD_NS_MAX];

	pid_t child_pid_exited;
	int child_pidfd_exited;

	pid_t child_pid1;
	int child_pidfd1;
	int child_nsfds1[PIDFD_NS_MAX];

	pid_t child_pid2;
	int child_pidfd2;
	int child_nsfds2[PIDFD_NS_MAX];
};
/* Raw waitid(2) wrapper; siginfo and rusage are deliberately NULL. */
static int sys_waitid(int which, pid_t pid, int options)
{
	return syscall(__NR_waitid, which, pid, NULL, options, NULL);
}
/*
 * clone3() a child with CLONE_PIDFD plus the given flags, storing the
 * new pidfd in *pidfd.  Returns the child's pid (0 in the child).
 * NOTE(review): the size passed is sizeof(struct clone_args) while
 * 'args' is a struct __clone_args — presumably identical via
 * clone3_selftests.h; confirm.
 */
pid_t create_child(int *pidfd, unsigned flags)
{
	struct __clone_args args = {
		.flags		= CLONE_PIDFD | flags,
		.exit_signal	= SIGCHLD,
		.pidfd		= ptr_to_u64(pidfd),
	};

	return sys_clone3(&args, sizeof(struct clone_args));
}
/*
 * Move the caller into a fresh time namespace.  unshare() only
 * affects future children, so re-enter via time_for_children.
 * Returns true on success.
 */
static bool switch_timens(void)
{
	int fd, ret;

	if (unshare(CLONE_NEWTIME))
		return false;

	fd = open("/proc/self/ns/time_for_children", O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return false;

	ret = setns(fd, CLONE_NEWTIME);
	close(fd);
	return ret == 0;
}
/* read(2) wrapper that retries when interrupted by a signal. */
static ssize_t read_nointr(int fd, void *buf, size_t count)
{
	ssize_t nread;

	for (;;) {
		nread = read(fd, buf, count);
		if (nread >= 0 || errno != EINTR)
			return nread;
	}
}
/* write(2) wrapper that retries when interrupted by a signal. */
static ssize_t write_nointr(int fd, const void *buf, size_t count)
{
	ssize_t nwritten;

	for (;;) {
		nwritten = write(fd, buf, count);
		if (nwritten >= 0 || errno != EINTR)
			return nwritten;
	}
}
/*
 * Build the fixture: record our own namespace fds and pidfd, spawn one
 * child that exits immediately (kept observable via WNOWAIT), and two
 * children parked in pause() inside fresh namespaces, then capture all
 * of their namespace fds.
 */
FIXTURE_SETUP(current_nsset)
{
	int i, proc_fd, ret;
	int ipc_sockets[2];
	char c;

	/* Mark every slot invalid so teardown only closes what we opened. */
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		self->nsfds[i] = -EBADF;
		self->child_nsfds1[i] = -EBADF;
		self->child_nsfds2[i] = -EBADF;
	}

	proc_fd = open("/proc/self/ns", O_DIRECTORY | O_CLOEXEC);
	ASSERT_GE(proc_fd, 0) {
		TH_LOG("%m - Failed to open /proc/self/ns");
	}

	self->pid = getpid();

	/*
	 * NOTE(review): our own namespace fds are opened again in the final
	 * loop below, leaking this first set - looks redundant; confirm
	 * before removing.
	 */
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];

		self->nsfds[i] = openat(proc_fd, info->name, O_RDONLY | O_CLOEXEC);
		if (self->nsfds[i] < 0) {
			/* Missing ns files are tolerated (kernel config). */
			EXPECT_EQ(errno, ENOENT) {
				TH_LOG("%m - Failed to open %s namespace for process %d",
				       info->name, self->pid);
			}
		}
	}

	self->pidfd = sys_pidfd_open(self->pid, 0);
	EXPECT_GT(self->pidfd, 0) {
		TH_LOG("%m - Failed to open pidfd for process %d", self->pid);
	}

	/* Create task that exits right away. */
	self->child_pid_exited = create_child(&self->child_pidfd_exited,
					      CLONE_NEWUSER | CLONE_NEWNET);
	EXPECT_GT(self->child_pid_exited, 0);
	if (self->child_pid_exited == 0)
		_exit(EXIT_SUCCESS);

	/* WNOWAIT leaves the child reapable again in teardown. */
	ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, WEXITED | WNOWAIT), 0);

	/*
	 * NOTE(review): self->pidfd was already opened above; this second
	 * open leaks the first fd - verify which open is intended.
	 */
	self->pidfd = sys_pidfd_open(self->pid, 0);
	EXPECT_GE(self->pidfd, 0) {
		TH_LOG("%m - Failed to open pidfd for process %d", self->pid);
	}

	ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
	EXPECT_EQ(ret, 0);

	/* Create tasks that will be stopped. */
	self->child_pid1 = create_child(&self->child_pidfd1,
					CLONE_NEWUSER | CLONE_NEWNS |
					CLONE_NEWCGROUP | CLONE_NEWIPC |
					CLONE_NEWUTS | CLONE_NEWPID |
					CLONE_NEWNET);
	EXPECT_GE(self->child_pid1, 0);
	if (self->child_pid1 == 0) {
		close(ipc_sockets[0]);

		/* Time ns can't be entered via clone flags; unshare+setns. */
		if (!switch_timens())
			_exit(EXIT_FAILURE);

		/* Tell the parent all namespaces are set up. */
		if (write_nointr(ipc_sockets[1], "1", 1) < 0)
			_exit(EXIT_FAILURE);

		close(ipc_sockets[1]);

		pause();
		_exit(EXIT_SUCCESS);
	}

	close(ipc_sockets[1]);
	ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
	close(ipc_sockets[0]);

	ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
	EXPECT_EQ(ret, 0);

	self->child_pid2 = create_child(&self->child_pidfd2,
					CLONE_NEWUSER | CLONE_NEWNS |
					CLONE_NEWCGROUP | CLONE_NEWIPC |
					CLONE_NEWUTS | CLONE_NEWPID |
					CLONE_NEWNET);
	EXPECT_GE(self->child_pid2, 0);
	if (self->child_pid2 == 0) {
		close(ipc_sockets[0]);

		if (!switch_timens())
			_exit(EXIT_FAILURE);

		if (write_nointr(ipc_sockets[1], "1", 1) < 0)
			_exit(EXIT_FAILURE);

		close(ipc_sockets[1]);

		pause();
		_exit(EXIT_SUCCESS);
	}

	close(ipc_sockets[1]);
	ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
	close(ipc_sockets[0]);

	/* Capture namespace fds for ourselves and both paused children. */
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		char p[100];

		const struct ns_info *info = &ns_info[i];

		self->nsfds[i] = openat(proc_fd, info->name, O_RDONLY | O_CLOEXEC);
		if (self->nsfds[i] < 0) {
			EXPECT_EQ(errno, ENOENT) {
				TH_LOG("%m - Failed to open %s namespace for process %d",
				       info->name, self->pid);
			}
		}

		ret = snprintf(p, sizeof(p), "/proc/%d/ns/%s",
			       self->child_pid1, info->name);
		EXPECT_GT(ret, 0);
		EXPECT_LT(ret, sizeof(p));

		self->child_nsfds1[i] = open(p, O_RDONLY | O_CLOEXEC);
		if (self->child_nsfds1[i] < 0) {
			EXPECT_EQ(errno, ENOENT) {
				TH_LOG("%m - Failed to open %s namespace for process %d",
				       info->name, self->child_pid1);
			}
		}

		ret = snprintf(p, sizeof(p), "/proc/%d/ns/%s",
			       self->child_pid2, info->name);
		EXPECT_GT(ret, 0);
		EXPECT_LT(ret, sizeof(p));

		self->child_nsfds2[i] = open(p, O_RDONLY | O_CLOEXEC);
		if (self->child_nsfds2[i] < 0) {
			EXPECT_EQ(errno, ENOENT) {
				/* Fixed: log child_pid2 here, not child_pid1. */
				TH_LOG("%m - Failed to open %s namespace for process %d",
				       info->name, self->child_pid2);
			}
		}
	}

	close(proc_fd);
}
/*
 * Tear down the fixture: kill both paused children, close every fd we
 * opened, and reap all three children (the exited child was left
 * observable via WNOWAIT in setup).
 */
FIXTURE_TEARDOWN(current_nsset)
{
	int i;

	/* Both children are parked in pause(); SIGKILL them via pidfd. */
	ASSERT_EQ(sys_pidfd_send_signal(self->child_pidfd1,
					SIGKILL, NULL, 0), 0);
	ASSERT_EQ(sys_pidfd_send_signal(self->child_pidfd2,
					SIGKILL, NULL, 0), 0);

	/* Slots never opened still hold -EBADF and are skipped. */
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		if (self->nsfds[i] >= 0)
			close(self->nsfds[i]);
		if (self->child_nsfds1[i] >= 0)
			close(self->child_nsfds1[i]);
		if (self->child_nsfds2[i] >= 0)
			close(self->child_nsfds2[i]);
	}

	if (self->child_pidfd1 >= 0)
		EXPECT_EQ(0, close(self->child_pidfd1));
	if (self->child_pidfd2 >= 0)
		EXPECT_EQ(0, close(self->child_pidfd2));

	/* Reap all children so no zombies outlive the test. */
	ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, WEXITED), 0);
	ASSERT_EQ(sys_waitid(P_PID, self->child_pid1, WEXITED), 0);
	ASSERT_EQ(sys_waitid(P_PID, self->child_pid2, WEXITED), 0);
}
/*
 * Open /proc/<pid>/ns/<ns> and return the fd, pinning the namespace.
 * Returns -EIO if the path does not fit, otherwise open(2)'s result.
 */
static int preserve_ns(const int pid, const char *ns)
{
	char path[50];
	int len;

	len = snprintf(path, sizeof(path), "/proc/%d/ns/%s", pid, ns);
	if (len < 0 || (size_t)len >= sizeof(path))
		return -EIO;

	return open(path, O_RDONLY | O_CLOEXEC);
}
/*
 * Compare the namespace pinned by ns_fd1 with pid2's namespace <ns>.
 * Two tasks share a namespace iff their nsfs inodes match.
 * Returns 1 if the namespaces are identical, 0 if they differ, and -1
 * on any error.
 */
static int in_same_namespace(int ns_fd1, pid_t pid2, const char *ns)
{
	struct stat st1, st2;
	int fd2, err;

	if (fstat(ns_fd1, &st1) < 0)
		return -1;

	fd2 = preserve_ns(pid2, ns);
	if (fd2 < 0)
		return -1;

	err = fstat(fd2, &st2);
	close(fd2);
	if (err < 0)
		return -1;

	return (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) ? 1 : 0;
}
/* Test that we can't pass garbage to the kernel. */
TEST_F(current_nsset, invalid_flags)
{
	/* A zero flag set is invalid for a pidfd-based setns(). */
	ASSERT_NE(setns(self->pidfd, 0), 0);
	EXPECT_EQ(errno, EINVAL);

	ASSERT_NE(setns(self->pidfd, -1), 0);
	EXPECT_EQ(errno, EINVAL);

	/* Non-namespace clone flags must be rejected. */
	ASSERT_NE(setns(self->pidfd, CLONE_VM), 0);
	EXPECT_EQ(errno, EINVAL);

	ASSERT_NE(setns(self->pidfd, CLONE_NEWUSER | CLONE_VM), 0);
	EXPECT_EQ(errno, EINVAL);
}
/* Test that we can't attach to a task that has already exited. */
TEST_F(current_nsset, pidfd_exited_child)
{
	int i;
	pid_t pid;

	/* The child is a zombie (reaped with WNOWAIT); setns must fail. */
	ASSERT_NE(setns(self->child_pidfd_exited, CLONE_NEWUSER | CLONE_NEWNET),
		  0);
	EXPECT_EQ(errno, ESRCH);

	pid = getpid();
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];
		/* Verify that we haven't changed any namespaces. */
		if (self->nsfds[i] >= 0)
			ASSERT_EQ(in_same_namespace(self->nsfds[i], pid, info->name), 1);
	}
}
/*
 * Attach to child 1's namespaces one at a time through its pidfd and
 * verify after each step that we landed in the expected namespace.
 */
TEST_F(current_nsset, pidfd_incremental_setns)
{
	int i;
	pid_t pid;

	pid = getpid();
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];
		int nsfd;

		if (self->child_nsfds1[i] < 0)
			continue;

		/* flag == 0 entries (pid_for_children) can't be setns'd. */
		if (info->flag) {
			ASSERT_EQ(setns(self->child_pidfd1, info->flag), 0) {
				TH_LOG("%m - Failed to setns to %s namespace of %d via pidfd %d",
				       info->name, self->child_pid1,
				       self->child_pidfd1);
			}
		}

		/* Verify that we have changed to the correct namespaces. */
		if (info->flag == CLONE_NEWPID)
			/* Our own pid ns only changes for future children. */
			nsfd = self->nsfds[i];
		else
			nsfd = self->child_nsfds1[i];
		ASSERT_EQ(in_same_namespace(nsfd, pid, info->name), 1) {
			TH_LOG("setns failed to place us correctly into %s namespace of %d via pidfd %d",
			       info->name, self->child_pid1,
			       self->child_pidfd1);
		}
		TH_LOG("Managed to correctly setns to %s namespace of %d via pidfd %d",
		       info->name, self->child_pid1, self->child_pidfd1);
	}
}
/*
 * Same as pidfd_incremental_setns but attaching via the individual
 * namespace fds instead of the child's pidfd.
 */
TEST_F(current_nsset, nsfd_incremental_setns)
{
	int i;
	pid_t pid;

	pid = getpid();
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];
		int nsfd;

		if (self->child_nsfds1[i] < 0)
			continue;

		if (info->flag) {
			ASSERT_EQ(setns(self->child_nsfds1[i], info->flag), 0) {
				TH_LOG("%m - Failed to setns to %s namespace of %d via nsfd %d",
				       info->name, self->child_pid1,
				       self->child_nsfds1[i]);
			}
		}

		/* Verify that we have changed to the correct namespaces. */
		if (info->flag == CLONE_NEWPID)
			/* Our own pid ns only changes for future children. */
			nsfd = self->nsfds[i];
		else
			nsfd = self->child_nsfds1[i];
		ASSERT_EQ(in_same_namespace(nsfd, pid, info->name), 1) {
			TH_LOG("setns failed to place us correctly into %s namespace of %d via nsfd %d",
			       info->name, self->child_pid1,
			       self->child_nsfds1[i]);
		}
		TH_LOG("Managed to correctly setns to %s namespace of %d via nsfd %d",
		       info->name, self->child_pid1, self->child_nsfds1[i]);
	}
}
/*
 * OR together all of child 1's namespace flags and attach to all of them
 * with a single pidfd-based setns() call, then verify each one.
 */
TEST_F(current_nsset, pidfd_one_shot_setns)
{
	unsigned flags = 0;
	int i;
	pid_t pid;

	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];

		if (self->child_nsfds1[i] < 0)
			continue;

		flags |= info->flag;
		TH_LOG("Adding %s namespace of %d to list of namespaces to attach to",
		       info->name, self->child_pid1);
	}

	/* One setns() call moves us into every requested namespace. */
	ASSERT_EQ(setns(self->child_pidfd1, flags), 0) {
		TH_LOG("%m - Failed to setns to namespaces of %d",
		       self->child_pid1);
	}

	pid = getpid();
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];
		int nsfd;

		if (self->child_nsfds1[i] < 0)
			continue;

		/* Verify that we have changed to the correct namespaces. */
		if (info->flag == CLONE_NEWPID)
			/* Our own pid ns only changes for future children. */
			nsfd = self->nsfds[i];
		else
			nsfd = self->child_nsfds1[i];
		ASSERT_EQ(in_same_namespace(nsfd, pid, info->name), 1) {
			TH_LOG("setns failed to place us correctly into %s namespace of %d",
			       info->name, self->child_pid1);
		}
		TH_LOG("Managed to correctly setns to %s namespace of %d",
		       info->name, self->child_pid1);
	}
}
/*
 * Attach to child 1's namespaces, then verify that we cannot subsequently
 * attach to child 2's sibling namespaces: they are owned by a user
 * namespace we have no capabilities in.
 */
TEST_F(current_nsset, no_foul_play)
{
	unsigned flags = 0;
	int i;

	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];

		if (self->child_nsfds1[i] < 0)
			continue;

		flags |= info->flag;
		if (info->flag) /* No use logging pid_for_children. */
			TH_LOG("Adding %s namespace of %d to list of namespaces to attach to",
			       info->name, self->child_pid1);
	}

	/* Fixed log message typo: "vid pidfd" -> "via pidfd". */
	ASSERT_EQ(setns(self->child_pidfd1, flags), 0) {
		TH_LOG("%m - Failed to setns to namespaces of %d via pidfd %d",
		       self->child_pid1, self->child_pidfd1);
	}

	/*
	 * Can't setns to a user namespace outside of our hierarchy since we
	 * don't have caps in there and didn't create it. That means that under
	 * no circumstances should we be able to setns to any of the other
	 * ones since they aren't owned by our user namespace.
	 */
	for (i = 0; i < PIDFD_NS_MAX; i++) {
		const struct ns_info *info = &ns_info[i];

		if (self->child_nsfds2[i] < 0 || !info->flag)
			continue;

		ASSERT_NE(setns(self->child_pidfd2, info->flag), 0) {
			TH_LOG("Managed to setns to %s namespace of %d via pidfd %d",
			       info->name, self->child_pid2,
			       self->child_pidfd2);
		}
		TH_LOG("%m - Correctly failed to setns to %s namespace of %d via pidfd %d",
		       info->name, self->child_pid2,
		       self->child_pidfd2);

		ASSERT_NE(setns(self->child_nsfds2[i], info->flag), 0) {
			TH_LOG("Managed to setns to %s namespace of %d via nsfd %d",
			       info->name, self->child_pid2,
			       self->child_nsfds2[i]);
		}
		TH_LOG("%m - Correctly failed to setns to %s namespace of %d via nsfd %d",
		       info->name, self->child_pid2,
		       self->child_nsfds2[i]);
	}
}
/* setns() on an fd that is not a pidfd or nsfd must fail with EINVAL. */
TEST(setns_einval)
{
	int fd;

	/* A memfd is a plain file fd, not a namespace or pidfd. */
	fd = sys_memfd_create("rostock", 0);
	EXPECT_GT(fd, 0);

	ASSERT_NE(setns(fd, 0), 0);
	EXPECT_EQ(errno, EINVAL);
	close(fd);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/pidfd/pidfd_setns_test.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/types.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include "pidfd.h"
#include "../kselftest.h"
#define str(s) _str(s)
#define _str(s) #s
#define CHILD_THREAD_MIN_WAIT 3 /* seconds */
#define MAX_EVENTS 5
static bool have_pidfd_send_signal;
/*
 * clone() a child running fn with the given flags (SIGCHLD is always
 * added so the parent can wait), returning the child's pidfd via the
 * kernel's CLONE_PIDFD mechanism if requested by the caller's flags.
 *
 * NOTE(review): the stack is declared as 1024 char *pointers*, so its
 * byte size is 1024 * sizeof(char *), not stack_size bytes; the
 * stack + stack_size expression reaches the end of the array only via
 * element-typed pointer arithmetic. Confirm this sizing is intended.
 */
static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
{
	size_t stack_size = 1024;
	char *stack[1024] = { 0 };

#ifdef __ia64__
	/* ia64 needs __clone2() with an explicit stack base and size. */
	return __clone2(fn, stack, stack_size, flags | SIGCHLD, NULL, pidfd);
#else
	/* Stack grows downward elsewhere: pass the top of the buffer. */
	return clone(fn, stack + stack_size, flags | SIGCHLD, NULL, pidfd);
#endif
}
static int signal_received;
/* Signal handler: latch receipt of SIGUSR1; all other signals ignored. */
static void set_signal_received_on_sigusr1(int sig)
{
	if (sig != SIGUSR1)
		return;

	signal_received = 1;
}
/*
 * Straightforward test to see whether pidfd_send_signal() works is to send
 * a signal to ourself.
 */
static int test_pidfd_send_signal_simple_success(void)
{
	int pidfd, ret;
	const char *test_name = "pidfd_send_signal send SIGUSR1";

	if (!have_pidfd_send_signal) {
		ksft_test_result_skip(
			"%s test: pidfd_send_signal() syscall not supported\n",
			test_name);
		return 0;
	}

	/* /proc/self opened O_DIRECTORY doubles as a pidfd for ourselves. */
	pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
	if (pidfd < 0)
		ksft_exit_fail_msg(
			"%s test: Failed to open process file descriptor\n",
			test_name);

	signal(SIGUSR1, set_signal_received_on_sigusr1);

	ret = sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0);
	close(pidfd);
	if (ret < 0)
		ksft_exit_fail_msg("%s test: Failed to send signal\n",
				   test_name);

	/* The handler must have run synchronously before we got here. */
	if (signal_received != 1)
		ksft_exit_fail_msg("%s test: Failed to receive signal\n",
				   test_name);

	/* Reset for any later test relying on the flag. */
	signal_received = 0;
	ksft_test_result_pass("%s test: Sent signal\n", test_name);
	return 0;
}
/*
 * Sending a signal through a pidfd whose target has already exited and
 * been reaped must fail with ESRCH.
 */
static int test_pidfd_send_signal_exited_fail(void)
{
	int pidfd, ret, saved_errno;
	char buf[256];
	pid_t pid;
	const char *test_name = "pidfd_send_signal signal exited process";

	if (!have_pidfd_send_signal) {
		ksft_test_result_skip(
			"%s test: pidfd_send_signal() syscall not supported\n",
			test_name);
		return 0;
	}

	pid = fork();
	if (pid < 0)
		ksft_exit_fail_msg("%s test: Failed to create new process\n",
				   test_name);

	if (pid == 0)
		_exit(EXIT_SUCCESS);

	/* Open the pidfd while the child still exists ... */
	snprintf(buf, sizeof(buf), "/proc/%d", pid);

	pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);

	/* ... then reap it so the pidfd points at a dead task. */
	ret = wait_for_pid(pid);
	ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);

	if (pidfd < 0)
		ksft_exit_fail_msg(
			"%s test: Failed to open process file descriptor\n",
			test_name);

	ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
	/* close() may clobber errno; save it first. */
	saved_errno = errno;
	close(pidfd);
	if (ret == 0)
		ksft_exit_fail_msg(
			"%s test: Managed to send signal to process even though it should have failed\n",
			test_name);

	if (saved_errno != ESRCH)
		ksft_exit_fail_msg(
			"%s test: Expected to receive ESRCH as errno value but received %d instead\n",
			test_name, saved_errno);

	ksft_test_result_pass("%s test: Failed to send signal as expected\n",
			      test_name);
	return 0;
}
/*
* Maximum number of cycles we allow. This is equivalent to PID_MAX_DEFAULT.
* If users set a higher limit or we have cycled PIDFD_MAX_DEFAULT number of
* times then we skip the test to not go into an infinite loop or block for a
* long time.
*/
#define PIDFD_MAX_DEFAULT 0x8000
/*
 * Verify that a pidfd keeps referring to the original task even after its
 * pid has been recycled: inside a fresh pid namespace, fork until pid
 * PID_RECYCLE is handed out, pin it via /proc, let it die, fork until the
 * pid is reused, and check that signalling through the old pidfd fails
 * with ESRCH.
 */
static int test_pidfd_send_signal_recycled_pid_fail(void)
{
	int i, ret;
	pid_t pid1;
	const char *test_name = "pidfd_send_signal signal recycled pid";

	if (!have_pidfd_send_signal) {
		ksft_test_result_skip(
			"%s test: pidfd_send_signal() syscall not supported\n",
			test_name);
		return 0;
	}

	/* New pid ns gives us a predictable, small pid space to cycle. */
	ret = unshare(CLONE_NEWPID);
	if (ret < 0) {
		if (errno == EPERM) {
			ksft_test_result_skip("%s test: Unsharing pid namespace not permitted\n",
					      test_name);
			return 0;
		}
		ksft_exit_fail_msg("%s test: Failed to unshare pid namespace\n",
				   test_name);
	}

	/* Mount ns lets us remount /proc for the new pid ns privately. */
	ret = unshare(CLONE_NEWNS);
	if (ret < 0) {
		if (errno == EPERM) {
			ksft_test_result_skip("%s test: Unsharing mount namespace not permitted\n",
					      test_name);
			return 0;
		}
		ksft_exit_fail_msg("%s test: Failed to unshare mount namespace\n",
				   test_name);
	}

	ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
	if (ret < 0)
		ksft_exit_fail_msg("%s test: Failed to remount / private\n",
				   test_name);

	/* pid 1 in new pid namespace */
	pid1 = fork();
	if (pid1 < 0)
		ksft_exit_fail_msg("%s test: Failed to create new process\n",
				   test_name);

	if (pid1 == 0) {
		char buf[256];
		pid_t pid2;
		int pidfd = -1;

		(void)umount2("/proc", MNT_DETACH);
		ret = mount("proc", "/proc", "proc", 0, NULL);
		if (ret < 0)
			_exit(PIDFD_ERROR);

		/* grab pid PID_RECYCLE */
		for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
			pid2 = fork();
			if (pid2 < 0)
				_exit(PIDFD_ERROR);

			if (pid2 == 0)
				_exit(PIDFD_PASS);

			if (pid2 == PID_RECYCLE) {
				/* Pin the pid via its /proc dir fd. */
				snprintf(buf, sizeof(buf), "/proc/%d", pid2);
				ksft_print_msg("pid to recycle is %d\n", pid2);
				pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);
			}

			if (wait_for_pid(pid2))
				_exit(PIDFD_ERROR);

			if (pid2 >= PID_RECYCLE)
				break;
		}

		/*
		 * We want to be as predictable as we can so if we haven't been
		 * able to grab pid PID_RECYCLE skip the test.
		 */
		if (pid2 != PID_RECYCLE) {
			/* skip test */
			close(pidfd);
			_exit(PIDFD_SKIP);
		}

		if (pidfd < 0)
			_exit(PIDFD_ERROR);

		/* Now fork until the pid counter wraps to PID_RECYCLE. */
		for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
			char c;
			int pipe_fds[2];
			pid_t recycled_pid;
			int child_ret = PIDFD_PASS;

			ret = pipe2(pipe_fds, O_CLOEXEC);
			if (ret < 0)
				_exit(PIDFD_ERROR);

			recycled_pid = fork();
			if (recycled_pid < 0)
				_exit(PIDFD_ERROR);

			if (recycled_pid == 0) {
				close(pipe_fds[1]);
				(void)read(pipe_fds[0], &c, 1);
				close(pipe_fds[0]);

				_exit(PIDFD_PASS);
			}

			/*
			 * Stop the child so we can inspect whether we have
			 * recycled pid PID_RECYCLE.
			 */
			close(pipe_fds[0]);
			ret = kill(recycled_pid, SIGSTOP);
			close(pipe_fds[1]);
			if (ret) {
				(void)wait_for_pid(recycled_pid);
				_exit(PIDFD_ERROR);
			}

			/*
			 * We have recycled the pid. Try to signal it. This
			 * needs to fail since this is a different process than
			 * the one the pidfd refers to.
			 */
			if (recycled_pid == PID_RECYCLE) {
				ret = sys_pidfd_send_signal(pidfd, SIGCONT,
							    NULL, 0);
				if (ret && errno == ESRCH)
					child_ret = PIDFD_XFAIL;
				else
					child_ret = PIDFD_FAIL;
			}

			/* let the process move on */
			ret = kill(recycled_pid, SIGCONT);
			if (ret)
				(void)kill(recycled_pid, SIGKILL);

			if (wait_for_pid(recycled_pid))
				_exit(PIDFD_ERROR);

			switch (child_ret) {
			case PIDFD_FAIL:
				/* fallthrough */
			case PIDFD_XFAIL:
				_exit(child_ret);
			case PIDFD_PASS:
				break;
			default:
				/* not reached */
				_exit(PIDFD_ERROR);
			}

			/*
			 * If the user set a custom pid_max limit we could be
			 * in the millions.
			 * Skip the test in this case.
			 */
			if (recycled_pid > PIDFD_MAX_DEFAULT)
				_exit(PIDFD_SKIP);
		}

		/* failed to recycle pid */
		_exit(PIDFD_SKIP);
	}

	/* Fatal cases below call ksft_exit_fail_msg(), which never returns. */
	ret = wait_for_pid(pid1);
	switch (ret) {
	case PIDFD_FAIL:
		ksft_exit_fail_msg(
			"%s test: Managed to signal recycled pid %d\n",
			test_name, PID_RECYCLE);
	case PIDFD_PASS:
		ksft_exit_fail_msg("%s test: Failed to recycle pid %d\n",
				   test_name, PID_RECYCLE);
	case PIDFD_SKIP:
		ksft_test_result_skip("%s test: Skipping test\n", test_name);
		ret = 0;
		break;
	case PIDFD_XFAIL:
		ksft_test_result_pass(
			"%s test: Failed to signal recycled pid as expected\n",
			test_name);
		ret = 0;
		break;
	default /* PIDFD_ERROR */:
		ksft_exit_fail_msg("%s test: Error while running tests\n",
				   test_name);
	}

	return ret;
}
/*
 * Probe for pidfd_send_signal() support by sending the null signal to
 * ourselves; sets have_pidfd_send_signal, which gates the other tests.
 */
static int test_pidfd_send_signal_syscall_support(void)
{
	int pidfd, ret;
	const char *test_name = "pidfd_send_signal check for support";

	pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
	if (pidfd < 0)
		ksft_exit_fail_msg(
			"%s test: Failed to open process file descriptor\n",
			test_name);

	/* Signal 0 performs permission/existence checks only. */
	ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
	if (ret < 0) {
		/* ENOSYS means the kernel lacks the syscall: skip, don't fail. */
		if (errno == ENOSYS) {
			ksft_test_result_skip(
				"%s test: pidfd_send_signal() syscall not supported\n",
				test_name);
			return 0;
		}
		ksft_exit_fail_msg("%s test: Failed to send signal\n",
				   test_name);
	}

	have_pidfd_send_signal = true;
	close(pidfd);
	ksft_test_result_pass(
		"%s test: pidfd_send_signal() syscall is supported. Tests can be executed\n",
		test_name);
	return 0;
}
/*
 * Thread body: exec /bin/sleep from a non-leader thread. On success
 * execl() never returns; the trailing log line only runs if exec fails.
 */
static void *test_pidfd_poll_exec_thread(void *priv)
{
	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
			getpid(), syscall(SYS_gettid));
	ksft_print_msg("Child Thread: doing exec of sleep\n");

	execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);

	/* Reached only if execl() failed. */
	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
			getpid(), syscall(SYS_gettid));
	return NULL;
}
/*
 * Block (up to 5s) until the pidfd becomes readable via epoll, i.e. the
 * target process has exited. Any epoll failure or timeout aborts the
 * whole test program.
 */
static void poll_pidfd(const char *test_name, int pidfd)
{
	int c;
	int epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	struct epoll_event event, events[MAX_EVENTS];

	if (epoll_fd == -1)
		ksft_exit_fail_msg("%s test: Failed to create epoll file descriptor "
				   "(errno %d)\n",
				   test_name, errno);

	event.events = EPOLLIN;
	event.data.fd = pidfd;

	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, pidfd, &event)) {
		ksft_exit_fail_msg("%s test: Failed to add epoll file descriptor "
				   "(errno %d)\n",
				   test_name, errno);
	}

	/* 5000 ms timeout; expect exactly one EPOLLIN event on the pidfd. */
	c = epoll_wait(epoll_fd, events, MAX_EVENTS, 5000);
	if (c != 1 || !(events[0].events & EPOLLIN))
		ksft_exit_fail_msg("%s test: Unexpected epoll_wait result (c=%d, events=%x) "
				   "(errno %d)\n",
				   test_name, c, events[0].events, errno);

	close(epoll_fd);
	return;
}
/*
 * Child body: spawn a thread that execs /bin/sleep, then spin. The exec
 * replaces the whole thread group, so this loop is killed by the exec;
 * the parent's pidfd must not signal readiness before the execed program
 * actually exits.
 */
static int child_poll_exec_test(void *args)
{
	pthread_t t1;

	ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
			syscall(SYS_gettid));
	pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
	/*
	 * Exec in the non-leader thread will destroy the leader immediately.
	 * If the wait in the parent returns too soon, the test fails.
	 */
	while (1)
		sleep(1);

	return 0;
}
/*
 * Ensure waiting on a child (via waitpid or pidfd poll, per use_waitpid)
 * does not return prematurely when a non-leader thread execs: the wait
 * must take roughly CHILD_THREAD_MIN_WAIT seconds (the execed sleep).
 */
static void test_pidfd_poll_exec(int use_waitpid)
{
	int pid, pidfd = 0;
	int status, ret;
	time_t prog_start = time(NULL);
	const char *test_name = "pidfd_poll check for premature notification on child thread exec";

	ksft_print_msg("Parent: pid: %d\n", getpid());
	pid = pidfd_clone(CLONE_PIDFD, &pidfd, child_poll_exec_test);
	if (pid < 0)
		ksft_exit_fail_msg("%s test: pidfd_clone failed (ret %d, errno %d)\n",
				   test_name, pid, errno);

	ksft_print_msg("Parent: Waiting for Child (%d) to complete.\n", pid);

	if (use_waitpid) {
		ret = waitpid(pid, &status, 0);
		if (ret == -1)
			ksft_print_msg("Parent: error\n");

		if (ret == pid)
			ksft_print_msg("Parent: Child process waited for.\n");
	} else {
		poll_pidfd(test_name, pidfd);
	}

	/* Pass iff the wait lasted about as long as the execed sleep. */
	time_t prog_time = time(NULL) - prog_start;

	ksft_print_msg("Time waited for child: %lu\n", prog_time);

	close(pidfd);

	if (prog_time < CHILD_THREAD_MIN_WAIT || prog_time > CHILD_THREAD_MIN_WAIT + 2)
		ksft_exit_fail_msg("%s test: Failed\n", test_name);
	else
		ksft_test_result_pass("%s test: Passed\n", test_name);
}
/* Thread body: just sleep so the thread outlives the exiting leader. */
static void *test_pidfd_poll_leader_exit_thread(void *priv)
{
	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
			getpid(), syscall(SYS_gettid));
	sleep(CHILD_THREAD_MIN_WAIT);
	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
	return NULL;
}
static time_t *child_exit_secs;
/*
 * Child body: start two sleeper threads, record the time, then make only
 * the group leader exit while the threads keep running. The parent's
 * wait must not complete until the whole thread group is gone.
 */
static int child_poll_leader_exit_test(void *args)
{
	pthread_t t1, t2;

	ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
	pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
	pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);

	/*
	 * glibc exit calls exit_group syscall, so explicitly call exit only
	 * so that only the group leader exits, leaving the threads alone.
	 */
	*child_exit_secs = time(NULL);
	syscall(SYS_exit, 0);
	/* Never reached, but appeases compiler thinking we should return. */
	exit(0);
}
/*
 * Ensure waiting on a child does not return when only the thread-group
 * leader has exited while sibling threads are still alive. Timing is
 * shared with the child via a MAP_SHARED page (child_exit_secs).
 */
static void test_pidfd_poll_leader_exit(int use_waitpid)
{
	int pid, pidfd = 0;
	int status, ret = 0;
	const char *test_name = "pidfd_poll check for premature notification on non-empty"
				"group leader exit";

	/* Shared mapping so the child can report its leader-exit time. */
	child_exit_secs = mmap(NULL, sizeof *child_exit_secs, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (child_exit_secs == MAP_FAILED)
		ksft_exit_fail_msg("%s test: mmap failed (errno %d)\n",
				   test_name, errno);

	ksft_print_msg("Parent: pid: %d\n", getpid());
	pid = pidfd_clone(CLONE_PIDFD, &pidfd, child_poll_leader_exit_test);
	if (pid < 0)
		ksft_exit_fail_msg("%s test: pidfd_clone failed (ret %d, errno %d)\n",
				   test_name, pid, errno);

	ksft_print_msg("Parent: Waiting for Child (%d) to complete.\n", pid);

	if (use_waitpid) {
		ret = waitpid(pid, &status, 0);
		if (ret == -1)
			ksft_print_msg("Parent: error\n");
	} else {
		/*
		 * This sleep tests for the case where if the child exits, and is in
		 * EXIT_ZOMBIE, but the thread group leader is non-empty, then the poll
		 * doesn't prematurely return even though there are active threads
		 */
		sleep(1);
		poll_pidfd(test_name, pidfd);
	}

	if (ret == pid)
		ksft_print_msg("Parent: Child process waited for.\n");

	/* Pass iff the wait lasted roughly as long as the sleeper threads. */
	time_t since_child_exit = time(NULL) - *child_exit_secs;

	ksft_print_msg("Time since child exit: %lu\n", since_child_exit);

	close(pidfd);

	if (since_child_exit < CHILD_THREAD_MIN_WAIT ||
			since_child_exit > CHILD_THREAD_MIN_WAIT + 2)
		ksft_exit_fail_msg("%s test: Failed\n", test_name);
	else
		ksft_test_result_pass("%s test: Passed\n", test_name);
}
/*
 * Test driver: poll tests run first (they don't depend on the probe);
 * the syscall-support probe must precede the send_signal tests since it
 * sets have_pidfd_send_signal.
 */
int main(int argc, char **argv)
{
	ksft_print_header();
	ksft_set_plan(8);

	test_pidfd_poll_exec(0);
	test_pidfd_poll_exec(1);
	test_pidfd_poll_leader_exit(0);
	test_pidfd_poll_leader_exit(1);
	test_pidfd_send_signal_syscall_support();
	test_pidfd_send_signal_simple_success();
	test_pidfd_send_signal_exited_fail();
	test_pidfd_send_signal_recycled_pid_fail();

	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/pidfd/pidfd_test.c |
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "pidfd.h"
#include "../kselftest_harness.h"
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
/* Attempt to de-conflict with the selftests tree. */
#ifndef SKIP
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
/* Raw clone3(2) wrapper; the kernel takes the struct size explicitly. */
static pid_t sys_clone3(struct clone_args *args)
{
	const size_t size = sizeof(struct clone_args);

	return syscall(__NR_clone3, args, size);
}
/* Raw waitid(2) wrapper exposing siginfo and rusage to the caller. */
static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
		      struct rusage *ru)
{
	long ret;

	ret = syscall(__NR_waitid, which, pid, info, options, ru);
	return (int)ret;
}
/*
 * P_PIDFD waitid() basics: reject fds that are not pidfds, then wait for
 * a real CLONE_PIDFD child and check the siginfo exit report.
 */
TEST(wait_simple)
{
	int pidfd = -1;
	pid_t parent_tid = -1;
	struct clone_args args = {
		.parent_tid = ptr_to_u64(&parent_tid),
		.pidfd = ptr_to_u64(&pidfd),
		.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
		.exit_signal = SIGCHLD,
	};
	pid_t pid;
	siginfo_t info = {
		.si_signo = 0,
	};

	/* A /proc directory fd is not a pidfd: waitid must fail. */
	pidfd = open("/proc/self", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
	ASSERT_GE(pidfd, 0);

	pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
	ASSERT_NE(pid, 0);
	EXPECT_EQ(close(pidfd), 0);
	pidfd = -1;

	/* Neither is an arbitrary file fd. */
	pidfd = open("/dev/null", O_RDONLY | O_CLOEXEC);
	ASSERT_GE(pidfd, 0);

	pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
	ASSERT_NE(pid, 0);
	EXPECT_EQ(close(pidfd), 0);
	pidfd = -1;

	/* A genuine CLONE_PIDFD child must be waitable via its pidfd. */
	pid = sys_clone3(&args);
	ASSERT_GE(pid, 0);

	if (pid == 0)
		exit(EXIT_SUCCESS);

	pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
	ASSERT_GE(pid, 0);
	ASSERT_EQ(WIFEXITED(info.si_status), true);
	ASSERT_EQ(WEXITSTATUS(info.si_status), 0);
	EXPECT_EQ(close(pidfd), 0);

	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_EXITED);
	ASSERT_EQ(info.si_pid, parent_tid);
}
/*
 * Walk a child through stop -> continue -> stop -> kill and observe each
 * transition via P_PIDFD waitid() (WSTOPPED, WCONTINUED, WUNTRACED,
 * WEXITED), checking the siginfo for every state. A pipe sequences the
 * child's second SIGSTOP after the parent saw the SIGCONT.
 */
TEST(wait_states)
{
	int pidfd = -1;
	pid_t parent_tid = -1;
	struct clone_args args = {
		.parent_tid = ptr_to_u64(&parent_tid),
		.pidfd = ptr_to_u64(&pidfd),
		.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
		.exit_signal = SIGCHLD,
	};
	int pfd[2];
	pid_t pid;
	siginfo_t info = {
		.si_signo = 0,
	};

	ASSERT_EQ(pipe(pfd), 0);
	pid = sys_clone3(&args);
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		char buf[2];

		close(pfd[1]);
		kill(getpid(), SIGSTOP);
		/* Wait for the parent's go-ahead before stopping again. */
		ASSERT_EQ(read(pfd[0], buf, 1), 1);
		close(pfd[0]);
		kill(getpid(), SIGSTOP);
		exit(EXIT_SUCCESS);
	}

	close(pfd[0]);
	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_STOPPED);
	ASSERT_EQ(info.si_pid, parent_tid);

	ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0);

	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL), 0);
	/* Release the child so it can stop a second time. */
	ASSERT_EQ(write(pfd[1], "C", 1), 1);
	close(pfd[1]);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_CONTINUED);
	ASSERT_EQ(info.si_pid, parent_tid);

	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WUNTRACED, NULL), 0);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_STOPPED);
	ASSERT_EQ(info.si_pid, parent_tid);

	ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0), 0);

	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL), 0);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_KILLED);
	ASSERT_EQ(info.si_pid, parent_tid);

	EXPECT_EQ(close(pidfd), 0);
}
/*
 * PIDFD_NONBLOCK semantics: waitid() on a non-blocking pidfd returns
 * ECHILD with no children, EAGAIN when children exist but none changed
 * state, 0 with explicit WNOHANG, and blocks normally again after
 * O_NONBLOCK is cleared via fcntl(). Skips if the kernel lacks
 * PIDFD_NONBLOCK support.
 */
TEST(wait_nonblock)
{
	int pidfd;
	unsigned int flags = 0;
	pid_t parent_tid = -1;
	struct clone_args args = {
		.parent_tid = ptr_to_u64(&parent_tid),
		.flags = CLONE_PARENT_SETTID,
		.exit_signal = SIGCHLD,
	};
	int ret;
	pid_t pid;
	siginfo_t info = {
		.si_signo = 0,
	};

	/*
	 * Callers need to see ECHILD with non-blocking pidfds when no child
	 * processes exists.
	 */
	pidfd = sys_pidfd_open(getpid(), PIDFD_NONBLOCK);
	EXPECT_GE(pidfd, 0) {
		/* pidfd_open() doesn't support PIDFD_NONBLOCK. */
		ASSERT_EQ(errno, EINVAL);
		SKIP(return, "Skipping PIDFD_NONBLOCK test");
	}

	ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
	ASSERT_LT(ret, 0);
	ASSERT_EQ(errno, ECHILD);
	EXPECT_EQ(close(pidfd), 0);

	pid = sys_clone3(&args);
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		kill(getpid(), SIGSTOP);
		exit(EXIT_SUCCESS);
	}

	pidfd = sys_pidfd_open(pid, PIDFD_NONBLOCK);
	EXPECT_GE(pidfd, 0) {
		/* pidfd_open() doesn't support PIDFD_NONBLOCK. */
		ASSERT_EQ(errno, EINVAL);
		SKIP(return, "Skipping PIDFD_NONBLOCK test");
	}

	/* PIDFD_NONBLOCK must be reflected as O_NONBLOCK in f_flags. */
	flags = fcntl(pidfd, F_GETFL, 0);
	ASSERT_GT(flags, 0);
	ASSERT_GT((flags & O_NONBLOCK), 0);

	/*
	 * Callers need to see EAGAIN/EWOULDBLOCK with non-blocking pidfd when
	 * child processes exist but none have exited.
	 */
	ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
	ASSERT_LT(ret, 0);
	ASSERT_EQ(errno, EAGAIN);

	/*
	 * Callers need to continue seeing 0 with non-blocking pidfd and
	 * WNOHANG raised explicitly when child processes exist but none have
	 * exited.
	 */
	ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED | WNOHANG, NULL);
	ASSERT_EQ(ret, 0);

	/* Clearing O_NONBLOCK restores normal blocking waitid(). */
	ASSERT_EQ(fcntl(pidfd, F_SETFL, (flags & ~O_NONBLOCK)), 0);

	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_STOPPED);
	ASSERT_EQ(info.si_pid, parent_tid);

	ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0);

	ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL), 0);
	ASSERT_EQ(info.si_signo, SIGCHLD);
	ASSERT_EQ(info.si_code, CLD_EXITED);
	ASSERT_EQ(info.si_pid, parent_tid);

	EXPECT_EQ(close(pidfd), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/pidfd/pidfd_wait.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/types.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include "pidfd.h"
#include "../kselftest.h"
/* Accumulated test outcome: a PIDFD_* result code plus a human message. */
struct error {
	int code;	/* PIDFD_PASS/FAIL/SKIP/XFAIL/ERROR */
	char msg[512];	/* formatted description for reporting */
};
/*
 * Record a non-PASS result in err, formatting msg printf-style, and
 * return code unchanged. Only the first non-PASS code is kept: if err is
 * NULL, code is PIDFD_PASS, or err already holds a failure, nothing is
 * written.
 */
static int error_set(struct error *err, int code, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (code == PIDFD_PASS || !err || err->code != PIDFD_PASS)
		return code;

	err->code = code;
	va_start(ap, fmt);
	n = vsnprintf(err->msg, sizeof(err->msg), fmt, ap);
	va_end(ap);
	/* Messages must fit; a truncated report indicates a test bug. */
	assert((size_t)n < sizeof(err->msg));

	return code;
}
/*
 * Report a test result to the kselftest harness based on err->code.
 * PIDFD_ERROR (and unknown codes) terminate the whole program via
 * ksft_exit_fail_msg().
 */
static void error_report(struct error *err, const char *test_name)
{
	switch (err->code) {
	case PIDFD_ERROR:
		ksft_exit_fail_msg("%s test: Fatal: %s\n", test_name, err->msg);
		break;

	case PIDFD_FAIL:
		/* will be: not ok %d # error %s test: %s */
		ksft_test_result_error("%s test: %s\n", test_name, err->msg);
		break;

	case PIDFD_SKIP:
		/* will be: not ok %d # SKIP %s test: %s */
		ksft_test_result_skip("%s test: %s\n", test_name, err->msg);
		break;

	case PIDFD_XFAIL:
		ksft_test_result_pass("%s test: Expected failure: %s\n",
				      test_name, err->msg);
		break;

	case PIDFD_PASS:
		/* Fixed: "%s" previously had no matching argument (UB). */
		ksft_test_result_pass("%s test: Passed\n", test_name);
		break;

	default:
		ksft_exit_fail_msg("%s test: Unknown code: %d %s\n",
				   test_name, err->code, err->msg);
		break;
	}
}
/*
 * Abort the program on fatal errors (PIDFD_ERROR); otherwise just hand
 * back the accumulated result code for the caller to report.
 */
static inline int error_check(struct error *err, const char *test_name)
{
	/* In case of error we bail out and terminate the test program */
	if (err->code == PIDFD_ERROR)
		error_report(err, test_name);

	return err->code;
}
#define CHILD_STACK_SIZE 8192
/* A cloned child: its mmap'd stack, pid, and CLONE_PIDFD file descriptor. */
struct child {
	char *stack;	/* CHILD_STACK_SIZE bytes, unmapped in child_join() */
	pid_t pid;
	int fd;		/* pidfd filled in by clone(CLONE_PIDFD) */
};
/*
 * Clone a child running fn in new pid and mount namespaces (adding a user
 * namespace when not running as root), with a pidfd delivered via
 * CLONE_PIDFD. On mmap failure only err is set and the returned struct's
 * pid/fd fields are left uninitialized - callers must check err first.
 */
static struct child clone_newns(int (*fn)(void *), void *args,
				struct error *err)
{
	static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
	struct child ret;

	/* Unprivileged runs need a user ns to create the other namespaces. */
	if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
		flags |= CLONE_NEWUSER;

	ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	if (ret.stack == MAP_FAILED) {
		error_set(err, -1, "mmap of stack failed (errno %d)", errno);
		return ret;
	}

#ifdef __ia64__
	/* ia64 needs __clone2() with explicit stack base and size. */
	ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
#else
	/* Stack grows downward elsewhere: pass the top of the mapping. */
	ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
#endif

	if (ret.pid < 0) {
		error_set(err, PIDFD_ERROR, "clone failed (ret %d, errno %d)",
			  ret.fd, errno);
		return ret;
	}

	ksft_print_msg("New child: %d, fd: %d\n", ret.pid, ret.fd);

	return ret;
}
/* Close the child's pidfd; the process itself is reaped in child_join(). */
static inline void child_close(struct child *child)
{
	close(child->fd);
}
/*
 * Reap the child and unmap its stack. Returns the child's exit status
 * (a PIDFD_* code), recording any wait or munmap failure in err.
 */
static inline int child_join(struct child *child, struct error *err)
{
	int r;

	r = wait_for_pid(child->pid);
	if (r < 0)
		error_set(err, PIDFD_ERROR, "waitpid failed (ret %d, errno %d)",
			  r, errno);
	else if (r > 0)
		/* Non-zero exit status: propagate the child's PIDFD_* code. */
		error_set(err, r, "child %d reported: %d", child->pid, r);

	if (munmap(child->stack, CHILD_STACK_SIZE)) {
		error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
		r = -1;
	}

	ksft_print_msg("waitpid WEXITSTATUS=%d\n", r);
	return r;
}
/* Convenience: close the child's pidfd, then reap it and clean up. */
static inline int child_join_close(struct child *child, struct error *err)
{
	child_close(child);
	return child_join(child, err);
}
/* Truncate str at its last newline, if one is present. */
static inline void trim_newline(char *str)
{
	char *nl = strrchr(str, '\n');

	if (nl != NULL)
		*nl = '\0';
}
/*
 * Check that /proc/self/fdinfo/<pidfd> contains a line starting with
 * prefix whose remainder exactly matches the printf-style expectation
 * (including the trailing '\n').  Returns PIDFD_PASS, or sets err to
 * PIDFD_FAIL (mismatch / line missing) or PIDFD_ERROR (open failed).
 */
static int verify_fdinfo(int pidfd, struct error *err, const char *prefix,
			 size_t prefix_len, const char *expect, ...)
{
	char buffer[512] = {0, };
	char path[512] = {0, };
	va_list args;
	FILE *f;
	char *line = NULL;
	size_t n = 0;
	int found = 0;
	int r;
	va_start(args, expect);
	r = vsnprintf(buffer, sizeof(buffer), expect, args);
	assert((size_t)r < sizeof(buffer));
	va_end(args);
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
	f = fopen(path, "re");
	if (!f)
		return error_set(err, PIDFD_ERROR, "fdinfo open failed for %d",
				 pidfd);
	while (getline(&line, &n, f) != -1) {
		char *val;
		if (strncmp(line, prefix, prefix_len))
			continue;
		found = 1;
		val = line + prefix_len;
		r = strcmp(val, buffer);
		if (r != 0) {
			/* trim for a readable diagnostic */
			trim_newline(line);
			trim_newline(buffer);
			error_set(err, PIDFD_FAIL, "%s '%s' != '%s'",
				  prefix, val, buffer);
		}
		break;
	}
	free(line);
	fclose(f);
	if (found == 0)
		return error_set(err, PIDFD_FAIL, "%s not found for fd %d",
				 prefix, pidfd);
	return PIDFD_PASS;
}
/*
 * Child entry point.  With args == NULL it is a plain do-nothing child.
 * With a pidfd passed in args it remounts /proc for its own pid
 * namespace and verifies the pidfd's NSpid fdinfo entry reads "0",
 * since the pidfd's target lives in a sibling pid namespace.
 */
static int child_fdinfo_nspid_test(void *args)
{
	struct error err;
	int pidfd;
	int r;
	/* if we got no fd for the sibling, we are done */
	if (!args)
		return PIDFD_PASS;
	/* verify that we can not resolve the pidfd for a process
	 * in a sibling pid namespace, i.e. a pid namespace it is
	 * not in our or a descended namespace
	 */
	r = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
	if (r < 0) {
		ksft_print_msg("Failed to remount / private\n");
		return PIDFD_ERROR;
	}
	(void)umount2("/proc", MNT_DETACH);
	r = mount("proc", "/proc", "proc", 0, NULL);
	if (r < 0) {
		ksft_print_msg("Failed to remount /proc\n");
		return PIDFD_ERROR;
	}
	pidfd = *(int *)args;
	r = verify_fdinfo(pidfd, &err, "NSpid:", 6, "\t0\n");
	if (r != PIDFD_PASS)
		ksft_print_msg("NSpid fdinfo check failed: %s\n", err.msg);
	return r;
}
/*
 * Verify NSpid fdinfo entries: from the parent both children show
 * "<parent-view pid> 1", while from a sibling pid namespace the
 * first child's pidfd resolves to "0".
 */
static void test_pidfd_fdinfo_nspid(void)
{
	struct child a, b;
	struct error err = {0, };
	const char *test_name = "pidfd check for NSpid in fdinfo";
	/* Create a new child in a new pid and mount namespace */
	a = clone_newns(child_fdinfo_nspid_test, NULL, &err);
	error_check(&err, test_name);
	/* Pass the pidfd representing the first child to the
	 * second child, which will be in a sibling pid namespace,
	 * which means that the fdinfo NSpid entry for the pidfd
	 * should only contain '0'.
	 */
	b = clone_newns(child_fdinfo_nspid_test, &a.fd, &err);
	error_check(&err, test_name);
	/* The children will have pid 1 in the new pid namespace,
	 * so the line must be 'NSPid:\t<pid>\t1'.
	 */
	verify_fdinfo(a.fd, &err, "NSpid:", 6, "\t%d\t%d\n", a.pid, 1);
	verify_fdinfo(b.fd, &err, "NSpid:", 6, "\t%d\t%d\n", b.pid, 1);
	/* wait for the process, check the exit status and set
	 * 'err' accordingly, if it is not already set.
	 */
	child_join_close(&a, &err);
	child_join_close(&b, &err);
	error_report(&err, test_name);
}
/*
 * After the child has been reaped its pidfd must still be readable and
 * the Pid/NSpid fdinfo entries must report -1.
 */
static void test_pidfd_dead_fdinfo(void)
{
	struct child a;
	struct error err = {0, };
	const char *test_name = "pidfd check fdinfo for dead process";
	/* Create a new child in a new pid and mount namespace */
	a = clone_newns(child_fdinfo_nspid_test, NULL, &err);
	error_check(&err, test_name);
	/* reap first so the pidfd refers to a dead process */
	child_join(&a, &err);
	verify_fdinfo(a.fd, &err, "Pid:", 4, "\t-1\n");
	verify_fdinfo(a.fd, &err, "NSpid:", 6, "\t-1\n");
	child_close(&a);
	error_report(&err, test_name);
}
/* Run both fdinfo test cases under the kselftest TAP harness. */
int main(int argc, char **argv)
{
	ksft_print_header();
	ksft_set_plan(2);
	test_pidfd_fdinfo_nspid();
	test_pidfd_dead_fdinfo();
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/pidfd/pidfd_fdinfo_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <linux/types.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include "pidfd.h"
#include "../kselftest.h"
/*
 * Set asynchronously from the SIGALRM handler.  volatile sig_atomic_t
 * is the only object type the C standard guarantees can be safely
 * written from a signal handler and read from the interrupted code
 * (CERT SIG31-C); a plain bool gives no such guarantee.
 */
static volatile sig_atomic_t timeout;
static void handle_alarm(int sig)
{
	timeout = true;
}
/*
 * Stress test for pidfd poll(): fork a child, SIGKILL it through its
 * pidfd, and verify poll() reports POLLIN (exit notification) before a
 * 3 second alarm fires.  Repeats nr_iterations times (optional argv[1]).
 */
int main(int argc, char **argv)
{
	struct pollfd fds;
	int iter, nevents;
	int nr_iterations = 10000;
	fds.events = POLLIN;
	if (argc > 2)
		ksft_exit_fail_msg("Unexpected command line argument\n");
	if (argc == 2) {
		nr_iterations = atoi(argv[1]);
		if (nr_iterations <= 0)
			ksft_exit_fail_msg("invalid input parameter %s\n",
					argv[1]);
	}
	ksft_print_msg("running pidfd poll test for %d iterations\n",
			nr_iterations);
	for (iter = 0; iter < nr_iterations; iter++) {
		int pidfd;
		int child_pid = fork();
		if (child_pid < 0) {
			/* transient process-table pressure: retry this iteration */
			if (errno == EAGAIN) {
				iter--;
				continue;
			}
			ksft_exit_fail_msg(
				"%s - failed to fork a child process\n",
				strerror(errno));
		}
		if (child_pid == 0) {
			/* Child process just sleeps for a min and exits */
			sleep(60);
			exit(EXIT_SUCCESS);
		}
		/* Parent kills the child and waits for its death */
		pidfd = sys_pidfd_open(child_pid, 0);
		if (pidfd < 0)
			ksft_exit_fail_msg("%s - pidfd_open failed\n",
					strerror(errno));
		/* Setup 3 sec alarm - plenty of time */
		if (signal(SIGALRM, handle_alarm) == SIG_ERR)
			ksft_exit_fail_msg("%s - signal failed\n",
					strerror(errno));
		alarm(3);
		/* Send SIGKILL to the child */
		if (sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0))
			ksft_exit_fail_msg("%s - pidfd_send_signal failed\n",
					strerror(errno));
		/* Wait for the death notification */
		fds.fd = pidfd;
		nevents = poll(&fds, 1, -1);
		/* Check for error conditions */
		if (nevents < 0)
			ksft_exit_fail_msg("%s - poll failed\n",
					strerror(errno));
		if (nevents != 1)
			ksft_exit_fail_msg("unexpected poll result: %d\n",
					nevents);
		if (!(fds.revents & POLLIN))
			ksft_exit_fail_msg(
				"unexpected event type received: 0x%x\n",
				fds.revents);
		if (timeout)
			ksft_exit_fail_msg(
				"death notification wait timeout\n");
		close(pidfd);
		/* Wait for child to prevent zombies */
		if (waitpid(child_pid, NULL, 0) < 0)
			ksft_exit_fail_msg("%s - waitpid failed\n",
					strerror(errno));
	}
	ksft_test_result_pass("pidfd poll test: pass\n");
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/pidfd/pidfd_poll_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/types.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include "pidfd.h"
#include "../kselftest.h"
/*
 * Parse numstr (any base accepted by strtol with base 0) into
 * *converted.  Returns 0 on success, -ERANGE when the value does not
 * fit a long or an int, and -EINVAL for empty or trailing-garbage
 * input.  *converted is written only on success.
 */
static int safe_int(const char *numstr, int *converted)
{
	char *endp = NULL;
	long parsed;

	errno = 0;
	parsed = strtol(numstr, &endp, 0);

	/* strtol signalled long over/underflow */
	if (errno == ERANGE && (parsed == LONG_MAX || parsed == LONG_MIN))
		return -ERANGE;

	/* any other strtol failure */
	if (errno != 0 && parsed == 0)
		return -EINVAL;

	/* nothing consumed, or junk after the number */
	if (endp == numstr || *endp != '\0')
		return -EINVAL;

	/* fits a long but not an int */
	if (parsed > INT_MAX || parsed < INT_MIN)
		return -ERANGE;

	*converted = (int)parsed;
	return 0;
}
/*
 * Index of the first character in buffer[0..len) that is not a space
 * or tab.  Returns 0 when the buffer is entirely whitespace (the trim
 * helper relies on that fallback).
 */
static int char_left_gc(const char *buffer, size_t len)
{
	size_t pos = 0;

	while (pos < len && (buffer[pos] == ' ' || buffer[pos] == '\t'))
		pos++;

	return pos == len ? 0 : (int)pos;
}
/*
 * One past the index of the last character in buffer[0..len) that is
 * not whitespace (space, tab, newline or NUL) — i.e. the length the
 * buffer should be truncated to.  Returns 0 for all-whitespace input.
 */
static int char_right_gc(const char *buffer, size_t len)
{
	int pos = (int)len;

	while (pos > 0) {
		char c = buffer[pos - 1];

		if (c != ' ' && c != '\t' && c != '\n' && c != '\0')
			return pos;
		pos--;
	}

	return 0;
}
/*
 * Trim leading and trailing whitespace inside buffer and return a
 * pointer to the first non-whitespace character.
 */
static char *trim_whitespace_in_place(char *buffer)
{
	char *start = buffer + char_left_gc(buffer, strlen(buffer));

	start[char_right_gc(start, strlen(start))] = '\0';

	return start;
}
/*
 * Look up the value of the fdinfo line starting with key (of length
 * keylen) for /proc/self/fdinfo/<pidfd>.  Returns the parsed pid, or
 * -1 when the file cannot be opened, the key is absent, or the value
 * does not parse as an int.
 */
static pid_t get_pid_from_fdinfo_file(int pidfd, const char *key, size_t keylen)
{
	int ret;
	char path[512];
	FILE *f;
	size_t n = 0;
	pid_t result = -1;
	char *line = NULL;
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
	f = fopen(path, "re");
	if (!f)
		return -1;
	while (getline(&line, &n, f) != -1) {
		char *numstr;
		if (strncmp(line, key, keylen))
			continue;
		/*
		 * Skip the key itself.  The previous code used a
		 * hard-coded "line + 4", which only worked for the
		 * 4-byte key "Pid:" and silently mis-parsed any longer
		 * key (e.g. "NSpid:").
		 */
		numstr = trim_whitespace_in_place(line + keylen);
		ret = safe_int(numstr, &result);
		if (ret < 0)
			goto out;
		break;
	}
out:
	free(line);
	fclose(f);
	return result;
}
/*
 * pidfd_open() smoke tests: invalid pid (-1) and invalid flags must
 * fail, opening a pidfd for ourselves must succeed, and its fdinfo
 * "Pid:" entry is printed for inspection.
 */
int main(int argc, char **argv)
{
	int pidfd = -1, ret = 1;
	pid_t pid;
	ksft_set_plan(3);
	pidfd = sys_pidfd_open(-1, 0);
	if (pidfd >= 0) {
		ksft_print_msg(
			"%s - succeeded to open pidfd for invalid pid -1\n",
			strerror(errno));
		goto on_error;
	}
	ksft_test_result_pass("do not allow invalid pid test: passed\n");
	pidfd = sys_pidfd_open(getpid(), 1);
	if (pidfd >= 0) {
		ksft_print_msg(
			"%s - succeeded to open pidfd with invalid flag value specified\n",
			strerror(errno));
		goto on_error;
	}
	ksft_test_result_pass("do not allow invalid flag test: passed\n");
	pidfd = sys_pidfd_open(getpid(), 0);
	if (pidfd < 0) {
		ksft_print_msg("%s - failed to open pidfd\n", strerror(errno));
		goto on_error;
	}
	ksft_test_result_pass("open a new pidfd test: passed\n");
	pid = get_pid_from_fdinfo_file(pidfd, "Pid:", sizeof("Pid:") - 1);
	ksft_print_msg("pidfd %d refers to process with pid %d\n", pidfd, pid);
	ret = 0;
on_error:
	if (pidfd >= 0)
		close(pidfd);
	return !ret ? ksft_exit_pass() : ksft_exit_fail();
}
| linux-master | tools/testing/selftests/pidfd/pidfd_open_test.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/types.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/kcmp.h>
#include "pidfd.h"
#include "../kselftest_harness.h"
/*
* UNKNOWN_FD is an fd number that should never exist in the child, as it is
* used to check the negative case.
*/
#define UNKNOWN_FD 111
#define UID_NOBODY 65535
/* Raw kcmp(2) wrapper; glibc provides no stub for this syscall. */
static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1,
		    unsigned long idx2)
{
	return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
}
/*
 * Child-side protocol loop: announce the memfd number to the parent,
 * then service single-byte commands until the parent closes its end of
 * the socketpair.  'P' asks the child to clear PR_SET_DUMPABLE (makes
 * it unptraceable) and is acked by echoing the byte back.
 */
static int __child(int sk, int memfd)
{
	int ret;
	char buf;
	/*
	 * Ensure we don't leave around a bunch of orphaned children if our
	 * tests fail.
	 */
	ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
	if (ret) {
		fprintf(stderr, "%s: Child could not set DEATHSIG\n",
			strerror(errno));
		return -1;
	}
	ret = send(sk, &memfd, sizeof(memfd), 0);
	if (ret != sizeof(memfd)) {
		fprintf(stderr, "%s: Child failed to send fd number\n",
			strerror(errno));
		return -1;
	}
	/*
	 * The fixture setup is completed at this point. The tests will run.
	 *
	 * This blocking recv enables the parent to message the child.
	 * Either we will read 'P' off of the sk, indicating that we need
	 * to disable ptrace, or we will read a 0, indicating that the other
	 * side has closed the sk. This occurs during fixture teardown time,
	 * indicating that the child should exit.
	 */
	while ((ret = recv(sk, &buf, sizeof(buf), 0)) > 0) {
		if (buf == 'P') {
			ret = prctl(PR_SET_DUMPABLE, 0);
			if (ret < 0) {
				fprintf(stderr,
					"%s: Child failed to disable ptrace\n",
					strerror(errno));
				return -1;
			}
		} else {
			fprintf(stderr, "Child received unknown command %c\n",
				buf);
			return -1;
		}
		ret = send(sk, &buf, sizeof(buf), 0);
		if (ret != 1) {
			fprintf(stderr, "%s: Child failed to ack\n",
				strerror(errno));
			return -1;
		}
	}
	if (ret < 0) {
		fprintf(stderr, "%s: Child failed to read from socket\n",
			strerror(errno));
		return -1;
	}
	return 0;
}
/*
 * Child entry: create the memfd that the parent will try to steal via
 * pidfd_getfd(), run the command loop, and clean up both fds.
 */
static int child(int sk)
{
	int memfd, ret;
	memfd = sys_memfd_create("test", 0);
	if (memfd < 0) {
		fprintf(stderr, "%s: Child could not create memfd\n",
			strerror(errno));
		ret = -1;
	} else {
		ret = __child(sk, memfd);
		close(memfd);
	}
	close(sk);
	return ret;
}
/* Per-test state: one forked child exposing a memfd over a socketpair. */
FIXTURE(child)
{
	/*
	 * remote_fd is the number of the FD which we are trying to retrieve
	 * from the child.
	 */
	int remote_fd;
	/* pid points to the child which we are fetching FDs from */
	pid_t pid;
	/* pidfd is the pidfd of the child */
	int pidfd;
	/*
	 * sk is our side of the socketpair used to communicate with the child.
	 * When it is closed, the child will exit.
	 */
	int sk;
};
/* Fork the helper child, open its pidfd and learn the remote memfd number. */
FIXTURE_SETUP(child)
{
	int ret, sk_pair[2];
	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair)) {
		TH_LOG("%s: failed to create socketpair", strerror(errno));
	}
	self->sk = sk_pair[0];
	self->pid = fork();
	ASSERT_GE(self->pid, 0);
	if (self->pid == 0) {
		close(sk_pair[0]);
		if (child(sk_pair[1]))
			_exit(EXIT_FAILURE);
		_exit(EXIT_SUCCESS);
	}
	close(sk_pair[1]);
	self->pidfd = sys_pidfd_open(self->pid, 0);
	ASSERT_GE(self->pidfd, 0);
	/*
	 * Wait for the child to complete setup. It'll send the remote memfd's
	 * number when ready.
	 */
	ret = recv(sk_pair[0], &self->remote_fd, sizeof(self->remote_fd), 0);
	ASSERT_EQ(sizeof(self->remote_fd), ret);
}
/* Closing self->sk makes the child's recv() loop exit; then reap it. */
FIXTURE_TEARDOWN(child)
{
	EXPECT_EQ(0, close(self->pidfd));
	EXPECT_EQ(0, close(self->sk));
	EXPECT_EQ(0, wait_for_pid(self->pid));
}
/*
 * pidfd_getfd() must fail with EPERM once the child is not dumpable
 * and the caller lacks CAP_SYS_PTRACE (we drop to nobody if root).
 */
TEST_F(child, disable_ptrace)
{
	int uid, fd;
	char c;
	/*
	 * Turn into nobody if we're root, to avoid CAP_SYS_PTRACE
	 *
	 * The tests should run in their own process, so even this test fails,
	 * it shouldn't result in subsequent tests failing.
	 */
	uid = getuid();
	if (uid == 0)
		ASSERT_EQ(0, seteuid(UID_NOBODY));
	/* ask the child to clear PR_SET_DUMPABLE and wait for its ack */
	ASSERT_EQ(1, send(self->sk, "P", 1, 0));
	ASSERT_EQ(1, recv(self->sk, &c, 1, 0));
	fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0);
	EXPECT_EQ(-1, fd);
	EXPECT_EQ(EPERM, errno);
	if (uid == 0)
		ASSERT_EQ(0, seteuid(0));
}
/*
 * Fetch the child's memfd via pidfd_getfd() and check with kcmp() that
 * both fds refer to the same struct file.
 */
TEST_F(child, fetch_fd)
{
	int fd, ret;
	fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0);
	ASSERT_GE(fd, 0);
	ret = sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd);
	if (ret < 0 && errno == ENOSYS)
		SKIP(return, "kcmp() syscall not supported");
	EXPECT_EQ(ret, 0);
	ret = fcntl(fd, F_GETFD);
	ASSERT_GE(ret, 0);
	/*
	 * NOTE(review): (ret & FD_CLOEXEC) >= 0 is always true; to really
	 * check that CLOEXEC is set this would need EXPECT_EQ against
	 * FD_CLOEXEC — confirm intent before tightening.
	 */
	EXPECT_GE(ret & FD_CLOEXEC, 0);
	close(fd);
}
/* Fetching an fd number the child never had must fail with EBADF. */
TEST_F(child, test_unknown_fd)
{
	int fd;
	fd = sys_pidfd_getfd(self->pidfd, UNKNOWN_FD, 0);
	EXPECT_EQ(-1, fd) {
		TH_LOG("getfd succeeded while fetching unknown fd");
	};
	EXPECT_EQ(EBADF, errno) {
		TH_LOG("%s: getfd did not get EBADF", strerror(errno));
	}
}
/* Any non-zero flags value must be rejected with EINVAL. */
TEST(flags_set)
{
	ASSERT_EQ(-1, sys_pidfd_getfd(0, 0, 1));
	EXPECT_EQ(errno, EINVAL);
}
/* Skip the whole suite when the kernel headers lack pidfd_getfd. */
#if __NR_pidfd_getfd == -1
int main(void)
{
	fprintf(stderr, "__NR_pidfd_getfd undefined. The pidfd_getfd syscall is unavailable. Test aborting\n");
	return KSFT_SKIP;
}
#else
TEST_HARNESS_MAIN
#endif
| linux-master | tools/testing/selftests/pidfd/pidfd_getfd_test.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red Hat */
#include "hid.skel.h"
#include "../kselftest_harness.h"
#include <bpf/bpf.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <dirent.h>
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <linux/hidraw.h>
#include <linux/uhid.h>
#define SHOW_UHID_DEBUG 0
/*
 * HID report descriptor of the emulated uhid test device: a vendor
 * collection with input report ID 2, and a mouse-like collection with
 * input report ID 1 plus output and feature reports.
 */
static unsigned char rdesc[] = {
	0x06, 0x00, 0xff,	/* Usage Page (Vendor Defined Page 1) */
	0x09, 0x21,		/* Usage (Vendor Usage 0x21) */
	0xa1, 0x01,		/* COLLECTION (Application) */
	0x09, 0x01,			/* Usage (Vendor Usage 0x01) */
	0xa1, 0x00,			/* COLLECTION (Physical) */
	0x85, 0x02,				/* REPORT_ID (2) */
	0x19, 0x01,				/* USAGE_MINIMUM (1) */
	0x29, 0x08,				/* USAGE_MAXIMUM (8) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0xff,				/* LOGICAL_MAXIMUM (255) */
	0x95, 0x08,				/* REPORT_COUNT (8) */
	0x75, 0x08,				/* REPORT_SIZE (8) */
	0x81, 0x02,				/* INPUT (Data,Var,Abs) */
	0xc0,				/* END_COLLECTION */
	0x09, 0x01,			/* Usage (Vendor Usage 0x01) */
	0xa1, 0x00,			/* COLLECTION (Physical) */
	0x85, 0x01,				/* REPORT_ID (1) */
	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x01,				/* USAGE_MINIMUM (1) */
	0x29, 0x03,				/* USAGE_MAXIMUM (3) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0x81, 0x02,				/* INPUT (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x81, 0x01,				/* INPUT (Cnst,Var,Abs) */
	0x05, 0x01,				/* USAGE_PAGE (Generic Desktop) */
	0x09, 0x30,				/* USAGE (X) */
	0x09, 0x31,				/* USAGE (Y) */
	0x15, 0x81,				/* LOGICAL_MINIMUM (-127) */
	0x25, 0x7f,				/* LOGICAL_MAXIMUM (127) */
	0x75, 0x10,				/* REPORT_SIZE (16) */
	0x95, 0x02,				/* REPORT_COUNT (2) */
	0x81, 0x06,				/* INPUT (Data,Var,Rel) */
	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x01,				/* USAGE_MINIMUM (1) */
	0x29, 0x03,				/* USAGE_MAXIMUM (3) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0x91, 0x02,				/* Output (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */
	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x06,				/* USAGE_MINIMUM (6) */
	0x29, 0x08,				/* USAGE_MAXIMUM (8) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0xb1, 0x02,				/* Feature (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */
	0xc0,				/* END_COLLECTION */
	0xc0,			/* END_COLLECTION */
};
/* Canned payload answered to UHID_GET_REPORT requests (report id 1). */
static __u8 feature_data[] = { 1, 2 };
/* Context handed to the BPF attach program via bpf_prog_test_run_opts(). */
struct attach_prog_args {
	int prog_fd;		/* fd of the HID-BPF program to attach */
	unsigned int hid;	/* sysfs id of the target HID device */
	int retval;		/* out: link fd on success (see load_programs) */
	int insert_head;	/* non-zero requests head insertion (ordering test) */
};
/* Context for the hid_user_raw_request BPF syscall program. */
struct hid_hw_request_syscall_args {
	__u8 data[10];
	unsigned int hid;
	int retval;
	size_t size;
	enum hid_report_type type;
	__u8 request_type;
};
/* Map bpf-selftests style assertion names onto the kselftest harness. */
#define ASSERT_OK(data) ASSERT_FALSE(data)
#define ASSERT_OK_PTR(ptr) ASSERT_NE(NULL, ptr)
/* Verbose uhid tracing, compiled out unless SHOW_UHID_DEBUG is set. */
#define UHID_LOG(fmt, ...) do { \
	if (SHOW_UHID_DEBUG) \
		TH_LOG(fmt, ##__VA_ARGS__); \
} while (0)
/* Signal the fixture setup once the device delivered UHID_START. */
static pthread_mutex_t uhid_started_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t uhid_started = PTHREAD_COND_INITIALIZER;
/* no need to protect uhid_stopped, only one thread accesses it */
static bool uhid_stopped;
/*
 * Write one uhid_event to the uhid character device.  Returns 0 on
 * success, -errno on write failure, -EFAULT on a short write.
 */
static int uhid_write(struct __test_metadata *_metadata, int fd, const struct uhid_event *ev)
{
	ssize_t ret;
	ret = write(fd, ev, sizeof(*ev));
	if (ret < 0) {
		TH_LOG("Cannot write to uhid: %m");
		return -errno;
	} else if (ret != sizeof(*ev)) {
		/*
		 * Print the expected event size; the previous sizeof(ev)
		 * reported the size of the pointer instead of the struct.
		 */
		TH_LOG("Wrong size written to uhid: %zd != %zu",
		       ret, sizeof(*ev));
		return -EFAULT;
	} else {
		return 0;
	}
}
/*
 * Register a new virtual HID device (vendor 0x0001, product 0x0a37)
 * named after rand_nb; the phys string carries rand_nb so the sysfs
 * matcher can later find exactly this device.
 */
static int uhid_create(struct __test_metadata *_metadata, int fd, int rand_nb)
{
	struct uhid_event ev;
	char buf[25];
	sprintf(buf, "test-uhid-device-%d", rand_nb);
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE;
	strcpy((char *)ev.u.create.name, buf);
	ev.u.create.rd_data = rdesc;
	ev.u.create.rd_size = sizeof(rdesc);
	ev.u.create.bus = BUS_USB;
	ev.u.create.vendor = 0x0001;
	ev.u.create.product = 0x0a37;
	ev.u.create.version = 0;
	ev.u.create.country = 0;
	sprintf(buf, "%d", rand_nb);
	strcpy((char *)ev.u.create.phys, buf);
	return uhid_write(_metadata, fd, &ev);
}
/* Tear down the virtual device; triggers UHID_STOP in the event thread. */
static void uhid_destroy(struct __test_metadata *_metadata, int fd)
{
	struct uhid_event ev;
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_DESTROY;
	uhid_write(_metadata, fd, &ev);
}
/*
 * Read and dispatch a single event from the uhid device.  UHID_START
 * wakes up the fixture setup, UHID_STOP flags the event thread to
 * exit, and UHID_GET_REPORT is answered with feature_data (only for
 * report number 1).  Returns 0, or -errno/-EFAULT on read errors.
 */
static int uhid_event(struct __test_metadata *_metadata, int fd)
{
	struct uhid_event ev, answer;
	ssize_t ret;
	memset(&ev, 0, sizeof(ev));
	ret = read(fd, &ev, sizeof(ev));
	if (ret == 0) {
		UHID_LOG("Read HUP on uhid-cdev");
		return -EFAULT;
	} else if (ret < 0) {
		UHID_LOG("Cannot read uhid-cdev: %m");
		return -errno;
	} else if (ret != sizeof(ev)) {
		UHID_LOG("Invalid size read from uhid-dev: %zd != %zu",
			 ret, sizeof(ev));
		return -EFAULT;
	}
	switch (ev.type) {
	case UHID_START:
		pthread_mutex_lock(&uhid_started_mtx);
		pthread_cond_signal(&uhid_started);
		pthread_mutex_unlock(&uhid_started_mtx);
		UHID_LOG("UHID_START from uhid-dev");
		break;
	case UHID_STOP:
		uhid_stopped = true;
		UHID_LOG("UHID_STOP from uhid-dev");
		break;
	case UHID_OPEN:
		UHID_LOG("UHID_OPEN from uhid-dev");
		break;
	case UHID_CLOSE:
		UHID_LOG("UHID_CLOSE from uhid-dev");
		break;
	case UHID_OUTPUT:
		UHID_LOG("UHID_OUTPUT from uhid-dev");
		break;
	case UHID_GET_REPORT:
		UHID_LOG("UHID_GET_REPORT from uhid-dev");
		/*
		 * Zero the reply first: the whole struct is written to
		 * the kernel, so unset fields must not leak stack data.
		 */
		memset(&answer, 0, sizeof(answer));
		answer.type = UHID_GET_REPORT_REPLY;
		answer.u.get_report_reply.id = ev.u.get_report.id;
		answer.u.get_report_reply.err = ev.u.get_report.rnum == 1 ? 0 : -EIO;
		answer.u.get_report_reply.size = sizeof(feature_data);
		memcpy(answer.u.get_report_reply.data, feature_data, sizeof(feature_data));
		uhid_write(_metadata, fd, &answer);
		break;
	case UHID_SET_REPORT:
		UHID_LOG("UHID_SET_REPORT from uhid-dev");
		break;
	default:
		TH_LOG("Invalid event from uhid-dev: %u", ev.type);
	}
	return 0;
}
/* Arguments handed to the uhid event-pump thread. */
struct uhid_thread_args {
	int fd;				/* uhid character device fd */
	struct __test_metadata *_metadata;
};
/*
 * Event pump: poll the uhid fd (100ms ticks) and dispatch events via
 * uhid_event() until UHID_STOP sets uhid_stopped or an error occurs.
 * Returns the last uhid_event()/poll() status cast to void*.
 */
static void *uhid_read_events_thread(void *arg)
{
	struct uhid_thread_args *args = (struct uhid_thread_args *)arg;
	struct __test_metadata *_metadata = args->_metadata;
	struct pollfd pfds[1];
	int fd = args->fd;
	int ret = 0;
	pfds[0].fd = fd;
	pfds[0].events = POLLIN;
	uhid_stopped = false;
	while (!uhid_stopped) {
		ret = poll(pfds, 1, 100);
		if (ret < 0) {
			TH_LOG("Cannot poll for fds: %m");
			break;
		}
		if (pfds[0].revents & POLLIN) {
			ret = uhid_event(_metadata, fd);
			if (ret)
				break;
		}
	}
	return (void *)(long)ret;
}
/*
 * Spawn the event-pump thread and block until it has seen UHID_START.
 *
 * NOTE(review): args lives on this function's stack while the thread
 * keeps a pointer to it.  This works today because the thread copies
 * the fields before signalling uhid_started (which is what lets this
 * function return), but the lifetime coupling is fragile — confirm
 * before reordering either side.
 */
static int uhid_start_listener(struct __test_metadata *_metadata, pthread_t *tid, int uhid_fd)
{
	struct uhid_thread_args args = {
		.fd = uhid_fd,
		._metadata = _metadata,
	};
	int err;
	pthread_mutex_lock(&uhid_started_mtx);
	err = pthread_create(tid, NULL, uhid_read_events_thread, (void *)&args);
	ASSERT_EQ(0, err) {
		TH_LOG("Could not start the uhid thread: %d", err);
		pthread_mutex_unlock(&uhid_started_mtx);
		close(uhid_fd);
		return -EIO;
	}
	pthread_cond_wait(&uhid_started, &uhid_started_mtx);
	pthread_mutex_unlock(&uhid_started_mtx);
	return 0;
}
/*
 * Inject one HID input report of @size bytes into the virtual device
 * via UHID_INPUT2.  Returns -E2BIG for oversized payloads, otherwise
 * the uhid_write() status.
 */
static int uhid_send_event(struct __test_metadata *_metadata, int fd, __u8 *buf, size_t size)
{
	struct uhid_event ev = {
		.type = UHID_INPUT2,
	};

	if (size > sizeof(ev.u.input.data))
		return -E2BIG;

	ev.u.input2.size = size;
	memcpy(ev.u.input2.data, buf, size);

	return uhid_write(_metadata, fd, &ev);
}
/*
 * Open /dev/uhid and register the test device.  ASSERT_* aborts the
 * test on failure (the close() is cleanup before that abort); on
 * success the uhid fd is returned.
 */
static int setup_uhid(struct __test_metadata *_metadata, int rand_nb)
{
	int fd;
	const char *path = "/dev/uhid";
	int ret;
	fd = open(path, O_RDWR | O_CLOEXEC);
	ASSERT_GE(fd, 0) TH_LOG("open uhid-cdev failed; %d", fd);
	ret = uhid_create(_metadata, fd, rand_nb);
	ASSERT_EQ(0, ret) {
		TH_LOG("create uhid device failed: %d", ret);
		close(fd);
	}
	return fd;
}
/*
 * Return true when the sysfs entry dir (under workdir) belongs to our
 * test device: its name matches the VID/PID pattern and its uevent
 * file contains "PHYS=<dev_id>".
 */
static bool match_sysfs_device(int dev_id, const char *workdir, struct dirent *dir)
{
	const char *target = "0003:0001:0A37.*";
	char phys[512];
	char uevent[1024];
	char temp[512];
	int fd, nread;
	bool found = false;
	if (fnmatch(target, dir->d_name, 0))
		return false;
	/* we found the correct VID/PID, now check for phys */
	sprintf(uevent, "%s/%s/uevent", workdir, dir->d_name);
	fd = open(uevent, O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return false;
	sprintf(phys, "PHYS=%d", dev_id);
	/*
	 * read() does not NUL-terminate; reserve one byte and terminate
	 * explicitly — strstr() on an unterminated buffer is undefined
	 * behaviour.
	 */
	nread = read(fd, temp, sizeof(temp) - 1);
	if (nread > 0) {
		temp[nread] = '\0';
		if (strstr(temp, phys) != NULL)
			found = true;
	}
	close(fd);
	return found;
}
/*
 * Find the kernel-assigned HID id (the hex suffix of the sysfs name
 * "0003:0001:0A37.XXXX") of the uhid device whose phys equals dev_id.
 * Retries a few times because the device appears asynchronously.
 * Returns the id, or -1 if it never shows up.
 */
static int get_hid_id(int dev_id)
{
	const char *workdir = "/sys/devices/virtual/misc/uhid";
	const char *str_id;
	DIR *d;
	struct dirent *dir;
	int found = -1, attempts = 3;
	/* it would be nice to be able to use nftw, but the no_alu32 target doesn't support it */
	while (found < 0 && attempts > 0) {
		attempts--;
		d = opendir(workdir);
		if (d) {
			while ((dir = readdir(d)) != NULL) {
				if (!match_sysfs_device(dev_id, workdir, dir))
					continue;
				/*
				 * sizeof() includes the terminating NUL, so
				 * subtract 1 to point at the first hex digit;
				 * the old "+ sizeof(...)" skipped that digit
				 * and only worked for zero-padded ids < 0x1000.
				 */
				str_id = dir->d_name + sizeof("0003:0001:0A37.") - 1;
				found = (int)strtol(str_id, NULL, 16);
				break;
			}
			closedir(d);
		}
		if (found < 0)
			usleep(100000);
	}
	return found;
}
/*
 * Find the hidraw minor number attached to our uhid device by walking
 * its sysfs hidraw/ directory.  Retries 5 times on a loaded system.
 * Returns the hidraw number or -1 when not found.
 */
static int get_hidraw(int dev_id)
{
	const char *workdir = "/sys/devices/virtual/misc/uhid";
	char sysfs[1024];
	DIR *d, *subd;
	struct dirent *dir, *subdir;
	int i, found = -1;
	/* retry 5 times in case the system is loaded */
	for (i = 5; i > 0; i--) {
		usleep(10);
		d = opendir(workdir);
		if (!d)
			continue;
		while ((dir = readdir(d)) != NULL) {
			if (!match_sysfs_device(dev_id, workdir, dir))
				continue;
			sprintf(sysfs, "%s/%s/hidraw", workdir, dir->d_name);
			subd = opendir(sysfs);
			if (!subd)
				continue;
			while ((subdir = readdir(subd)) != NULL) {
				if (fnmatch("hidraw*", subdir->d_name, 0))
					continue;
				found = atoi(subdir->d_name + strlen("hidraw"));
			}
			closedir(subd);
			/* NOTE(review): hidraw0 (found == 0) does not break
			 * here; the outer retry loop still terminates, it
			 * just wastes the remaining attempts. */
			if (found > 0)
				break;
		}
		closedir(d);
	}
	return found;
}
/* Open the /dev/hidrawN node backing our uhid device (non-blocking). */
static int open_hidraw(int dev_id)
{
	int hidraw_number;
	char hidraw_path[64] = { 0 };
	hidraw_number = get_hidraw(dev_id);
	if (hidraw_number < 0)
		return hidraw_number;
	/* open hidraw node to check the other side of the pipe */
	sprintf(hidraw_path, "/dev/hidraw%d", hidraw_number);
	return open(hidraw_path, O_RDWR | O_NONBLOCK);
}
/* Per-test state: one uhid device, its hidraw node and BPF attachments. */
FIXTURE(hid_bpf) {
	int dev_id;		/* random phys id used to find the device in sysfs */
	int uhid_fd;		/* /dev/uhid fd owning the virtual device */
	int hidraw_fd;		/* userspace side of the device */
	int hid_id;		/* kernel-assigned HID id */
	pthread_t tid;		/* uhid event-pump thread */
	struct hid *skel;	/* loaded BPF skeleton */
	int hid_links[3]; /* max number of programs loaded in a single test */
};
/*
 * Detach every loaded BPF program: close the hidraw fd, the link fds
 * and destroy the skeleton.  Note: 0 is used as the "unset" sentinel
 * for hidraw_fd and hid_links entries.
 */
static void detach_bpf(FIXTURE_DATA(hid_bpf) * self)
{
	int i;
	if (self->hidraw_fd)
		close(self->hidraw_fd);
	self->hidraw_fd = 0;
	for (i = 0; i < ARRAY_SIZE(self->hid_links); i++) {
		if (self->hid_links[i])
			close(self->hid_links[i]);
	}
	hid__destroy(self->skel);
	self->skel = NULL;
}
/* Destroy the device (delivers UHID_STOP), detach BPF, join the pump. */
FIXTURE_TEARDOWN(hid_bpf) {
	void *uhid_err;
	uhid_destroy(_metadata, self->uhid_fd);
	detach_bpf(self);
	pthread_join(self->tid, &uhid_err);
}
/* Log, then run the fixture teardown before the enclosing ASSERT aborts. */
#define TEARDOWN_LOG(fmt, ...) do { \
	TH_LOG(fmt, ##__VA_ARGS__); \
	hid_bpf_teardown(_metadata, self, variant); \
} while (0)
/* Create the uhid device, resolve its HID id and start the event pump. */
FIXTURE_SETUP(hid_bpf)
{
	time_t t;
	int err;
	/* initialize random number generator */
	srand((unsigned int)time(&t));
	self->dev_id = rand() % 1024;
	self->uhid_fd = setup_uhid(_metadata, self->dev_id);
	/* locate the uevent file of the created device */
	self->hid_id = get_hid_id(self->dev_id);
	ASSERT_GT(self->hid_id, 0)
		TEARDOWN_LOG("Could not locate uhid device id: %d", self->hid_id);
	err = uhid_start_listener(_metadata, &self->tid, self->uhid_fd);
	ASSERT_EQ(0, err) TEARDOWN_LOG("could not start udev listener: %d", err);
}
/* One BPF program to load, with its requested insertion position. */
struct test_program {
	const char *name;
	int insert_head;	/* non-zero: attach at the head of the list */
};
/* Load the listed programs (or none, for syscall-only tests). */
#define LOAD_PROGRAMS(progs) \
	load_programs(progs, ARRAY_SIZE(progs), _metadata, self, variant)
#define LOAD_BPF \
	load_programs(NULL, 0, _metadata, self, variant)
/*
 * Open the hid skeleton, autoload only the requested programs, attach
 * each one to self->hid_id through the attach_prog BPF syscall program
 * (link fds land in self->hid_links), and open the hidraw node.
 */
static void load_programs(const struct test_program programs[],
			  const size_t progs_count,
			  struct __test_metadata *_metadata,
			  FIXTURE_DATA(hid_bpf) * self,
			  const FIXTURE_VARIANT(hid_bpf) * variant)
{
	int attach_fd, err = -EINVAL;
	struct attach_prog_args args = {
		.retval = -1,
	};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattr,
		.ctx_in = &args,
		.ctx_size_in = sizeof(args),
	);
	ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links))
		TH_LOG("too many programs are to be loaded");
	/* open the bpf file */
	self->skel = hid__open();
	ASSERT_OK_PTR(self->skel) TEARDOWN_LOG("Error while calling hid__open");
	/* enable autoload only for the programs this test needs */
	for (int i = 0; i < progs_count; i++) {
		struct bpf_program *prog;
		prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
							programs[i].name);
		ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
		bpf_program__set_autoload(prog, true);
	}
	err = hid__load(self->skel);
	ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err);
	attach_fd = bpf_program__fd(self->skel->progs.attach_prog);
	ASSERT_GE(attach_fd, 0) TH_LOG("locate attach_prog: %d", attach_fd);
	/* attach each program; args.retval returns the link fd */
	for (int i = 0; i < progs_count; i++) {
		struct bpf_program *prog;
		prog = bpf_object__find_program_by_name(*self->skel->skeleton->obj,
							programs[i].name);
		ASSERT_OK_PTR(prog) TH_LOG("can not find program by name '%s'", programs[i].name);
		args.prog_fd = bpf_program__fd(prog);
		args.hid = self->hid_id;
		args.insert_head = programs[i].insert_head;
		err = bpf_prog_test_run_opts(attach_fd, &tattr);
		ASSERT_GE(args.retval, 0)
			TH_LOG("attach_hid(%s): %d", programs[i].name, args.retval);
		self->hid_links[i] = args.retval;
	}
	self->hidraw_fd = open_hidraw(self->dev_id);
	ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
}
/*
* A simple test to see if the fixture is working fine.
* If this fails, none of the other tests will pass.
*/
TEST_F(hid_bpf, test_create_uhid)
{
	/* fixture setup/teardown does all the work */
}
/*
* Attach hid_first_event to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
* check that the program sees it and can change the data
*/
TEST_F(hid_bpf, raw_event)
{
	const struct test_program progs[] = {
		{ .name = "hid_first_event" },
	};
	__u8 buf[10] = {0};
	int err;
	LOAD_PROGRAMS(progs);
	/* check that the program is correctly loaded */
	ASSERT_EQ(self->skel->data->callback_check, 52) TH_LOG("callback_check1");
	ASSERT_EQ(self->skel->data->callback2_check, 52) TH_LOG("callback2_check1");
	/* inject one event: report id 1, first data byte 42 */
	buf[0] = 1;
	buf[1] = 42;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* check that hid_first_event() was executed */
	ASSERT_EQ(self->skel->data->callback_check, 42) TH_LOG("callback_check1");
	/* read the data from hidraw */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
	ASSERT_EQ(buf[0], 1);
	/* the BPF program rewrote byte 2 (42 -> 47) */
	ASSERT_EQ(buf[2], 47);
	/* inject another event */
	memset(buf, 0, sizeof(buf));
	buf[0] = 1;
	buf[1] = 47;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* check that hid_first_event() was executed */
	ASSERT_EQ(self->skel->data->callback_check, 47) TH_LOG("callback_check1");
	/* read the data from hidraw */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
	ASSERT_EQ(buf[2], 52);
}
/*
* Ensures that we can attach/detach programs
*/
TEST_F(hid_bpf, test_attach_detach)
{
	const struct test_program progs[] = {
		{ .name = "hid_first_event" },
		{ .name = "hid_second_event" },
	};
	__u8 buf[10] = {0};
	int err, link;
	LOAD_PROGRAMS(progs);
	link = self->hid_links[0];
	ASSERT_GT(link, 0) TH_LOG("HID-BPF link not created");
	/* inject one event */
	buf[0] = 1;
	buf[1] = 42;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* read the data from hidraw */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
	ASSERT_EQ(buf[0], 1);
	ASSERT_EQ(buf[2], 47);
	/* make sure both programs are run */
	ASSERT_EQ(buf[3], 52);
	/* pin the first program and immediately unpin it */
#define PIN_PATH "/sys/fs/bpf/hid_first_event"
	err = bpf_obj_pin(link, PIN_PATH);
	ASSERT_OK(err) TH_LOG("error while calling bpf_obj_pin");
	remove(PIN_PATH);
#undef PIN_PATH
	usleep(100000);
	/* detach the program */
	detach_bpf(self);
	/* detach_bpf() closed the hidraw fd; reopen it */
	self->hidraw_fd = open_hidraw(self->dev_id);
	ASSERT_GE(self->hidraw_fd, 0) TH_LOG("open_hidraw");
	/* inject another event */
	memset(buf, 0, sizeof(buf));
	buf[0] = 1;
	buf[1] = 47;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* read the data from hidraw: no BPF must alter the report anymore */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw_no_bpf");
	ASSERT_EQ(buf[0], 1);
	ASSERT_EQ(buf[1], 47);
	ASSERT_EQ(buf[2], 0);
	ASSERT_EQ(buf[3], 0);
	/* re-attach our program */
	LOAD_PROGRAMS(progs);
	/* inject one event */
	memset(buf, 0, sizeof(buf));
	buf[0] = 1;
	buf[1] = 42;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* read the data from hidraw */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
	ASSERT_EQ(buf[0], 1);
	ASSERT_EQ(buf[2], 47);
	ASSERT_EQ(buf[3], 52);
}
/*
* Attach hid_change_report_id to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
* check that the program sees it and can change the data
*/
TEST_F(hid_bpf, test_hid_change_report)
{
	const struct test_program progs[] = {
		{ .name = "hid_change_report_id" },
	};
	__u8 buf[10] = {0};
	int err;
	LOAD_PROGRAMS(progs);
	/* inject one event with report id 1 */
	buf[0] = 1;
	buf[1] = 42;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* read the data from hidraw: the program rewrote it to report id 2,
	 * which is 9 bytes long per the descriptor
	 */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 9) TH_LOG("read_hidraw");
	ASSERT_EQ(buf[0], 2);
	ASSERT_EQ(buf[1], 42);
	ASSERT_EQ(buf[2], 0) TH_LOG("leftovers_from_previous_test");
}
/*
* Attach hid_user_raw_request to the given uhid device,
* call the bpf program from userspace
* check that the program is called and does the expected.
*/
TEST_F(hid_bpf, test_hid_user_raw_request_call)
{
	struct hid_hw_request_syscall_args args = {
		.retval = -1,
		.type = HID_FEATURE_REPORT,
		.request_type = HID_REQ_GET_REPORT,
		.size = 10,
	};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, tattrs,
		.ctx_in = &args,
		.ctx_size_in = sizeof(args),
	);
	int err, prog_fd;
	LOAD_BPF;
	args.hid = self->hid_id;
	args.data[0] = 1; /* report ID */
	prog_fd = bpf_program__fd(self->skel->progs.hid_user_raw_request);
	err = bpf_prog_test_run_opts(prog_fd, &tattrs);
	ASSERT_OK(err) TH_LOG("error while calling bpf_prog_test_run_opts");
	/* the uhid GET_REPORT handler answered with feature_data = {1, 2} */
	ASSERT_EQ(args.retval, 2);
	ASSERT_EQ(args.data[1], 2);
}
/*
* Attach hid_insert{0,1,2} to the given uhid device,
* retrieve and open the matching hidraw node,
* inject one event in the uhid device,
* check that the programs have been inserted in the correct order.
*/
TEST_F(hid_bpf, test_hid_attach_flags)
{
	const struct test_program progs[] = {
		{
			.name = "hid_test_insert2",
			.insert_head = 0,
		},
		{
			.name = "hid_test_insert1",
			.insert_head = 1,
		},
		{
			.name = "hid_test_insert3",
			.insert_head = 0,
		},
	};
	__u8 buf[10] = {0};
	int err;
	LOAD_PROGRAMS(progs);
	/* inject one event */
	buf[0] = 1;
	uhid_send_event(_metadata, self->uhid_fd, buf, 6);
	/* read the data from hidraw */
	memset(buf, 0, sizeof(buf));
	err = read(self->hidraw_fd, buf, sizeof(buf));
	ASSERT_EQ(err, 6) TH_LOG("read_hidraw");
	/* insert1 ran first (head insertion), then insert2, then insert3 */
	ASSERT_EQ(buf[1], 1);
	ASSERT_EQ(buf[2], 2);
	ASSERT_EQ(buf[3], 3);
}
/*
* Attach hid_rdesc_fixup to the given uhid device,
* retrieve and open the matching hidraw node,
* check that the hidraw report descriptor has been updated.
*/
TEST_F(hid_bpf, test_rdesc_fixup)
{
	struct hidraw_report_descriptor rpt_desc = {0};
	const struct test_program progs[] = {
		{ .name = "hid_rdesc_fixup" },
	};
	int err, desc_size;
	LOAD_PROGRAMS(progs);
	/* check that hid_rdesc_fixup() was executed */
	/* 0x21 is the original descriptor byte 4 the prog saved before patching */
	ASSERT_EQ(self->skel->data->callback2_check, 0x21);
	/* read the exposed report descriptor from hidraw */
	err = ioctl(self->hidraw_fd, HIDIOCGRDESCSIZE, &desc_size);
	ASSERT_GE(err, 0) TH_LOG("error while reading HIDIOCGRDESCSIZE: %d", err);
	/* ensure the new size of the rdesc is bigger than the old one */
	ASSERT_GT(desc_size, sizeof(rdesc));
	rpt_desc.size = desc_size;
	err = ioctl(self->hidraw_fd, HIDIOCGRDESC, &rpt_desc);
	ASSERT_GE(err, 0) TH_LOG("error while reading HIDIOCGRDESC: %d", err);
	/* the prog overwrote descriptor byte 4 with 0x42 */
	ASSERT_EQ(rpt_desc.value[4], 0x42);
}
/*
 * libbpf logging callback: drop debug-level chatter and prefix every
 * other message with "# " so it stays harmless inside TAP output.
 */
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	char prefixed_fmt[1024];

	if (level == LIBBPF_DEBUG)
		return 0;

	/* Build a "# "-prefixed format string, then let vfprintf expand it. */
	snprintf(prefixed_fmt, sizeof(prefixed_fmt), "# %s", format);
	vfprintf(stdout, prefixed_fmt, args);

	return 0;
}
/*
 * Harness helper: if no earlier constructor has chosen a registration
 * order yet, default __constructor_order to backward.
 */
static void __attribute__((constructor)) __constructor_order_last(void)
{
	if (!__constructor_order)
		__constructor_order = _CONSTRUCTOR_ORDER_BACKWARD;
}
/* Entry point: configure libbpf, then hand control to the kselftest harness. */
int main(int argc, char **argv)
{
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/hid/hid_bpf.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red hat */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "hid_bpf_helpers.h"
char _license[] SEC("license") = "GPL";
/* Arguments passed from userspace to the attach_prog() syscall program. */
struct attach_prog_args {
	int prog_fd;
	unsigned int hid;
	int retval;
	int insert_head;
};
/* Written by the programs below; userspace reads them to verify execution. */
__u64 callback_check = 52;
__u64 callback2_check = 52;
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx)
{
	/* writable window on the first 3 bytes of the incoming report */
	__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
	if (!rw_data)
		return 0; /* EPERM check */
	/* record byte 1 so userspace can verify we ran, then patch byte 2 */
	callback_check = rw_data[1];
	rw_data[2] = rw_data[1] + 5;
	return hid_ctx->size;
}
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
	if (!rw_data)
		return 0; /* EPERM check */
	/* chains on hid_first_event's output: byte 3 = byte 2 + 5 */
	rw_data[3] = rw_data[2] + 5;
	return hid_ctx->size;
}
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
	if (!rw_data)
		return 0; /* EPERM check */
	/* rewrite the report ID and force a 9-byte report length */
	rw_data[0] = 2;
	return 9;
}
SEC("syscall")
int attach_prog(struct attach_prog_args *ctx)
{
	/* attach prog_fd to the HID device; insert_head selects list position */
	ctx->retval = hid_bpf_attach_prog(ctx->hid,
					  ctx->prog_fd,
					  ctx->insert_head ? HID_BPF_FLAG_INSERT_HEAD :
							     HID_BPF_FLAG_NONE);
	return 0;
}
/* In/out arguments for the hid_user_raw_request() syscall program. */
struct hid_hw_request_syscall_args {
	/* data needs to come at offset 0 so we can use it in calls */
	__u8 data[10];
	unsigned int hid;
	int retval;
	size_t size;
	enum hid_report_type type;
	__u8 request_type;
};
SEC("syscall")
int hid_user_raw_request(struct hid_hw_request_syscall_args *args)
{
	struct hid_bpf_ctx *ctx;
	const size_t size = args->size;
	int i, ret = 0;
	/* bound the request to the fixed data buffer */
	if (size > sizeof(args->data))
		return -7; /* -E2BIG */
	ctx = hid_bpf_allocate_context(args->hid);
	if (!ctx)
		return -1; /* EPERM check */
	/* issue the hardware request; result bytes land in args->data */
	ret = hid_bpf_hw_request(ctx,
				 args->data,
				 size,
				 args->type,
				 args->request_type);
	args->retval = ret;
	/* context must always be released, even on failure */
	hid_bpf_release_context(ctx);
	return 0;
}
/*
 * HID report-descriptor fragment that hid_rdesc_fixup() splices into the
 * device descriptor at offset 73 (adds one input and vendor output/feature
 * items, then closes the two enclosing collections).
 */
static const __u8 rdesc[] = {
	0x05, 0x01,				/* USAGE_PAGE (Generic Desktop) */
	0x09, 0x32,				/* USAGE (Z) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x81, 0x06,				/* INPUT (Data,Var,Rel) */
	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x01,				/* USAGE_MINIMUM (1) */
	0x29, 0x03,				/* USAGE_MAXIMUM (3) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0x91, 0x02,				/* Output (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */
	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x06,				/* USAGE_MINIMUM (6) */
	0x29, 0x08,				/* USAGE_MAXIMUM (8) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0xb1, 0x02,				/* Feature (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */
	0xc0,					/* END_COLLECTION */
	0xc0,				/* END_COLLECTION */
};
SEC("?fmod_ret/hid_bpf_rdesc_fixup")
int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */);
	if (!data)
		return 0; /* EPERM check */
	/* save original byte 4 so userspace can verify we ran */
	callback2_check = data[4];
	/* insert rdesc at offset 73 */
	__builtin_memcpy(&data[73], rdesc, sizeof(rdesc));
	/* Change Usage Vendor globally */
	data[4] = 0x42;
	/* new descriptor length: 73 original bytes + the spliced fragment */
	return sizeof(rdesc) + 73;
}
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
	if (!data)
		return 0; /* EPERM check */
	/* we need to be run first: insert2/insert3 must not have written yet */
	if (data[2] || data[3])
		return -1;
	data[1] = 1;
	return 0;
}
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
	if (!data)
		return 0; /* EPERM check */
	/* must run after insert1 (data[1] set) and before insert3 (data[3] unset) */
	if (!data[1] || data[3])
		return -1;
	data[2] = 2;
	return 0;
}
SEC("?fmod_ret/hid_bpf_device_event")
int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
	if (!data)
		return 0; /* EPERM check */
	/* at the end: both insert1 and insert2 must have run already */
	if (!data[1] || !data[2])
		return -1;
	data[3] = 3;
	return 0;
}
| linux-master | tools/testing/selftests/hid/progs/hid.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/msg.h>
#include <fcntl.h>
#include "../kselftest.h"
#define MAX_MSG_SIZE 32
/* One queued message plus its captured size (msize is filled by dump_queue()). */
struct msg1 {
	int msize;
	long mtype;
	char mtext[MAX_MSG_SIZE];
};
#define TEST_STRING "Test sysv5 msg"
#define MSG_TYPE 1
#define ANOTHER_TEST_STRING "Yet another test sysv5 msg"
#define ANOTHER_MSG_TYPE 26538
/* Checkpoint of a SysV message queue: identity, attributes and contents. */
struct msgque_data {
	key_t key;
	int msq_id;
	int qbytes;
	int qnum;
	int mode;
	struct msg1 *messages;
};
/*
 * Re-create the queue captured by dump_queue(): force the kernel to hand
 * out the same queue id via msg_next_id, then re-send every saved message.
 * Returns 0 on success, a negative errno-style value on failure.
 */
int restore_queue(struct msgque_data *msgque)
{
	int fd, ret, id, i;
	char buf[32];

	fd = open("/proc/sys/kernel/msg_next_id", O_WRONLY);
	if (fd == -1) {
		/* save errno before printf() can clobber it */
		ret = -errno;
		printf("Failed to open /proc/sys/kernel/msg_next_id\n");
		return ret;
	}
	sprintf(buf, "%d", msgque->msq_id);
	ret = write(fd, buf, strlen(buf));
	/* the fd was previously leaked on every path; close it here */
	close(fd);
	if (ret != strlen(buf)) {
		ret = -errno;
		printf("Failed to write to /proc/sys/kernel/msg_next_id\n");
		return ret;
	}

	id = msgget(msgque->key, msgque->mode | IPC_CREAT | IPC_EXCL);
	if (id == -1) {
		ret = -errno;
		printf("Failed to create queue\n");
		return ret;
	}

	if (id != msgque->msq_id) {
		printf("Restored queue has wrong id (%d instead of %d)\n",
							id, msgque->msq_id);
		ret = -EFAULT;
		goto destroy;
	}

	/* replay the captured messages in their original order */
	for (i = 0; i < msgque->qnum; i++) {
		if (msgsnd(msgque->msq_id, &msgque->messages[i].mtype,
			   msgque->messages[i].msize, IPC_NOWAIT) != 0) {
			printf("msgsnd failed (%m)\n");
			ret = -errno;
			goto destroy;
		}
	}
	return 0;

destroy:
	if (msgctl(id, IPC_RMID, NULL))
		printf("Failed to destroy queue: %d\n", -errno);
	return ret;
}
/*
 * Drain the queue, comparing each received message (size, type, content)
 * against the snapshot in msgque->messages, then destroy the queue.
 * Returns 0 when everything matched, a negative value otherwise.
 */
int check_and_destroy_queue(struct msgque_data *msgque)
{
	struct msg1 message;
	int cnt = 0, ret;
	while (1) {
		/*
		 * &message.mtype is the msgbuf-compatible start of the
		 * record: msize sits before mtype in struct msg1.
		 */
		ret = msgrcv(msgque->msq_id, &message.mtype, MAX_MSG_SIZE,
				0, IPC_NOWAIT);
		if (ret < 0) {
			/* ENOMSG means the queue is empty: normal exit */
			if (errno == ENOMSG)
				break;
			printf("Failed to read IPC message: %m\n");
			ret = -errno;
			goto err;
		}
		if (ret != msgque->messages[cnt].msize) {
			printf("Wrong message size: %d (expected %d)\n", ret,
						msgque->messages[cnt].msize);
			ret = -EINVAL;
			goto err;
		}
		if (message.mtype != msgque->messages[cnt].mtype) {
			printf("Wrong message type\n");
			ret = -EINVAL;
			goto err;
		}
		if (memcmp(message.mtext, msgque->messages[cnt].mtext, ret)) {
			printf("Wrong message content\n");
			ret = -EINVAL;
			goto err;
		}
		cnt++;
	}
	if (cnt != msgque->qnum) {
		printf("Wrong message number\n");
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	/* the queue is removed even when a comparison failed */
	if (msgctl(msgque->msq_id, IPC_RMID, NULL)) {
		printf("Failed to destroy queue: %d\n", -errno);
		return -errno;
	}
	return ret;
}
/*
 * Snapshot the queue: find its kernel-internal index, record its
 * attributes, and copy out (without consuming) every queued message
 * via MSG_COPY. Returns 0 on success, negative errno-style on failure.
 */
int dump_queue(struct msgque_data *msgque)
{
	struct msqid_ds ds;
	int kern_id;
	int i, ret;

	/* MSG_STAT takes a kernel index, not a msqid: scan for our queue */
	for (kern_id = 0; kern_id < 256; kern_id++) {
		ret = msgctl(kern_id, MSG_STAT, &ds);
		if (ret < 0) {
			if (errno == EINVAL)
				continue;
			printf("Failed to get stats for IPC queue with id %d\n",
					kern_id);
			return -errno;
		}
		if (ret == msgque->msq_id)
			break;
	}
	if (kern_id == 256) {
		/* previously fell through and used an uninitialized 'ds' */
		printf("Failed to find IPC queue with id %d\n",
				msgque->msq_id);
		return -ENOENT;
	}

	msgque->messages = malloc(sizeof(struct msg1) * ds.msg_qnum);
	if (msgque->messages == NULL) {
		printf("Failed to allocate message buffer\n");
		return -ENOMEM;
	}

	msgque->qnum = ds.msg_qnum;
	msgque->mode = ds.msg_perm.mode;
	msgque->qbytes = ds.msg_qbytes;

	for (i = 0; i < msgque->qnum; i++) {
		/* with MSG_COPY, msgtyp is the index of the message to copy */
		ret = msgrcv(msgque->msq_id, &msgque->messages[i].mtype,
				MAX_MSG_SIZE, i, IPC_NOWAIT | MSG_COPY);
		if (ret < 0) {
			ret = -errno;
			printf("Failed to copy IPC message: %m (%d)\n", errno);
			/* don't leak the snapshot buffer on failure */
			free(msgque->messages);
			msgque->messages = NULL;
			return ret;
		}
		msgque->messages[i].msize = ret;
	}
	return 0;
}
/*
 * Seed the queue with two known messages (different types and lengths)
 * that the checkpoint/restore cycle is later verified against.
 */
int fill_msgque(struct msgque_data *msgque)
{
	struct msg1 msgbuf;
	msgbuf.mtype = MSG_TYPE;
	/* sizeof() includes the NUL terminator on purpose */
	memcpy(msgbuf.mtext, TEST_STRING, sizeof(TEST_STRING));
	if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(TEST_STRING),
			IPC_NOWAIT) != 0) {
		printf("First message send failed (%m)\n");
		return -errno;
	}
	msgbuf.mtype = ANOTHER_MSG_TYPE;
	memcpy(msgbuf.mtext, ANOTHER_TEST_STRING, sizeof(ANOTHER_TEST_STRING));
	if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(ANOTHER_TEST_STRING),
			IPC_NOWAIT) != 0) {
		printf("Second message send failed (%m)\n");
		return -errno;
	}
	return 0;
}
/*
 * Checkpoint/restore test for SysV message queues: create a queue, fill
 * it, snapshot it, verify-and-destroy it, restore it from the snapshot,
 * then verify-and-destroy it again. Requires root (writes msg_next_id).
 */
int main(int argc, char **argv)
{
	/* 'msg' and 'pid' were declared but never used; removed */
	int err;
	struct msgque_data msgque;

	if (getuid() != 0)
		return ksft_exit_skip(
				"Please run the test as root - Exiting.\n");

	msgque.key = ftok(argv[0], 822155650);
	if (msgque.key == -1) {
		printf("Can't make key: %d\n", -errno);
		return ksft_exit_fail();
	}

	msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
	if (msgque.msq_id == -1) {
		err = -errno;
		printf("Can't create queue: %d\n", err);
		goto err_out;
	}

	err = fill_msgque(&msgque);
	if (err) {
		printf("Failed to fill queue: %d\n", err);
		goto err_destroy;
	}

	err = dump_queue(&msgque);
	if (err) {
		printf("Failed to dump queue: %d\n", err);
		goto err_destroy;
	}

	/* first pass consumes and removes the original queue */
	err = check_and_destroy_queue(&msgque);
	if (err) {
		printf("Failed to check and destroy queue: %d\n", err);
		goto err_out;
	}

	err = restore_queue(&msgque);
	if (err) {
		printf("Failed to restore queue: %d\n", err);
		goto err_destroy;
	}

	/* second pass validates the restored copy */
	err = check_and_destroy_queue(&msgque);
	if (err) {
		printf("Failed to test queue: %d\n", err);
		goto err_out;
	}
	return ksft_exit_pass();

err_destroy:
	if (msgctl(msgque.msq_id, IPC_RMID, NULL)) {
		printf("Failed to destroy queue: %d\n", -errno);
		return ksft_exit_fail();
	}
err_out:
	return ksft_exit_fail();
}
| linux-master | tools/testing/selftests/ipc/msgque.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <fcntl.h>
/*
 * Splice stdin (fd 0) to stdout (fd 1) in a single call.
 * The return value is intentionally ignored: the program always exits 0;
 * the test only exercises the default file splice_read path.
 */
int main(int argc, char **argv)
{
	splice(0, 0, 1, 0, 1<<30, 0);
	return 0;
}
| linux-master | tools/testing/selftests/splice/default_file_splice_read.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
/*
 * Splice BYTES bytes (default: the whole file) from INPUT to stdout.
 * Exits non-zero on usage, open/stat, or splice errors.
 */
int main(int argc, char *argv[])
{
	int fd;
	size_t size;
	ssize_t spliced;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s INPUT [BYTES]\n", argv[0]);
		return EXIT_FAILURE;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror(argv[1]);
		return EXIT_FAILURE;
	}

	if (argc == 3)
		size = atol(argv[2]);
	else {
		struct stat statbuf;

		if (fstat(fd, &statbuf) < 0) {
			perror(argv[1]);
			return EXIT_FAILURE;
		}

		if (statbuf.st_size > INT_MAX) {
			fprintf(stderr, "%s: Too big\n", argv[1]);
			return EXIT_FAILURE;
		}

		size = statbuf.st_size;
	}

	/*
	 * splice(2) may transfer fewer bytes than requested (e.g. when
	 * stdout is a pipe whose buffer fills up), so loop until 'size'
	 * bytes have moved or the input hits EOF.
	 */
	while (size > 0) {
		spliced = splice(fd, NULL, STDOUT_FILENO, NULL,
				 size, SPLICE_F_MOVE);
		if (spliced < 0) {
			perror("splice");
			return EXIT_FAILURE;
		}
		if (spliced == 0)
			break;	/* EOF before 'size' bytes were moved */
		size -= spliced;
	}

	close(fd);
	return EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/splice/splice_read.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 HiSilicon Limited.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/types.h>
#include <linux/map_benchmark.h>
#define NSEC_PER_MSEC 1000000L
/* Human-readable names, indexed by the DMA_MAP_* direction value */
static char *directions[] = {
	"BIDIRECTIONAL",
	"TO_DEVICE",
	"FROM_DEVICE",
};
/*
 * Front end for the dma_map_benchmark debugfs interface: parse the
 * benchmark parameters, validate their ranges, run the ioctl and print
 * the measured map/unmap latencies.
 */
int main(int argc, char **argv)
{
	struct map_benchmark map;
	int fd, opt;
	/* default single thread, run 20 seconds on NUMA_NO_NODE */
	int threads = 1, seconds = 20, node = -1;
	/* default dma mask 32bit, bidirectional DMA */
	int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL;
	/* default granule 1 PAGESIZE */
	int granule = 1;
	int cmd = DMA_MAP_BENCHMARK;
	/* removed unused 'char *p' */

	while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
		switch (opt) {
		case 't':
			threads = atoi(optarg);
			break;
		case 's':
			seconds = atoi(optarg);
			break;
		case 'n':
			node = atoi(optarg);
			break;
		case 'b':
			bits = atoi(optarg);
			break;
		case 'd':
			dir = atoi(optarg);
			break;
		case 'x':
			xdelay = atoi(optarg);
			break;
		case 'g':
			granule = atoi(optarg);
			break;
		default:
			return -1;
		}
	}

	if (threads <= 0 || threads > DMA_MAP_MAX_THREADS) {
		fprintf(stderr, "invalid number of threads, must be in 1-%d\n",
			DMA_MAP_MAX_THREADS);
		exit(1);
	}

	if (seconds <= 0 || seconds > DMA_MAP_MAX_SECONDS) {
		fprintf(stderr, "invalid number of seconds, must be in 1-%d\n",
			DMA_MAP_MAX_SECONDS);
		exit(1);
	}

	if (xdelay < 0 || xdelay > DMA_MAP_MAX_TRANS_DELAY) {
		fprintf(stderr, "invalid transmit delay, must be in 0-%ld\n",
			DMA_MAP_MAX_TRANS_DELAY);
		exit(1);
	}

	/* suppose the minimum DMA zone is 1MB in the world */
	if (bits < 20 || bits > 64) {
		fprintf(stderr, "invalid dma mask bit, must be in 20-64\n");
		exit(1);
	}

	if (dir != DMA_MAP_BIDIRECTIONAL && dir != DMA_MAP_TO_DEVICE &&
	    dir != DMA_MAP_FROM_DEVICE) {
		fprintf(stderr, "invalid dma direction\n");
		exit(1);
	}

	if (granule < 1 || granule > 1024) {
		fprintf(stderr, "invalid granule size\n");
		exit(1);
	}

	fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);
	if (fd == -1) {
		perror("open");
		exit(1);
	}

	memset(&map, 0, sizeof(map));
	map.seconds = seconds;
	map.threads = threads;
	map.node = node;
	map.dma_bits = bits;
	map.dma_dir = dir;
	map.dma_trans_ns = xdelay;
	map.granule = granule;

	if (ioctl(fd, cmd, &map)) {
		perror("ioctl");
		exit(1);
	}

	/* was 'dir[directions]' (commutative indexing); use the clear form */
	printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",
			threads, seconds, node, directions[dir], granule);
	printf("average map latency(us):%.1f standard deviation:%.1f\n",
			map.avg_map_100ns/10.0, map.map_stddev/10.0);
	printf("average unmap latency(us):%.1f standard deviation:%.1f\n",
			map.avg_unmap_100ns/10.0, map.unmap_stddev/10.0);

	/* previously leaked; harmless at exit, but close explicitly */
	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/dma/dma_map_benchmark.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Google, Inc.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/timerfd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "../kselftest.h"
/*
 * Child body: pin itself to 'cpu', request tracing by the parent and
 * stop with SIGSTOP so the parent can single-step it. Never returns;
 * exits 1 on any setup failure, 0 after being continued.
 */
void child(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		ksft_print_msg("sched_setaffinity() failed: %s\n",
			strerror(errno));
		_exit(1);
	}

	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
		ksft_print_msg("ptrace(PTRACE_TRACEME) failed: %s\n",
			strerror(errno));
		_exit(1);
	}

	if (raise(SIGSTOP) != 0) {
		ksft_print_msg("raise(SIGSTOP) failed: %s\n", strerror(errno));
		_exit(1);
	}

	_exit(0);
}
/*
 * Fork a child pinned to 'cpu', single-step it once with
 * PTRACE_SINGLESTEP, then continue it to completion.
 * Returns KSFT_PASS/KSFT_SKIP/KSFT_FAIL.
 */
int run_test(int cpu)
{
	int status;
	pid_t pid = fork();
	pid_t wpid;

	if (pid < 0) {
		ksft_print_msg("fork() failed: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (pid == 0)
		child(cpu);

	wpid = waitpid(pid, &status, __WALL);
	if (wpid != pid) {
		ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (!WIFSTOPPED(status)) {
		ksft_print_msg("child did not stop: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (WSTOPSIG(status) != SIGSTOP) {
		ksft_print_msg("child did not stop with SIGSTOP: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}

	if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) {
		/* EIO means single-stepping is unsupported here: skip */
		if (errno == EIO) {
			ksft_print_msg(
				"ptrace(PTRACE_SINGLESTEP) not supported on this architecture: %s\n",
				strerror(errno));
			return KSFT_SKIP;
		}
		ksft_print_msg("ptrace(PTRACE_SINGLESTEP) failed: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}

	wpid = waitpid(pid, &status, __WALL);
	if (wpid != pid) {
		/* fixed: format specifier was '$s' instead of '%s' */
		ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (WIFEXITED(status)) {
		ksft_print_msg("child did not single-step: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}
	if (!WIFSTOPPED(status)) {
		ksft_print_msg("child did not stop: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (WSTOPSIG(status) != SIGTRAP) {
		ksft_print_msg("child did not stop with SIGTRAP: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}

	if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
		ksft_print_msg("ptrace(PTRACE_CONT) failed: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}

	wpid = waitpid(pid, &status, __WALL);
	if (wpid != pid) {
		ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
		return KSFT_FAIL;
	}
	if (!WIFEXITED(status)) {
		ksft_print_msg("child did not exit after PTRACE_CONT: %s\n",
			strerror(errno));
		return KSFT_FAIL;
	}

	return KSFT_PASS;
}
/*
 * Suspend the machine to RAM with a 5-second CLOCK_BOOTTIME_ALARM
 * wake-up timer armed, so the test resumes automatically.
 * Exits via ksft_exit_* on any failure; requires root.
 */
void suspend(void)
{
	int power_state_fd;
	int timerfd;
	int err;
	struct itimerspec spec = {};
	/* removed unused 'struct sigevent event' local */

	if (getuid() != 0)
		ksft_exit_skip("Please run the test as root - Exiting.\n");

	power_state_fd = open("/sys/power/state", O_RDWR);
	if (power_state_fd < 0)
		ksft_exit_fail_msg(
			"open(\"/sys/power/state\") failed %s)\n",
			strerror(errno));

	/* CLOCK_BOOTTIME_ALARM fires even while the system is suspended */
	timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
	if (timerfd < 0)
		ksft_exit_fail_msg("timerfd_create() failed\n");

	spec.it_value.tv_sec = 5;
	err = timerfd_settime(timerfd, 0, &spec, NULL);
	if (err < 0)
		ksft_exit_fail_msg("timerfd_settime() failed\n");

	/* this write blocks until the system resumes */
	if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem"))
		ksft_exit_fail_msg("Failed to enter Suspend state\n");

	close(timerfd);
	close(power_state_fd);
}
/*
 * Optionally suspend/resume the machine, then verify that
 * PTRACE_SINGLESTEP still works on every CPU in our affinity mask
 * (one kselftest result per CPU). Pass -n to skip the suspend cycle.
 */
int main(int argc, char **argv)
{
	int opt;
	bool do_suspend = true;
	bool succeeded = true;
	unsigned int tests = 0;
	cpu_set_t available_cpus;
	int err;
	int cpu;

	ksft_print_header();

	while ((opt = getopt(argc, argv, "n")) != -1) {
		switch (opt) {
		case 'n':
			do_suspend = false;
			break;
		default:
			printf("Usage: %s [-n]\n", argv[0]);
			printf("        -n: do not trigger a suspend/resume cycle before the test\n");
			return -1;
		}
	}

	err = sched_getaffinity(0, sizeof(available_cpus), &available_cpus);
	if (err < 0)
		ksft_exit_fail_msg("sched_getaffinity() failed\n");

	/* one planned test per CPU we can run on */
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (!CPU_ISSET(cpu, &available_cpus))
			continue;
		tests++;
	}

	if (do_suspend)
		suspend();

	ksft_set_plan(tests);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		int test_success;

		if (!CPU_ISSET(cpu, &available_cpus))
			continue;

		test_success = run_test(cpu);
		switch (test_success) {
		case KSFT_PASS:
			ksft_test_result_pass("CPU %d\n", cpu);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("CPU %d\n", cpu);
			break;
		case KSFT_FAIL:
			ksft_test_result_fail("CPU %d\n", cpu);
			succeeded = false;
			break;
		}
	}

	if (succeeded)
		ksft_exit_pass();
	else
		ksft_exit_fail();
}
| linux-master | tools/testing/selftests/breakpoints/step_after_suspend_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc., Frederic Weisbecker <[email protected]>
*
* Selftests for breakpoints (and more generally the do_debug() path) in x86.
*/
#include <sys/ptrace.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <string.h>
#include "../kselftest.h"
/* number of instruction breakpoints / watchpoints exercised (x86 has 4 DRs) */
#define COUNT_ISN_BPS	4
#define COUNT_WPS	4

/* Breakpoint access modes */
enum {
	BP_X = 1,
	BP_RW = 2,
	BP_W = 4,
};

static pid_t child_pid;
/*
* Ensures the child and parent are always "talking" about
* the same test sequence. (ie: that we haven't forgotten
* to call check_trapped() somewhere).
*/
static int nr_tests;
/* Program debug address register DR'n' of the traced child to 'addr'. */
static void set_breakpoint_addr(void *addr, int n)
{
	int ret;

	ret = ptrace(PTRACE_POKEUSER, child_pid,
		     offsetof(struct user, u_debugreg[n]), addr);
	if (ret)
		ksft_exit_fail_msg("Can't set breakpoint addr: %s\n",
			strerror(errno));
}
/*
 * Enable or disable breakpoint slot 'n' in the child's DR7:
 * encode the access type and length into the per-slot control field
 * and set/clear the local/global enable bits.
 */
static void toggle_breakpoint(int n, int type, int len,
			      int local, int global, int set)
{
	int ret;

	int xtype, xlen;
	unsigned long vdr7, dr7;

	switch (type) {
	case BP_X:
		xtype = 0;
		break;
	case BP_W:
		xtype = 1;
		break;
	case BP_RW:
		xtype = 3;
		break;
	default:
		/* previously left xtype uninitialized on bad input */
		ksft_exit_fail_msg("Invalid breakpoint type %d\n", type);
		return;
	}

	switch (len) {
	case 1:
		xlen = 0;
		break;
	case 2:
		xlen = 4;
		break;
	case 4:
		xlen = 0xc;
		break;
	case 8:
		xlen = 8;
		break;
	default:
		/* previously left xlen uninitialized on bad input */
		ksft_exit_fail_msg("Invalid breakpoint len %d\n", len);
		return;
	}

	dr7 = ptrace(PTRACE_PEEKUSER, child_pid,
		     offsetof(struct user, u_debugreg[7]), 0);

	/* len/type control field for slot n lives at bits 16 + 4*n */
	vdr7 = (xlen | xtype) << 16;
	vdr7 <<= 4 * n;

	if (local) {
		vdr7 |= 1 << (2 * n);
		vdr7 |= 1 << 8;
	}
	if (global) {
		vdr7 |= 2 << (2 * n);
		vdr7 |= 1 << 9;
	}

	if (set)
		dr7 |= vdr7;
	else
		dr7 &= ~vdr7;

	ret = ptrace(PTRACE_POKEUSER, child_pid,
		     offsetof(struct user, u_debugreg[7]), dr7);
	if (ret) {
		ksft_print_msg("Can't set dr7: %s\n", strerror(errno));
		exit(-1);
	}
}
/* Dummy variables to test read/write accesses */
static unsigned long long dummy_var[4];

/* Dummy functions to test execution accesses */
static void dummy_func(void) { }
static void dummy_func1(void) { }
static void dummy_func2(void) { }
static void dummy_func3(void) { }

/* one target per instruction-breakpoint slot (COUNT_ISN_BPS) */
static void (*dummy_funcs[])(void) = {
	dummy_func,
	dummy_func1,
	dummy_func2,
	dummy_func3,
};
/* set to 1 by the parent (via PTRACE_POKEDATA) after each expected trap */
static int trapped;

static void check_trapped(void)
{
	/*
	 * If we haven't trapped, wake up the parent
	 * so that it notices the failure.
	 */
	if (!trapped)
		kill(getpid(), SIGUSR1);
	trapped = 0;
	nr_tests++;
}
/*
 * Write each dummy_var[i] with an access of 'len' bytes: each store is
 * expected to fire the matching write watchpoint, then check_trapped()
 * verifies the parent observed it.
 */
static void write_var(int len)
{
	char *pcval; short *psval; int *pival; long long *plval;
	int i;

	for (i = 0; i < 4; i++) {
		switch (len) {
		case 1:
			pcval = (char *)&dummy_var[i];
			*pcval = 0xff;
			break;
		case 2:
			psval = (short *)&dummy_var[i];
			*psval = 0xffff;
			break;
		case 4:
			pival = (int *)&dummy_var[i];
			*pival = 0xffffffff;
			break;
		case 8:
			plval = (long long *)&dummy_var[i];
			*plval = 0xffffffffffffffffLL;
			break;
		}
		check_trapped();
	}
}
/*
 * Read each dummy_var[i] with an access of 'len' bytes to fire the
 * read/write watchpoints; the loaded values are intentionally unused.
 * NOTE(review): the loads are not volatile — confirm the build flags
 * keep the compiler from eliding these dead reads.
 */
static void read_var(int len)
{
	char cval; short sval; int ival; long long lval;
	int i;

	for (i = 0; i < 4; i++) {
		switch (len) {
		case 1:
			cval = *(char *)&dummy_var[i];
			break;
		case 2:
			sval = *(short *)&dummy_var[i];
			break;
		case 4:
			ival = *(int *)&dummy_var[i];
			break;
		case 8:
			lval = *(long long *)&dummy_var[i];
			break;
		}
		check_trapped();
	}
}
/*
* Do the r/w/x accesses to trigger the breakpoints. And run
* the usual traps.
*/
/*
 * Do the r/w/x accesses to trigger the breakpoints. And run
 * the usual traps.
 *
 * Child side of the test: the sequence of accesses here must mirror the
 * sequence of breakpoints the parent arms in launch_tests().
 */
static void trigger_tests(void)
{
	int len, local, global, i;
	char val;
	int ret;

	ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
	if (ret) {
		ksft_print_msg("Can't be traced? %s\n", strerror(errno));
		return;
	}

	/* Wake up father so that it sets up the first test */
	kill(getpid(), SIGUSR1);

	/* Test instruction breakpoints */
	for (local = 0; local < 2; local++) {
		for (global = 0; global < 2; global++) {
			if (!local && !global)
				continue;

			for (i = 0; i < COUNT_ISN_BPS; i++) {
				dummy_funcs[i]();
				check_trapped();
			}
		}
	}

	/* Test write watchpoints */
	for (len = 1; len <= sizeof(long); len <<= 1) {
		for (local = 0; local < 2; local++) {
			for (global = 0; global < 2; global++) {
				if (!local && !global)
					continue;
				write_var(len);
			}
		}
	}

	/* Test read/write watchpoints (on read accesses) */
	for (len = 1; len <= sizeof(long); len <<= 1) {
		for (local = 0; local < 2; local++) {
			for (global = 0; global < 2; global++) {
				if (!local && !global)
					continue;
				read_var(len);
			}
		}
	}

	/* Icebp trap */
	asm(".byte 0xf1\n");
	check_trapped();

	/* Int 3 trap */
	asm("int $3\n");
	check_trapped();

	kill(getpid(), SIGUSR1);
}
/*
 * Wait for the child to trap, check that its test counter matches ours
 * (i.e. the expected access really trapped), re-arm the child's
 * 'trapped' flag, and report pass/fail to the harness.
 */
static void check_success(const char *msg)
{
	int child_nr_tests;
	int status;
	int ret;

	/* Wait for the child to SIGTRAP */
	wait(&status);

	ret = 0;

	if (WSTOPSIG(status) == SIGTRAP) {
		child_nr_tests = ptrace(PTRACE_PEEKDATA, child_pid,
					&nr_tests, 0);
		if (child_nr_tests == nr_tests)
			ret = 1;

		if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1))
			ksft_exit_fail_msg("Can't poke: %s\n", strerror(errno));
	}

	nr_tests++;

	/*
	 * 'msg' is caller-built text: pass it as an argument, never as
	 * the format string itself (it may contain '%').
	 */
	if (ret)
		ksft_test_result_pass("%s", msg);
	else
		ksft_test_result_fail("%s", msg);
}
/*
 * Arm an execution breakpoint on each dummy function, let the child run
 * into it, and record the result; disarm before moving to the next slot.
 */
static void launch_instruction_breakpoints(char *buf, int local, int global)
{
	int i;

	for (i = 0; i < COUNT_ISN_BPS; i++) {
		set_breakpoint_addr(dummy_funcs[i], i);
		toggle_breakpoint(i, BP_X, 1, local, global, 1);
		ptrace(PTRACE_CONT, child_pid, NULL, 0);
		sprintf(buf, "Test breakpoint %d with local: %d global: %d\n",
			i, local, global);
		check_success(buf);
		toggle_breakpoint(i, BP_X, 1, local, global, 0);
	}
}
/*
 * Arm a data watchpoint (BP_W or BP_RW) of the given length on each
 * dummy_var slot, continue the child into the access and check it trapped.
 */
static void launch_watchpoints(char *buf, int mode, int len,
			       int local, int global)
{
	const char *mode_str;
	int i;

	if (mode == BP_W)
		mode_str = "write";
	else
		mode_str = "read";

	for (i = 0; i < COUNT_WPS; i++) {
		set_breakpoint_addr(&dummy_var[i], i);
		toggle_breakpoint(i, mode, len, local, global, 1);
		ptrace(PTRACE_CONT, child_pid, NULL, 0);
		sprintf(buf,
			"Test %s watchpoint %d with len: %d local: %d global: %d\n",
			mode_str, i, len, local, global);
		check_success(buf);
		toggle_breakpoint(i, mode, len, local, global, 0);
	}
}
/* Set the breakpoints and check the child successfully trigger them */
static void launch_tests(void)
{
char buf[1024];
unsigned int tests = 0;
int len, local, global, i;
tests += 3 * COUNT_ISN_BPS;
tests += sizeof(long) / 2 * 3 * COUNT_WPS;
tests += sizeof(long) / 2 * 3 * COUNT_WPS;
tests += 2;
ksft_set_plan(tests);
/* Instruction breakpoints */
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_instruction_breakpoints(buf, local, global);
}
}
/* Write watchpoint */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_watchpoints(buf, BP_W, len,
local, global);
}
}
}
/* Read-Write watchpoint */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_watchpoints(buf, BP_RW, len,
local, global);
}
}
}
/* Icebp traps */
ptrace(PTRACE_CONT, child_pid, NULL, 0);
check_success("Test icebp\n");
/* Int 3 traps */
ptrace(PTRACE_CONT, child_pid, NULL, 0);
check_success("Test int 3 trap\n");
ptrace(PTRACE_CONT, child_pid, NULL, 0);
}
/*
 * Fork the access-triggering child, wait for its initial SIGSTOP/SIGUSR1
 * handshake, drive all breakpoint tests, then reap it and exit.
 */
int main(int argc, char **argv)
{
	pid_t pid;
	int ret;

	ksft_print_header();

	pid = fork();
	if (!pid) {
		trigger_tests();
		exit(0);
	}

	child_pid = pid;

	/* child stops itself after PTRACE_TRACEME; sync up before testing */
	wait(NULL);

	launch_tests();

	wait(NULL);

	ksft_exit_pass();
}
| linux-master | tools/testing/selftests/breakpoints/breakpoint_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Google, Inc.
*
* Original Code by Pavel Labath <[email protected]>
*
* Code modified by Pratyush Anand <[email protected]>
* for testing different byte select for each access size.
*/
#define _GNU_SOURCE
#include <asm/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <elf.h>
#include <errno.h>
#include <signal.h>
#include "../kselftest.h"
/* watchpoint target; 32-byte aligned so &var[32] is aligned for every access size */
static volatile uint8_t var[96] __attribute__((__aligned__(32)));
/*
 * Child body: request tracing, stop with SIGSTOP, then perform one write
 * of 'size' bytes at offset 'wr' from &var[32] — the access the parent's
 * watchpoint should (or should not) catch. Never returns.
 */
static void child(int size, int wr)
{
	volatile uint8_t *addr = &var[32 + wr];

	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
		ksft_print_msg(
			"ptrace(PTRACE_TRACEME) failed: %s\n",
			strerror(errno));
		_exit(1);
	}

	if (raise(SIGSTOP) != 0) {
		ksft_print_msg(
			"raise(SIGSTOP) failed: %s\n", strerror(errno));
		_exit(1);
	}

	if ((uintptr_t) addr % size) {
		ksft_print_msg(
			"Wrong address write for the given size: %s\n",
			strerror(errno));
		_exit(1);
	}

	switch (size) {
	case 1:
		*addr = 47;
		break;
	case 2:
		*(uint16_t *)addr = 47;
		break;
	case 4:
		*(uint32_t *)addr = 47;
		break;
	case 8:
		*(uint64_t *)addr = 47;
		break;
	case 16:
		/* 16/32-byte stores need pair store instructions (stp) */
		__asm__ volatile ("stp x29, x30, %0" : "=m" (addr[0]));
		break;
	case 32:
		__asm__ volatile ("stp q29, q30, %0" : "=m" (addr[0]));
		break;
	}

	_exit(0);
}
/*
 * Install a write watchpoint of 'size' bytes at offset 'wp' from
 * &var[32] in the traced child via NT_ARM_HW_WATCH.
 * Returns true on success.
 */
static bool set_watchpoint(pid_t pid, int size, int wp)
{
	const volatile uint8_t *addr = &var[32 + wp];
	/* the register takes an 8-byte-aligned address plus a byte mask */
	const int offset = (uintptr_t)addr % 8;
	const unsigned int byte_mask = ((1 << size) - 1) << offset;
	const unsigned int type = 2; /* Write */
	const unsigned int enable = 1;
	const unsigned int control = byte_mask << 5 | type << 3 | enable;
	struct user_hwdebug_state dreg_state;
	struct iovec iov;

	memset(&dreg_state, 0, sizeof(dreg_state));
	dreg_state.dbg_regs[0].addr = (uintptr_t)(addr - offset);
	dreg_state.dbg_regs[0].ctrl = control;
	iov.iov_base = &dreg_state;
	iov.iov_len = offsetof(struct user_hwdebug_state, dbg_regs) +
				sizeof(dreg_state.dbg_regs[0]);
	if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
		return true;

	/*
	 * NOTE(review): on EIO both the "not supported" and the generic
	 * "failed" messages are printed — confirm this is intentional.
	 */
	if (errno == EIO)
		ksft_print_msg(
			"ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) not supported on this hardware: %s\n",
			strerror(errno));

	ksft_print_msg(
		"ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed: %s\n",
		strerror(errno));
	return false;
}
/*
 * Fork a child that writes wr_size bytes at offset wr, arm a wp_size
 * watchpoint at offset wp, and report whether the access trapped.
 * Returns true when a TRAP_HWBKPT was delivered (i.e. the watchpoint
 * fired); a 3-second alarm breaks the wait when it does not.
 */
static bool run_test(int wr_size, int wp_size, int wr, int wp)
{
	int status;
	siginfo_t siginfo;
	pid_t pid = fork();
	pid_t wpid;

	if (pid < 0) {
		ksft_test_result_fail(
			"fork() failed: %s\n", strerror(errno));
		return false;
	}
	if (pid == 0)
		child(wr_size, wr);

	wpid = waitpid(pid, &status, __WALL);
	if (wpid != pid) {
		ksft_print_msg(
			"waitpid() failed: %s\n", strerror(errno));
		return false;
	}
	if (!WIFSTOPPED(status)) {
		ksft_print_msg(
			"child did not stop: %s\n", strerror(errno));
		return false;
	}
	if (WSTOPSIG(status) != SIGSTOP) {
		ksft_print_msg("child did not stop with SIGSTOP\n");
		return false;
	}

	if (!set_watchpoint(pid, wp_size, wp))
		return false;

	if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
		ksft_print_msg(
			"ptrace(PTRACE_CONT) failed: %s\n",
			strerror(errno));
		return false;
	}

	/* SIGALRM interrupts the waitpid if the watchpoint never fires */
	alarm(3);
	wpid = waitpid(pid, &status, __WALL);
	if (wpid != pid) {
		ksft_print_msg(
			"waitpid() failed: %s\n", strerror(errno));
		return false;
	}
	alarm(0);
	if (WIFEXITED(status)) {
		ksft_print_msg("child exited prematurely\n");
		return false;
	}
	if (!WIFSTOPPED(status)) {
		ksft_print_msg("child did not stop\n");
		return false;
	}
	if (WSTOPSIG(status) != SIGTRAP) {
		ksft_print_msg("child did not stop with SIGTRAP\n");
		return false;
	}
	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) {
		ksft_print_msg(
			"ptrace(PTRACE_GETSIGINFO): %s\n",
			strerror(errno));
		return false;
	}
	if (siginfo.si_code != TRAP_HWBKPT) {
		ksft_print_msg(
			"Unexpected si_code %d\n", siginfo.si_code);
		return false;
	}

	kill(pid, SIGKILL);
	wpid = waitpid(pid, &status, 0);
	if (wpid != pid) {
		ksft_print_msg(
			"waitpid() failed: %s\n", strerror(errno));
		return false;
	}
	return true;
}
/* empty handler: SIGALRM only needs to interrupt the blocking waitpid() */
static void sigalrm(int sig)
{
}
/*
 * Sweep every combination of write size, write offset and watchpoint
 * offset: a test passes when the watchpoint fires iff the offsets
 * coincide. A second sweep checks negative-offset writes against a
 * fixed watchpoint at offset -8.
 */
int main(int argc, char **argv)
{
	/* removed unused 'int opt' */
	bool succeeded = true;
	struct sigaction act;
	int wr, wp, size;
	bool result;

	ksft_print_header();
	ksft_set_plan(213);

	/* install a no-op SIGALRM handler so run_test()'s alarm(3) works */
	act.sa_handler = sigalrm;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	sigaction(SIGALRM, &act, NULL);
	for (size = 1; size <= 32; size = size*2) {
		for (wr = 0; wr <= 32; wr = wr + size) {
			for (wp = wr - size; wp <= wr + size; wp = wp + size) {
				result = run_test(size, MIN(size, 8), wr, wp);
				/* expect a trap exactly when wr == wp */
				if ((result && wr == wp) ||
				    (!result && wr != wp))
					ksft_test_result_pass(
						"Test size = %d write offset = %d watchpoint offset = %d\n",
						size, wr, wp);
				else {
					ksft_test_result_fail(
						"Test size = %d write offset = %d watchpoint offset = %d\n",
						size, wr, wp);
					succeeded = false;
				}
			}
		}
	}

	for (size = 1; size <= 32; size = size*2) {
		if (run_test(size, 8, -size, -8))
			ksft_test_result_pass(
				"Test size = %d write offset = %d watchpoint offset = -8\n",
				size, -size);
		else {
			ksft_test_result_fail(
				"Test size = %d write offset = %d watchpoint offset = -8\n",
				size, -size);
			succeeded = false;
		}
	}

	if (succeeded)
		ksft_exit_pass();
	else
		ksft_exit_fail();
}
| linux-master | tools/testing/selftests/breakpoints/breakpoint_test_arm64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is the test which covers PCM middle layer data transferring using
* the virtual pcm test driver (snd-pcmtest).
*
* Copyright 2023 Ivan Orlov <[email protected]>
*/
#include <string.h>
#include <alsa/asoundlib.h>
#include "../kselftest_harness.h"
/* Number of channels the snd-pcmtest virtual card exposes. */
#define CH_NUM 4

/* One per-channel fill pattern read from the driver's debugfs files. */
struct pattern_buf {
	char buf[1024];		/* raw pattern bytes */
	int len;		/* number of valid bytes in buf */
};

struct pattern_buf patterns[CH_NUM];

/* Stream configuration shared by all tests in this file. */
struct pcmtest_test_params {
	unsigned long buffer_size;	/* ALSA buffer size, frames */
	unsigned long period_size;	/* ALSA period size, frames */
	unsigned long channels;
	unsigned int rate;		/* sample rate, Hz */
	snd_pcm_access_t access;	/* interleaved or non-interleaved */
	size_t sec_buf_len;		/* bytes per second of audio */
	size_t sample_size;		/* bytes per single-channel sample */
	int time;			/* test duration, seconds */
	snd_pcm_format_t format;
};
/*
 * Read the per-channel fill patterns exported by the pcmtest driver via
 * debugfs into the global 'patterns' array.
 *
 * Returns 0 on success, -1 if any file is missing, unreadable, or
 * advertises a length that does not fit the pattern buffer.
 */
static int read_patterns(void)
{
	FILE *fp, *fpl;
	int i;
	char pf[64];
	char plf[64];

	for (i = 0; i < CH_NUM; i++) {
		sprintf(plf, "/sys/kernel/debug/pcmtest/fill_pattern%d_len", i);
		fpl = fopen(plf, "r");
		if (!fpl)
			return -1;
		/* 'len' is a signed int, so scan with %d (the old %u mismatched) */
		if (fscanf(fpl, "%d", &patterns[i].len) != 1) {
			fclose(fpl);
			return -1;
		}
		fclose(fpl);
		/* Reject lengths that would overflow buf or make '% len' divide by 0 */
		if (patterns[i].len <= 0 ||
		    patterns[i].len > (int)sizeof(patterns[i].buf))
			return -1;
		sprintf(pf, "/sys/kernel/debug/pcmtest/fill_pattern%d", i);
		fp = fopen(pf, "r");
		if (!fp)
			return -1;
		if (fread(patterns[i].buf, 1, patterns[i].len, fp) !=
		    (size_t)patterns[i].len) {
			fclose(fp);
			return -1;
		}
		fclose(fp);
	}
	return 0;
}
/*
 * Read an integer test verdict from /sys/kernel/debug/pcmtest/<debug_name>.
 *
 * Returns the value from the file, or -1 if the file cannot be opened or
 * parsed.  (Previously an unparseable file returned an uninitialized
 * local — undefined behavior.)
 */
static int get_test_results(char *debug_name)
{
	int result;
	FILE *f;
	char fname[128];

	snprintf(fname, sizeof(fname), "/sys/kernel/debug/pcmtest/%s", debug_name);
	f = fopen(fname, "r");
	if (!f) {
		printf("Failed to open file\n");
		return -1;
	}
	if (fscanf(f, "%d", &result) != 1)
		result = -1;
	fclose(f);
	return result;
}
/* Bytes needed to hold one second of audio for the given stream layout. */
static size_t get_sec_buf_len(unsigned int rate, unsigned long channels, snd_pcm_format_t format)
{
	unsigned long width_bits = snd_pcm_format_physical_width(format);

	return rate * channels * width_bits / 8;
}
/*
 * Open PCM device "hw:<card>,0,0" for the given stream direction and apply
 * the hw/sw parameters from 'params'.
 *
 * Returns 0 on success, or the negative ALSA error from snd_pcm_open().
 *
 * NOTE(review): several parameter calls are issued twice (rate_resample,
 * buffer/period size, snd_pcm_hw_params) and their return codes are not
 * checked — presumably a deliberate re-application around the sw-params
 * setup; confirm before simplifying.
 */
static int setup_handle(snd_pcm_t **handle, snd_pcm_sw_params_t *swparams,
			snd_pcm_hw_params_t *hwparams, struct pcmtest_test_params *params,
			int card, snd_pcm_stream_t stream)
{
	char pcm_name[32];
	int err;
	sprintf(pcm_name, "hw:%d,0,0", card);
	err = snd_pcm_open(handle, pcm_name, stream, 0);
	if (err < 0)
		return err;
	snd_pcm_hw_params_any(*handle, hwparams);
	snd_pcm_hw_params_set_rate_resample(*handle, hwparams, 0);
	snd_pcm_hw_params_set_access(*handle, hwparams, params->access);
	snd_pcm_hw_params_set_format(*handle, hwparams, params->format);
	snd_pcm_hw_params_set_channels(*handle, hwparams, params->channels);
	snd_pcm_hw_params_set_rate_near(*handle, hwparams, &params->rate, 0);
	snd_pcm_hw_params_set_period_size_near(*handle, hwparams, &params->period_size, 0);
	snd_pcm_hw_params_set_buffer_size_near(*handle, hwparams, &params->buffer_size);
	snd_pcm_hw_params(*handle, hwparams);
	snd_pcm_sw_params_current(*handle, swparams);
	snd_pcm_hw_params_set_rate_resample(*handle, hwparams, 0);
	snd_pcm_sw_params_set_avail_min(*handle, swparams, params->period_size);
	snd_pcm_hw_params_set_buffer_size_near(*handle, hwparams, &params->buffer_size);
	snd_pcm_hw_params_set_period_size_near(*handle, hwparams, &params->period_size, 0);
	snd_pcm_sw_params(*handle, swparams);
	snd_pcm_hw_params(*handle, hwparams);
	return 0;
}
/* Per-test state shared between FIXTURE_SETUP and the TEST_F bodies. */
FIXTURE(pcmtest) {
	int card;	/* ALSA card index of the "PCM-Test" card, or -1 */
	snd_pcm_sw_params_t *swparams;
	snd_pcm_hw_params_t *hwparams;
	struct pcmtest_test_params params;
};

/* Nothing to release: param containers are alloca()ed inside each test. */
FIXTURE_TEARDOWN(pcmtest) {
}
/*
 * Locate the snd-pcmtest virtual card, read its debugfs fill patterns and
 * populate the default stream parameters.  Skips (rather than fails) when
 * not run as root or when the module is not loaded.
 */
FIXTURE_SETUP(pcmtest) {
	char *card_name;
	int err;

	if (geteuid())
		SKIP(exit(-1), "This test needs root to run!");

	err = read_patterns();
	if (err)
		SKIP(exit(-1), "Can't read patterns. Probably, module isn't loaded");

	self->params.buffer_size = 16384;
	self->params.period_size = 4096;
	self->params.channels = CH_NUM;
	self->params.rate = 8000;
	self->params.access = SND_PCM_ACCESS_RW_INTERLEAVED;
	self->params.format = SND_PCM_FORMAT_S16_LE;
	self->card = -1;
	self->params.sample_size = snd_pcm_format_physical_width(self->params.format) / 8;
	self->params.sec_buf_len = get_sec_buf_len(self->params.rate, self->params.channels,
						   self->params.format);
	self->params.time = 4;

	/*
	 * snd_card_get_name() allocates a fresh string into card_name, so
	 * free it on every iteration (the old malloc(127) was leaked).
	 */
	while (snd_card_next(&self->card) >= 0) {
		if (self->card == -1)
			break;
		if (snd_card_get_name(self->card, &card_name) < 0)
			continue;
		err = strcmp(card_name, "PCM-Test");
		free(card_name);
		if (!err)
			break;
	}
	ASSERT_NE(self->card, -1);
}
/*
* Here we are trying to send the looped monotonically increasing sequence of bytes to the driver.
* If our data isn't corrupted, the driver will set the content of 'pc_test' debugfs file to '1'
*/
TEST_F(pcmtest, playback) {
snd_pcm_t *handle;
unsigned char *it;
size_t write_res;
int test_results;
int i, cur_ch, pos_in_ch;
void *samples;
struct pcmtest_test_params *params = &self->params;
samples = calloc(self->params.sec_buf_len * self->params.time, 1);
ASSERT_NE(samples, NULL);
snd_pcm_sw_params_alloca(&self->swparams);
snd_pcm_hw_params_alloca(&self->hwparams);
ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams, params,
self->card, SND_PCM_STREAM_PLAYBACK), 0);
snd_pcm_format_set_silence(params->format, samples,
params->rate * params->channels * params->time);
it = samples;
for (i = 0; i < self->params.sec_buf_len * params->time; i++) {
cur_ch = (i / params->sample_size) % CH_NUM;
pos_in_ch = i / params->sample_size / CH_NUM * params->sample_size
+ (i % params->sample_size);
it[i] = patterns[cur_ch].buf[pos_in_ch % patterns[cur_ch].len];
}
write_res = snd_pcm_writei(handle, samples, params->rate * params->time);
ASSERT_GE(write_res, 0);
snd_pcm_close(handle);
free(samples);
test_results = get_test_results("pc_test");
ASSERT_EQ(test_results, 1);
}
/*
* Here we test that the virtual alsa driver returns looped and monotonically increasing sequence
* of bytes. In the interleaved mode the buffer will contain samples in the following order:
* C0, C1, C2, C3, C0, C1, ...
*/
TEST_F(pcmtest, capture) {
snd_pcm_t *handle;
unsigned char *it;
size_t read_res;
int i, cur_ch, pos_in_ch;
void *samples;
struct pcmtest_test_params *params = &self->params;
samples = calloc(self->params.sec_buf_len * self->params.time, 1);
ASSERT_NE(samples, NULL);
snd_pcm_sw_params_alloca(&self->swparams);
snd_pcm_hw_params_alloca(&self->hwparams);
ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
params, self->card, SND_PCM_STREAM_CAPTURE), 0);
snd_pcm_format_set_silence(params->format, samples,
params->rate * params->channels * params->time);
read_res = snd_pcm_readi(handle, samples, params->rate * params->time);
ASSERT_GE(read_res, 0);
snd_pcm_close(handle);
it = (unsigned char *)samples;
for (i = 0; i < self->params.sec_buf_len * self->params.time; i++) {
cur_ch = (i / params->sample_size) % CH_NUM;
pos_in_ch = i / params->sample_size / CH_NUM * params->sample_size
+ (i % params->sample_size);
ASSERT_EQ(it[i], patterns[cur_ch].buf[pos_in_ch % patterns[cur_ch].len]);
}
free(samples);
}
// Test capture in the non-interleaved access mode. The are buffers for each recorded channel
TEST_F(pcmtest, ni_capture) {
snd_pcm_t *handle;
struct pcmtest_test_params params = self->params;
char **chan_samples;
size_t i, j, read_res;
chan_samples = calloc(CH_NUM, sizeof(*chan_samples));
ASSERT_NE(chan_samples, NULL);
snd_pcm_sw_params_alloca(&self->swparams);
snd_pcm_hw_params_alloca(&self->hwparams);
params.access = SND_PCM_ACCESS_RW_NONINTERLEAVED;
ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
¶ms, self->card, SND_PCM_STREAM_CAPTURE), 0);
for (i = 0; i < CH_NUM; i++)
chan_samples[i] = calloc(params.sec_buf_len * params.time, 1);
for (i = 0; i < 1; i++) {
read_res = snd_pcm_readn(handle, (void **)chan_samples, params.rate * params.time);
ASSERT_GE(read_res, 0);
}
snd_pcm_close(handle);
for (i = 0; i < CH_NUM; i++) {
for (j = 0; j < params.rate * params.time; j++)
ASSERT_EQ(chan_samples[i][j], patterns[i].buf[j % patterns[i].len]);
free(chan_samples[i]);
}
free(chan_samples);
}
TEST_F(pcmtest, ni_playback) {
snd_pcm_t *handle;
struct pcmtest_test_params params = self->params;
char **chan_samples;
size_t i, j, read_res;
int test_res;
chan_samples = calloc(CH_NUM, sizeof(*chan_samples));
ASSERT_NE(chan_samples, NULL);
snd_pcm_sw_params_alloca(&self->swparams);
snd_pcm_hw_params_alloca(&self->hwparams);
params.access = SND_PCM_ACCESS_RW_NONINTERLEAVED;
ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
¶ms, self->card, SND_PCM_STREAM_PLAYBACK), 0);
for (i = 0; i < CH_NUM; i++) {
chan_samples[i] = calloc(params.sec_buf_len * params.time, 1);
for (j = 0; j < params.sec_buf_len * params.time; j++)
chan_samples[i][j] = patterns[i].buf[j % patterns[i].len];
}
for (i = 0; i < 1; i++) {
read_res = snd_pcm_writen(handle, (void **)chan_samples, params.rate * params.time);
ASSERT_GE(read_res, 0);
}
snd_pcm_close(handle);
test_res = get_test_results("pc_test");
ASSERT_EQ(test_res, 1);
for (i = 0; i < CH_NUM; i++)
free(chan_samples[i]);
free(chan_samples);
}
/*
 * Exercise the driver's custom ioctl path via snd_pcm_reset().  When the
 * ioctl is handled, the driver writes '1' into the 'ioctl_test' debugfs
 * file.
 */
TEST_F(pcmtest, reset_ioctl) {
	struct pcmtest_test_params *cfg = &self->params;
	snd_pcm_t *pcm;
	int verdict;

	snd_pcm_sw_params_alloca(&self->swparams);
	snd_pcm_hw_params_alloca(&self->hwparams);
	ASSERT_EQ(setup_handle(&pcm, self->swparams, self->hwparams, cfg,
			       self->card, SND_PCM_STREAM_CAPTURE), 0);
	snd_pcm_reset(pcm);
	verdict = get_test_results("ioctl_test");
	ASSERT_EQ(verdict, 1);
	snd_pcm_close(pcm);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/alsa/test-pcmtest-driver.c |
// SPDX-License-Identifier: GPL-2.0
//
// kselftest for the ALSA PCM API
//
// Original author: Jaroslav Kysela <[email protected]>
// Copyright (c) 2022 Red Hat Inc.
// This test will iterate over all cards detected in the system, exercising
// every PCM device it can find. This may conflict with other system
// software if there is audio activity so is best run on a system with a
// minimal active userspace.
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include "../kselftest.h"
#include "alsa-local.h"
typedef struct timespec timestamp_t;

/* One entry per sound card; each card runs its tests on its own thread. */
struct card_data {
	int card;
	pthread_t thread;
	struct card_data *next;
};

struct card_data *card_list = NULL;

/* One entry per discovered PCM (sub)device/stream combination. */
struct pcm_data {
	snd_pcm_t *handle;
	int card;
	int device;
	int subdevice;
	snd_pcm_stream_t stream;
	snd_config_t *pcm_config;	/* per-device config overrides, may be NULL */
	struct pcm_data *next;
};

struct pcm_data *pcm_list = NULL;

/* Devices present in the configuration but absent on the running system. */
int num_missing = 0;
struct pcm_data *pcm_missing = NULL;

snd_config_t *default_pcm_config;

/* Lock while reporting results since kselftest doesn't */
pthread_mutex_t results_lock = PTHREAD_MUTEX_INITIALIZER;

/* Origin of a test: built-in defaults vs. system-specific configuration. */
enum test_class {
	TEST_CLASS_DEFAULT,
	TEST_CLASS_SYSTEM,
};
/* Record the current CLOCK_MONOTONIC_RAW time; aborts the run on failure. */
void timestamp_now(timestamp_t *tstamp)
{
	int ret = clock_gettime(CLOCK_MONOTONIC_RAW, tstamp);

	if (ret)
		ksft_exit_fail_msg("clock_get_time\n");
}
/*
 * Milliseconds elapsed since *tstamp, rounded to the nearest millisecond.
 * Relies on the monotonic clock, so the difference is never negative.
 */
long long timestamp_diff_ms(timestamp_t *tstamp)
{
	timestamp_t now;
	long long delta_ns;

	timestamp_now(&now);
	/* Single total-nanoseconds computation replaces the manual borrow. */
	delta_ns = (long long)(now.tv_sec - tstamp->tv_sec) * 1000000000LL +
		   (now.tv_nsec - tstamp->tv_nsec);
	return (delta_ns + 500000LL) / 1000000LL;
}
/*
 * Parse a config node's id as a base-10 device/subdevice number.
 * Returns the number, or -1 when the id is not a pure decimal string.
 */
static long device_from_id(snd_config_t *node)
{
	const char *id;
	char *endp;
	long num;

	if (snd_config_get_id(node, &id))
		ksft_exit_fail_msg("snd_config_get_id\n");
	errno = 0;
	num = strtol(id, &endp, 10);
	if (errno != 0 || *endp != '\0')
		return -1;
	return num;
}
/*
 * Record a PCM sub-device that appears in the configuration but was not
 * discovered on the system.  Devices already present in pcm_list are
 * ignored; new entries are prepended to pcm_missing.
 */
static void missing_device(int card, int device, int subdevice, snd_pcm_stream_t stream)
{
	struct pcm_data *entry;

	for (entry = pcm_list; entry != NULL; entry = entry->next) {
		if (entry->card == card && entry->device == device &&
		    entry->subdevice == subdevice && entry->stream == stream)
			return;		/* device was found: not missing */
	}
	entry = calloc(1, sizeof(*entry));
	if (!entry)
		ksft_exit_fail_msg("Out of memory\n");
	entry->card = card;
	entry->device = device;
	entry->subdevice = subdevice;
	entry->stream = stream;
	entry->next = pcm_missing;
	pcm_missing = entry;
	num_missing++;
}
/*
 * Walk the "pcm" compound of a card's configuration and record any
 * configured PLAYBACK/CAPTURE entries that find_pcms() did not discover
 * on the running system (missing_device() skips ones that were found).
 */
static void missing_devices(int card, snd_config_t *card_config)
{
	snd_config_t *pcm_config, *node1, *node2;
	snd_config_iterator_t i1, i2, next1, next2;
	int device, subdevice;
	pcm_config = conf_get_subtree(card_config, "pcm", NULL);
	if (!pcm_config)
		return;
	/* First level of the tree: device number */
	snd_config_for_each(i1, next1, pcm_config) {
		node1 = snd_config_iterator_entry(i1);
		device = device_from_id(node1);
		if (device < 0)
			continue;
		if (snd_config_get_type(node1) != SND_CONFIG_TYPE_COMPOUND)
			continue;
		/* Second level: subdevice number */
		snd_config_for_each(i2, next2, node1) {
			node2 = snd_config_iterator_entry(i2);
			subdevice = device_from_id(node2);
			if (subdevice < 0)
				continue;
			if (conf_get_subtree(node2, "PLAYBACK", NULL))
				missing_device(card, device, subdevice, SND_PCM_STREAM_PLAYBACK);
			if (conf_get_subtree(node2, "CAPTURE", NULL))
				missing_device(card, device, subdevice, SND_PCM_STREAM_CAPTURE);
		}
	}
}
/*
 * Enumerate every PCM (sub)device on every sound card in the system and
 * build pcm_list/card_list.  Per-device configuration (including "skip"
 * flags) comes from conf_by_card().  Afterwards, configured-but-absent
 * devices are recorded via missing_devices().
 */
static void find_pcms(void)
{
	char name[32], key[64];
	char *card_name, *card_longname;
	int card, dev, subdev, count, direction, err;
	snd_pcm_stream_t stream;
	struct pcm_data *pcm_data;
	snd_ctl_t *handle;
	snd_pcm_info_t *pcm_info;
	snd_config_t *config, *card_config, *pcm_config;
	struct card_data *card_data;

	snd_pcm_info_alloca(&pcm_info);
	card = -1;
	if (snd_card_next(&card) < 0 || card < 0)
		return;
	config = get_alsalib_config();
	while (card >= 0) {
		sprintf(name, "hw:%d", card);
		err = snd_ctl_open_lconf(&handle, name, 0, config);
		if (err < 0) {
			ksft_print_msg("Failed to get hctl for card %d: %s\n",
				       card, snd_strerror(err));
			/*
			 * Jump past the close: the old code fell through to
			 * snd_ctl_close() on an unopened handle.
			 */
			goto next_card;
		}
		err = snd_card_get_name(card, &card_name);
		if (err != 0)
			card_name = "Unknown";
		err = snd_card_get_longname(card, &card_longname);
		if (err != 0)
			card_longname = "Unknown";
		ksft_print_msg("Card %d - %s (%s)\n", card,
			       card_name, card_longname);
		card_config = conf_by_card(card);
		card_data = calloc(1, sizeof(*card_data));
		if (!card_data)
			ksft_exit_fail_msg("Out of memory\n");
		card_data->card = card;
		card_data->next = card_list;
		card_list = card_data;
		dev = -1;
		while (1) {
			if (snd_ctl_pcm_next_device(handle, &dev) < 0)
				ksft_exit_fail_msg("snd_ctl_pcm_next_device\n");
			if (dev < 0)
				break;
			for (direction = 0; direction < 2; direction++) {
				stream = direction ? SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK;
				sprintf(key, "pcm.%d.%s", dev, snd_pcm_stream_name(stream));
				pcm_config = conf_get_subtree(card_config, key, NULL);
				if (conf_get_bool(card_config, key, "skip", false)) {
					ksft_print_msg("skipping pcm %d.%d.%s\n", card, dev, snd_pcm_stream_name(stream));
					continue;
				}
				snd_pcm_info_set_device(pcm_info, dev);
				snd_pcm_info_set_subdevice(pcm_info, 0);
				snd_pcm_info_set_stream(pcm_info, stream);
				err = snd_ctl_pcm_info(handle, pcm_info);
				if (err == -ENOENT)
					continue;	/* stream direction absent */
				if (err < 0)
					ksft_exit_fail_msg("snd_ctl_pcm_info: %d:%d:%d\n",
							   dev, 0, stream);
				count = snd_pcm_info_get_subdevices_count(pcm_info);
				for (subdev = 0; subdev < count; subdev++) {
					sprintf(key, "pcm.%d.%d.%s", dev, subdev, snd_pcm_stream_name(stream));
					if (conf_get_bool(card_config, key, "skip", false)) {
						ksft_print_msg("skipping pcm %d.%d.%d.%s\n", card, dev,
							       subdev, snd_pcm_stream_name(stream));
						continue;
					}
					pcm_data = calloc(1, sizeof(*pcm_data));
					if (!pcm_data)
						ksft_exit_fail_msg("Out of memory\n");
					pcm_data->card = card;
					pcm_data->device = dev;
					pcm_data->subdevice = subdev;
					pcm_data->stream = stream;
					pcm_data->pcm_config = conf_get_subtree(card_config, key, NULL);
					pcm_data->next = pcm_list;
					pcm_list = pcm_data;
				}
			}
		}
		/* check for missing devices */
		missing_devices(card, card_config);
		snd_ctl_close(handle);
	next_card:
		if (snd_card_next(&card) < 0) {
			ksft_print_msg("snd_card_next\n");
			break;
		}
	}
	snd_config_delete(config);
}
/*
 * Run a single timing test against one PCM device: configure hw/sw params
 * from 'pcm_cfg', stream 'duration_s' seconds of silence (playback) or
 * capture the same amount, and verify the wall-clock time matches the
 * nominal duration within 'margin_ms'.  Results are reported through
 * kselftest under the results_lock since ksft itself is not thread-safe.
 * Tests that never managed to set parameters are reported as skipped.
 */
static void test_pcm_time(struct pcm_data *data, enum test_class class,
			  const char *test_name, snd_config_t *pcm_cfg)
{
	char name[64], msg[256];
	const int duration_s = 2, margin_ms = 100;
	const int duration_ms = duration_s * 1000;
	const char *cs;
	int i, err;
	snd_pcm_t *handle = NULL;
	snd_pcm_access_t access = SND_PCM_ACCESS_RW_INTERLEAVED;
	snd_pcm_format_t format, old_format;
	const char *alt_formats[8];
	unsigned char *samples = NULL;
	snd_pcm_sframes_t frames;
	long long ms;
	long rate, channels, period_size, buffer_size;
	unsigned int rrate;
	snd_pcm_uframes_t rperiod_size, rbuffer_size, start_threshold;
	timestamp_t tstamp;
	bool pass = false;
	snd_pcm_hw_params_t *hw_params;
	snd_pcm_sw_params_t *sw_params;
	const char *test_class_name;
	bool skip = true;
	const char *desc;
	switch (class) {
	case TEST_CLASS_DEFAULT:
		test_class_name = "default";
		break;
	case TEST_CLASS_SYSTEM:
		test_class_name = "system";
		break;
	default:
		ksft_exit_fail_msg("Unknown test class %d\n", class);
		break;
	}
	desc = conf_get_string(pcm_cfg, "description", NULL, NULL);
	if (desc)
		ksft_print_msg("%s.%s.%d.%d.%d.%s - %s\n",
			       test_class_name, test_name,
			       data->card, data->device, data->subdevice,
			       snd_pcm_stream_name(data->stream),
			       desc);
	snd_pcm_hw_params_alloca(&hw_params);
	snd_pcm_sw_params_alloca(&sw_params);
	/* Pull the stream parameters from the test configuration. */
	cs = conf_get_string(pcm_cfg, "format", NULL, "S16_LE");
	format = snd_pcm_format_value(cs);
	if (format == SND_PCM_FORMAT_UNKNOWN)
		ksft_exit_fail_msg("Wrong format '%s'\n", cs);
	conf_get_string_array(pcm_cfg, "alt_formats", NULL,
			      alt_formats, ARRAY_SIZE(alt_formats), NULL);
	rate = conf_get_long(pcm_cfg, "rate", NULL, 48000);
	channels = conf_get_long(pcm_cfg, "channels", NULL, 2);
	period_size = conf_get_long(pcm_cfg, "period_size", NULL, 4096);
	buffer_size = conf_get_long(pcm_cfg, "buffer_size", NULL, 16384);
	/* One second worth of silence, reused for every write/read. */
	samples = malloc((rate * channels * snd_pcm_format_physical_width(format)) / 8);
	if (!samples)
		ksft_exit_fail_msg("Out of memory\n");
	snd_pcm_format_set_silence(format, samples, rate * channels);
	sprintf(name, "hw:%d,%d,%d", data->card, data->device, data->subdevice);
	err = snd_pcm_open(&handle, name, data->stream, 0);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "Failed to get pcm handle: %s", snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_hw_params_any(handle, hw_params);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_any: %s", snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_hw_params_set_rate_resample(handle, hw_params, 0);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_rate_resample: %s", snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_hw_params_set_access(handle, hw_params, access);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_access %s: %s",
			 snd_pcm_access_name(access), snd_strerror(err));
		goto __close;
	}
	i = -1;
__format:
	/* Try the configured format; on failure fall back through alt_formats. */
	err = snd_pcm_hw_params_set_format(handle, hw_params, format);
	if (err < 0) {
		i++;
		if (i < ARRAY_SIZE(alt_formats) && alt_formats[i]) {
			old_format = format;
			format = snd_pcm_format_value(alt_formats[i]);
			if (format != SND_PCM_FORMAT_UNKNOWN) {
				ksft_print_msg("%s.%d.%d.%d.%s.%s format %s -> %s\n",
					       test_name,
					       data->card, data->device, data->subdevice,
					       snd_pcm_stream_name(data->stream),
					       snd_pcm_access_name(access),
					       snd_pcm_format_name(old_format),
					       snd_pcm_format_name(format));
				samples = realloc(samples, (rate * channels *
							    snd_pcm_format_physical_width(format)) / 8);
				if (!samples)
					ksft_exit_fail_msg("Out of memory\n");
				snd_pcm_format_set_silence(format, samples, rate * channels);
				goto __format;
			}
		}
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_format %s: %s",
			 snd_pcm_format_name(format), snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_hw_params_set_channels(handle, hw_params, channels);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_channels %ld: %s", channels, snd_strerror(err));
		goto __close;
	}
	rrate = rate;
	err = snd_pcm_hw_params_set_rate_near(handle, hw_params, &rrate, 0);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_rate %ld: %s", rate, snd_strerror(err));
		goto __close;
	}
	/* Exact rate required: the timing check assumes the nominal rate. */
	if (rrate != rate) {
		snprintf(msg, sizeof(msg), "rate mismatch %ld != %u", rate, rrate);
		goto __close;
	}
	rperiod_size = period_size;
	err = snd_pcm_hw_params_set_period_size_near(handle, hw_params, &rperiod_size, 0);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_period_size %ld: %s", period_size, snd_strerror(err));
		goto __close;
	}
	rbuffer_size = buffer_size;
	err = snd_pcm_hw_params_set_buffer_size_near(handle, hw_params, &rbuffer_size);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_buffer_size %ld: %s", buffer_size, snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_hw_params(handle, hw_params);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_hw_params: %s", snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_sw_params_current(handle, sw_params);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_sw_params_current: %s", snd_strerror(err));
		goto __close;
	}
	/* Playback starts once the buffer is full; capture after one period. */
	if (data->stream == SND_PCM_STREAM_PLAYBACK) {
		start_threshold = (rbuffer_size / rperiod_size) * rperiod_size;
	} else {
		start_threshold = rperiod_size;
	}
	err = snd_pcm_sw_params_set_start_threshold(handle, sw_params, start_threshold);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_sw_params_set_start_threshold %ld: %s", (long)start_threshold, snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_sw_params_set_avail_min(handle, sw_params, rperiod_size);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_sw_params_set_avail_min %ld: %s", (long)rperiod_size, snd_strerror(err));
		goto __close;
	}
	err = snd_pcm_sw_params(handle, sw_params);
	if (err < 0) {
		snprintf(msg, sizeof(msg), "snd_pcm_sw_params: %s", snd_strerror(err));
		goto __close;
	}
	ksft_print_msg("%s.%s.%d.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
		       test_class_name, test_name,
		       data->card, data->device, data->subdevice,
		       snd_pcm_stream_name(data->stream),
		       snd_pcm_access_name(access),
		       snd_pcm_format_name(format),
		       (long)rate, (long)channels,
		       (long)rperiod_size, (long)rbuffer_size,
		       (long)start_threshold);
	/* Set all the params, actually run the test */
	skip = false;
	timestamp_now(&tstamp);
	for (i = 0; i < duration_s; i++) {
		if (data->stream == SND_PCM_STREAM_PLAYBACK) {
			frames = snd_pcm_writei(handle, samples, rate);
			if (frames < 0) {
				snprintf(msg, sizeof(msg),
					 "Write failed: expected %ld, wrote %li", rate, frames);
				goto __close;
			}
			if (frames < rate) {
				snprintf(msg, sizeof(msg),
					 "expected %ld, wrote %li", rate, frames);
				goto __close;
			}
		} else {
			frames = snd_pcm_readi(handle, samples, rate);
			/* These messages previously said "wrote" for reads. */
			if (frames < 0) {
				snprintf(msg, sizeof(msg),
					 "Read failed: expected %ld, read %li", rate, frames);
				goto __close;
			}
			if (frames < rate) {
				snprintf(msg, sizeof(msg),
					 "expected %ld, read %li", rate, frames);
				goto __close;
			}
		}
	}
	snd_pcm_drain(handle);
	ms = timestamp_diff_ms(&tstamp);
	if (ms < duration_ms - margin_ms || ms > duration_ms + margin_ms) {
		snprintf(msg, sizeof(msg), "time mismatch: expected %dms got %lld", duration_ms, ms);
		goto __close;
	}
	msg[0] = '\0';
	pass = true;
__close:
	pthread_mutex_lock(&results_lock);
	switch (class) {
	case TEST_CLASS_SYSTEM:
		test_class_name = "system";
		/*
		 * Anything specified as specific to this system
		 * should always be supported.
		 */
		ksft_test_result(!skip, "%s.%s.%d.%d.%d.%s.params\n",
				 test_class_name, test_name,
				 data->card, data->device, data->subdevice,
				 snd_pcm_stream_name(data->stream));
		break;
	default:
		break;
	}
	if (!skip)
		ksft_test_result(pass, "%s.%s.%d.%d.%d.%s\n",
				 test_class_name, test_name,
				 data->card, data->device, data->subdevice,
				 snd_pcm_stream_name(data->stream));
	else
		ksft_test_result_skip("%s.%s.%d.%d.%d.%s\n",
				      test_class_name, test_name,
				      data->card, data->device, data->subdevice,
				      snd_pcm_stream_name(data->stream));
	if (msg[0])
		ksft_print_msg("%s\n", msg);
	pthread_mutex_unlock(&results_lock);
	free(samples);
	if (handle)
		snd_pcm_close(handle);
}
/*
 * Execute every entry under the "test" compound of 'cfg' against the
 * given PCM device.  An unknown test type aborts the whole run.
 */
void run_time_tests(struct pcm_data *pcm, enum test_class class,
		    snd_config_t *cfg)
{
	snd_config_iterator_t it, next;
	snd_config_t *node;
	const char *name, *type;

	if (!cfg)
		return;
	cfg = conf_get_subtree(cfg, "test", NULL);
	if (!cfg)
		return;
	snd_config_for_each(it, next, cfg) {
		node = snd_config_iterator_entry(it);
		if (snd_config_get_id(node, &name) < 0)
			ksft_exit_fail_msg("snd_config_get_id\n");
		type = conf_get_string(node, "type", NULL, "time");
		if (strcmp(type, "time") != 0)
			ksft_exit_fail_msg("unknown test type '%s'\n", type);
		test_pcm_time(pcm, class, name, node);
	}
}
/* Per-card worker: run default and system tests for every PCM on this card. */
void *card_thread(void *data)
{
	struct card_data *this_card = data;
	struct pcm_data *pcm;

	for (pcm = pcm_list; pcm; pcm = pcm->next) {
		if (pcm->card != this_card->card)
			continue;
		run_time_tests(pcm, TEST_CLASS_DEFAULT, default_pcm_config);
		run_time_tests(pcm, TEST_CLASS_SYSTEM, pcm->pcm_config);
	}
	return NULL;
}
/*
 * Entry point: load the default and per-system configuration, enumerate
 * all PCM devices, compute the ksft plan, then run one test thread per
 * card and wait for all of them.
 */
int main(void)
{
	struct card_data *card;
	struct pcm_data *pcm;
	snd_config_t *global_config, *cfg;
	int num_pcm_tests = 0, num_tests, num_std_pcm_tests;
	int ret;
	void *thread_ret;

	ksft_print_header();
	global_config = conf_load_from_file("pcm-test.conf");
	default_pcm_config = conf_get_subtree(global_config, "pcm", NULL);
	if (default_pcm_config == NULL)
		ksft_exit_fail_msg("default pcm test configuration (pcm compound) is missing\n");
	conf_load();
	find_pcms();
	/* Every PCM runs the standard tests plus any device-specific ones. */
	num_std_pcm_tests = conf_get_count(default_pcm_config, "test", NULL);
	for (pcm = pcm_list; pcm != NULL; pcm = pcm->next) {
		num_pcm_tests += num_std_pcm_tests;
		cfg = pcm->pcm_config;
		if (cfg == NULL)
			continue;
		/* Setting params is reported as a separate test */
		num_tests = conf_get_count(cfg, "test", NULL) * 2;
		if (num_tests > 0)
			num_pcm_tests += num_tests;
	}
	ksft_set_plan(num_missing + num_pcm_tests);
	for (pcm = pcm_missing; pcm != NULL; pcm = pcm->next) {
		ksft_test_result(false, "test.missing.%d.%d.%d.%s\n",
				 pcm->card, pcm->device, pcm->subdevice,
				 snd_pcm_stream_name(pcm->stream));
	}
	for (card = card_list; card != NULL; card = card->next) {
		ret = pthread_create(&card->thread, NULL, card_thread, card);
		if (ret != 0) {
			/* pthread_* return the error code directly; errno is not set */
			ksft_exit_fail_msg("Failed to create card %d thread: %d (%s)\n",
					   card->card, ret,
					   strerror(ret));
		}
	}
	for (card = card_list; card != NULL; card = card->next) {
		ret = pthread_join(card->thread, &thread_ret);
		if (ret != 0) {
			ksft_exit_fail_msg("Failed to join card %d thread: %d (%s)\n",
					   card->card, ret,
					   strerror(ret));
		}
	}
	snd_config_delete(global_config);
	conf_free();
	ksft_exit_pass();
	return 0;
}
| linux-master | tools/testing/selftests/alsa/pcm-test.c |
// SPDX-License-Identifier: GPL-2.0
//
// kselftest for the ALSA mixer API
//
// Original author: Mark Brown <[email protected]>
// Copyright (c) 2021-2 Arm Limited
// This test will iterate over all cards detected in the system, exercising
// every mixer control it can find. This may conflict with other system
// software if there is audio activity so is best run on a system with a
// minimal active userspace.
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits.h>
#include <string.h>
#include <getopt.h>
#include <stdarg.h>
#include <ctype.h>
#include <math.h>
#include <errno.h>
#include <assert.h>
#include <alsa/asoundlib.h>
#include <poll.h>
#include <stdint.h>
#include "../kselftest.h"
#include "alsa-local.h"
/*
 * Result lines ksft reports for each control; presumably consumed when
 * computing the test plan in main() — not visible in this chunk, confirm.
 */
#define TESTS_PER_CONTROL 7

/* One entry per sound card with its control list and event poll fd. */
struct card_data {
	snd_ctl_t *handle;
	int card;
	struct pollfd pollfd;	/* used to wait for control-change events */
	int num_ctls;
	snd_ctl_elem_list_t *ctls;
	struct card_data *next;
};

/* One entry per mixer control element found on a card. */
struct ctl_data {
	const char *name;
	snd_ctl_elem_id_t *id;
	snd_ctl_elem_info_t *info;
	snd_ctl_elem_value_t *def_val;	/* value captured before testing */
	int elem;			/* index into the card's element list */
	int event_missing;		/* expected events that never arrived */
	int event_spurious;		/* events that arrived unexpectedly */
	struct card_data *card;
	struct ctl_data *next;
};

int num_cards = 0;
int num_controls = 0;
struct card_data *card_list = NULL;
struct ctl_data *ctl_list = NULL;
/*
 * Enumerate every mixer control on every card, building card_list and
 * ctl_list, and subscribe each card's handle to control-change events.
 */
static void find_controls(void)
{
	char name[32];
	int card, ctl, err;
	struct card_data *card_data;
	struct ctl_data *ctl_data;
	snd_config_t *config;
	char *card_name, *card_longname;

	card = -1;
	if (snd_card_next(&card) < 0 || card < 0)
		return;
	config = get_alsalib_config();
	while (card >= 0) {
		sprintf(name, "hw:%d", card);
		card_data = malloc(sizeof(*card_data));
		if (!card_data)
			ksft_exit_fail_msg("Out of memory\n");
		err = snd_ctl_open_lconf(&card_data->handle, name, 0, config);
		if (err < 0) {
			ksft_print_msg("Failed to get hctl for card %d: %s\n",
				       card, snd_strerror(err));
			free(card_data);	/* was leaked on this path */
			goto next_card;
		}
		err = snd_card_get_name(card, &card_name);
		if (err != 0)
			card_name = "Unknown";
		err = snd_card_get_longname(card, &card_longname);
		if (err != 0)
			card_longname = "Unknown";
		ksft_print_msg("Card %d - %s (%s)\n", card,
			       card_name, card_longname);
		/* Count controls */
		snd_ctl_elem_list_malloc(&card_data->ctls);
		snd_ctl_elem_list(card_data->handle, card_data->ctls);
		card_data->num_ctls = snd_ctl_elem_list_get_count(card_data->ctls);
		/* Enumerate control information */
		snd_ctl_elem_list_alloc_space(card_data->ctls, card_data->num_ctls);
		snd_ctl_elem_list(card_data->handle, card_data->ctls);
		card_data->card = num_cards++;
		card_data->next = card_list;
		card_list = card_data;
		num_controls += card_data->num_ctls;
		for (ctl = 0; ctl < card_data->num_ctls; ctl++) {
			/* calloc() replaces the old malloc()+memset() pair */
			ctl_data = calloc(1, sizeof(*ctl_data));
			if (!ctl_data)
				ksft_exit_fail_msg("Out of memory\n");
			ctl_data->card = card_data;
			ctl_data->elem = ctl;
			ctl_data->name = snd_ctl_elem_list_get_name(card_data->ctls,
								    ctl);
			err = snd_ctl_elem_id_malloc(&ctl_data->id);
			if (err < 0)
				ksft_exit_fail_msg("Out of memory\n");
			err = snd_ctl_elem_info_malloc(&ctl_data->info);
			if (err < 0)
				ksft_exit_fail_msg("Out of memory\n");
			err = snd_ctl_elem_value_malloc(&ctl_data->def_val);
			if (err < 0)
				ksft_exit_fail_msg("Out of memory\n");
			snd_ctl_elem_list_get_id(card_data->ctls, ctl,
						 ctl_data->id);
			snd_ctl_elem_info_set_id(ctl_data->info, ctl_data->id);
			err = snd_ctl_elem_info(card_data->handle,
						ctl_data->info);
			if (err < 0) {
				/* name is a string: use %s (the old %d mismatched) */
				ksft_print_msg("%s getting info for %s\n",
					       snd_strerror(err),
					       ctl_data->name);
			}
			snd_ctl_elem_value_set_id(ctl_data->def_val,
						  ctl_data->id);
			ctl_data->next = ctl_list;
			ctl_list = ctl_data;
		}
		/* Set up for events */
		err = snd_ctl_subscribe_events(card_data->handle, true);
		if (err < 0) {
			ksft_exit_fail_msg("snd_ctl_subscribe_events() failed for card %d: %d\n",
					   card, err);
		}
		err = snd_ctl_poll_descriptors_count(card_data->handle);
		if (err != 1) {
			ksft_exit_fail_msg("Unexpected descriptor count %d for card %d\n",
					   err, card);
		}
		err = snd_ctl_poll_descriptors(card_data->handle,
					       &card_data->pollfd, 1);
		if (err != 1) {
			/* the old format string had one %d but two arguments */
			ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for card %d: %d\n",
					   card, err);
		}
	next_card:
		if (snd_card_next(&card) < 0) {
			ksft_print_msg("snd_card_next");
			break;
		}
	}
	snd_config_delete(config);
}
/*
 * Block for up to timeout ms for an event, returns a negative value
 * on error, 0 for no event and 1 for an event.
 *
 * Events for other controls, non-element events, and removal events are
 * handled inside the loop; only a VALUE-change event for this control
 * terminates it successfully.
 */
static int wait_for_event(struct ctl_data *ctl, int timeout)
{
	unsigned short revents;
	snd_ctl_event_t *event;
	int err;
	unsigned int mask = 0;
	unsigned int ev_id;
	snd_ctl_event_alloca(&event);
	do {
		err = poll(&(ctl->card->pollfd), 1, timeout);
		if (err < 0) {
			ksft_print_msg("poll() failed for %s: %s (%d)\n",
				       ctl->name, strerror(errno), errno);
			return -1;
		}
		/* Timeout */
		if (err == 0)
			return 0;
		err = snd_ctl_poll_descriptors_revents(ctl->card->handle,
						       &(ctl->card->pollfd),
						       1, &revents);
		if (err < 0) {
			ksft_print_msg("snd_ctl_poll_descriptors_revents() failed for %s: %d\n",
				       ctl->name, err);
			return err;
		}
		if (revents & POLLERR) {
			ksft_print_msg("snd_ctl_poll_descriptors_revents() reported POLLERR for %s\n",
				       ctl->name);
			return -1;
		}
		/* No read events */
		if (!(revents & POLLIN)) {
			ksft_print_msg("No POLLIN\n");
			continue;
		}
		err = snd_ctl_read(ctl->card->handle, event);
		if (err < 0) {
			ksft_print_msg("snd_ctl_read() failed for %s: %d\n",
				       ctl->name, err);
			return err;
		}
		if (snd_ctl_event_get_type(event) != SND_CTL_EVENT_ELEM)
			continue;
		/*
		 * Ignore events for controls other than the one under test
		 * (matched by numid).
		 */
		mask = snd_ctl_event_elem_get_mask(event);
		ev_id = snd_ctl_event_elem_get_numid(event);
		if (ev_id != snd_ctl_elem_info_get_numid(ctl->info)) {
			ksft_print_msg("Event for unexpected ctl %s\n",
				       snd_ctl_event_elem_get_name(event));
			continue;
		}
		/* The control disappearing mid-test is a hard failure. */
		if ((mask & SND_CTL_EVENT_MASK_REMOVE) == SND_CTL_EVENT_MASK_REMOVE) {
			ksft_print_msg("Removal event for %s\n",
				       ctl->name);
			return -1;
		}
	} while ((mask & SND_CTL_EVENT_MASK_VALUE) != SND_CTL_EVENT_MASK_VALUE);
	return 1;
}
/*
 * Validate a single channel of a control value against the constraints
 * advertised by the control's info: booleans must be 0/1, integers must
 * lie in [min, max] and on a step boundary, enumerations must select a
 * valid item. Logs any problem found and returns false for it.
 */
static bool ctl_value_index_valid(struct ctl_data *ctl,
				  snd_ctl_elem_value_t *val,
				  int index)
{
	long int_val;
	long long int64_val;

	switch (snd_ctl_elem_info_get_type(ctl->info)) {
	case SND_CTL_ELEM_TYPE_NONE:
		ksft_print_msg("%s.%d Invalid control type NONE\n",
			       ctl->name, index);
		return false;

	case SND_CTL_ELEM_TYPE_BOOLEAN:
		int_val = snd_ctl_elem_value_get_boolean(val, index);
		switch (int_val) {
		case 0:
		case 1:
			break;
		default:
			ksft_print_msg("%s.%d Invalid boolean value %ld\n",
				       ctl->name, index, int_val);
			return false;
		}
		break;

	case SND_CTL_ELEM_TYPE_INTEGER:
		int_val = snd_ctl_elem_value_get_integer(val, index);

		if (int_val < snd_ctl_elem_info_get_min(ctl->info)) {
			ksft_print_msg("%s.%d value %ld less than minimum %ld\n",
				       ctl->name, index, int_val,
				       snd_ctl_elem_info_get_min(ctl->info));
			return false;
		}

		if (int_val > snd_ctl_elem_info_get_max(ctl->info)) {
			ksft_print_msg("%s.%d value %ld more than maximum %ld\n",
				       ctl->name, index, int_val,
				       snd_ctl_elem_info_get_max(ctl->info));
			return false;
		}

		/*
		 * Only check step size if there is one and we're in bounds.
		 * Fix: the offset from the minimum must be computed before
		 * taking the remainder; the old parenthesisation applied %
		 * to the minimum alone (compare the INTEGER64 branch below),
		 * making the step check meaningless.
		 */
		if (snd_ctl_elem_info_get_step(ctl->info) &&
		    (int_val - snd_ctl_elem_info_get_min(ctl->info)) %
		    snd_ctl_elem_info_get_step(ctl->info)) {
			ksft_print_msg("%s.%d value %ld invalid for step %ld minimum %ld\n",
				       ctl->name, index, int_val,
				       snd_ctl_elem_info_get_step(ctl->info),
				       snd_ctl_elem_info_get_min(ctl->info));
			return false;
		}
		break;

	case SND_CTL_ELEM_TYPE_INTEGER64:
		int64_val = snd_ctl_elem_value_get_integer64(val, index);

		if (int64_val < snd_ctl_elem_info_get_min64(ctl->info)) {
			ksft_print_msg("%s.%d value %lld less than minimum %lld\n",
				       ctl->name, index, int64_val,
				       snd_ctl_elem_info_get_min64(ctl->info));
			return false;
		}

		if (int64_val > snd_ctl_elem_info_get_max64(ctl->info)) {
			/* Fix: report the 64 bit maximum, not the 32 bit one,
			 * which also matches the %lld conversion. */
			ksft_print_msg("%s.%d value %lld more than maximum %lld\n",
				       ctl->name, index, int64_val,
				       snd_ctl_elem_info_get_max64(ctl->info));
			return false;
		}

		/* Only check step size if there is one and we're in bounds */
		if (snd_ctl_elem_info_get_step64(ctl->info) &&
		    (int64_val - snd_ctl_elem_info_get_min64(ctl->info)) %
		    snd_ctl_elem_info_get_step64(ctl->info)) {
			ksft_print_msg("%s.%d value %lld invalid for step %lld minimum %lld\n",
				       ctl->name, index, int64_val,
				       snd_ctl_elem_info_get_step64(ctl->info),
				       snd_ctl_elem_info_get_min64(ctl->info));
			return false;
		}
		break;

	case SND_CTL_ELEM_TYPE_ENUMERATED:
		int_val = snd_ctl_elem_value_get_enumerated(val, index);

		if (int_val < 0) {
			ksft_print_msg("%s.%d negative value %ld for enumeration\n",
				       ctl->name, index, int_val);
			return false;
		}

		if (int_val >= snd_ctl_elem_info_get_items(ctl->info)) {
			/* Fix: the item count is an unsigned int, print %u */
			ksft_print_msg("%s.%d value %ld more than item count %u\n",
				       ctl->name, index, int_val,
				       snd_ctl_elem_info_get_items(ctl->info));
			return false;
		}
		break;

	default:
		/* No tests for other types */
		break;
	}

	return true;
}
/*
 * Check that the provided value meets the constraints for the
 * provided control.
 */
static bool ctl_value_valid(struct ctl_data *ctl, snd_ctl_elem_value_t *val)
{
	bool ok = true;
	int idx;

	/* Validate every channel so all problems are logged, not just the first */
	for (idx = 0; idx < snd_ctl_elem_info_get_count(ctl->info); idx++)
		ok &= ctl_value_index_valid(ctl, val, idx);

	return ok;
}
/*
 * Check that we can read the default value and it is valid. Write
 * tests use the read value to restore the default.
 */
static void test_ctl_get_value(struct ctl_data *ctl)
{
	int ret;

	/* If the control is turned off let's be polite */
	if (snd_ctl_elem_info_is_inactive(ctl->info)) {
		ksft_print_msg("%s is inactive\n", ctl->name);
		ksft_test_result_skip("get_value.%d.%d\n",
				      ctl->card->card, ctl->elem);
		return;
	}

	/* Can't test reading on an unreadable control */
	if (!snd_ctl_elem_info_is_readable(ctl->info)) {
		ksft_print_msg("%s is not readable\n", ctl->name);
		ksft_test_result_skip("get_value.%d.%d\n",
				      ctl->card->card, ctl->elem);
		return;
	}

	/* The value read here is kept as the default for later restores */
	ret = snd_ctl_elem_read(ctl->card->handle, ctl->def_val);
	if (ret < 0)
		ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
			       snd_strerror(ret));
	else if (!ctl_value_valid(ctl, ctl->def_val))
		ret = -EINVAL;

	ksft_test_result(ret >= 0, "get_value.%d.%d\n",
			 ctl->card->card, ctl->elem);
}
/* Return true if haystack ends with needle (the empty needle matches) */
static bool strend(const char *haystack, const char *needle)
{
	size_t hlen = strlen(haystack);
	size_t nlen = strlen(needle);

	if (hlen < nlen)
		return false;

	return !strcmp(&haystack[hlen - nlen], needle);
}
/* Validate the control naming conventions around the " Switch" suffix */
static void test_ctl_name(struct ctl_data *ctl)
{
	bool ok = true;
	bool is_bool = snd_ctl_elem_info_get_type(ctl->info) == SND_CTL_ELEM_TYPE_BOOLEAN;
	bool is_switch = strend(ctl->name, " Switch");

	ksft_print_msg("%d.%d %s\n", ctl->card->card, ctl->elem,
		       ctl->name);

	/* Only boolean controls should end in Switch */
	if (is_switch && !is_bool) {
		ksft_print_msg("%d.%d %s ends in Switch but is not boolean\n",
			       ctl->card->card, ctl->elem, ctl->name);
		ok = false;
	}

	/* Writeable boolean controls should end in Switch */
	if (is_bool && snd_ctl_elem_info_is_writable(ctl->info) && !is_switch) {
		ksft_print_msg("%d.%d %s is a writeable boolean but not a Switch\n",
			       ctl->card->card, ctl->elem, ctl->name);
		ok = false;
	}

	ksft_test_result(ok, "name.%d.%d\n",
			 ctl->card->card, ctl->elem);
}
/*
 * Diagnostic helper: print the original and re-read value of every
 * channel of a control, plus the control's volatile flag. Types with
 * no integer representation are silently skipped.
 */
static void show_values(struct ctl_data *ctl, snd_ctl_elem_value_t *orig_val,
			snd_ctl_elem_value_t *read_val)
{
	long long orig_int, read_int;
	int i;

	for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
		switch (snd_ctl_elem_info_get_type(ctl->info)) {
		case SND_CTL_ELEM_TYPE_BOOLEAN:
			orig_int = snd_ctl_elem_value_get_boolean(orig_val, i);
			read_int = snd_ctl_elem_value_get_boolean(read_val, i);
			break;
		case SND_CTL_ELEM_TYPE_INTEGER:
			orig_int = snd_ctl_elem_value_get_integer(orig_val, i);
			read_int = snd_ctl_elem_value_get_integer(read_val, i);
			break;
		case SND_CTL_ELEM_TYPE_INTEGER64:
			orig_int = snd_ctl_elem_value_get_integer64(orig_val,
								    i);
			read_int = snd_ctl_elem_value_get_integer64(read_val,
								    i);
			break;
		case SND_CTL_ELEM_TYPE_ENUMERATED:
			orig_int = snd_ctl_elem_value_get_enumerated(orig_val,
								     i);
			read_int = snd_ctl_elem_value_get_enumerated(read_val,
								     i);
			break;
		default:
			/* No integer representation, nothing to report */
			return;
		}

		ksft_print_msg("%s.%d orig %lld read %lld, is_volatile %d\n",
			       ctl->name, i, orig_int, read_int,
			       snd_ctl_elem_info_is_volatile(ctl->info));
	}
}
/*
 * Compare the expected and read-back value of one channel and log any
 * mismatch. Returns true when the mismatch should count as a failure;
 * mismatches on volatile controls (which the hardware may change on its
 * own) and types without an integer representation return false.
 */
static bool show_mismatch(struct ctl_data *ctl, int index,
			  snd_ctl_elem_value_t *read_val,
			  snd_ctl_elem_value_t *expected_val)
{
	long long expected_int, read_int;

	/*
	 * We factor out the code to compare values representable as
	 * integers, ensure that check doesn't log otherwise.
	 */
	expected_int = 0;
	read_int = 0;
	switch (snd_ctl_elem_info_get_type(ctl->info)) {
	case SND_CTL_ELEM_TYPE_BOOLEAN:
		expected_int = snd_ctl_elem_value_get_boolean(expected_val,
							      index);
		read_int = snd_ctl_elem_value_get_boolean(read_val, index);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER:
		expected_int = snd_ctl_elem_value_get_integer(expected_val,
							      index);
		read_int = snd_ctl_elem_value_get_integer(read_val, index);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER64:
		expected_int = snd_ctl_elem_value_get_integer64(expected_val,
								index);
		read_int = snd_ctl_elem_value_get_integer64(read_val,
							    index);
		break;
	case SND_CTL_ELEM_TYPE_ENUMERATED:
		expected_int = snd_ctl_elem_value_get_enumerated(expected_val,
								 index);
		read_int = snd_ctl_elem_value_get_enumerated(read_val,
							     index);
		break;
	default:
		/* Both stay 0, so no mismatch is reported below */
		break;
	}

	if (expected_int != read_int) {
		/*
		 * NOTE: The volatile attribute means that the hardware
		 * can voluntarily change the state of control element
		 * independent of any operation by software.
		 */
		bool is_volatile = snd_ctl_elem_info_is_volatile(ctl->info);

		ksft_print_msg("%s.%d expected %lld but read %lld, is_volatile %d\n",
			       ctl->name, index, expected_int, read_int, is_volatile);
		return !is_volatile;
	} else {
		return false;
	}
}
/*
 * Write a value then if possible verify that we get the expected
 * result. An optional expected value can be provided if we expect
 * the write to fail, for verifying that invalid writes don't corrupt
 * anything.
 *
 * Returns 0 when the read-back value matches the expectation, a
 * negative error code on I/O failure or -1 on a mismatch. Also
 * accounts missing/spurious notification events on the control.
 */
static int write_and_verify(struct ctl_data *ctl,
			    snd_ctl_elem_value_t *write_val,
			    snd_ctl_elem_value_t *expected_val)
{
	int err, i;
	bool error_expected, mismatch_shown;
	snd_ctl_elem_value_t *initial_val, *read_val, *w_val;

	snd_ctl_elem_value_alloca(&initial_val);
	snd_ctl_elem_value_alloca(&read_val);
	snd_ctl_elem_value_alloca(&w_val);

	/*
	 * We need to copy the write value since writing can modify
	 * the value which causes surprises, and allocate an expected
	 * value if we expect to read back what we wrote.
	 */
	snd_ctl_elem_value_copy(w_val, write_val);
	if (expected_val) {
		error_expected = true;
	} else {
		error_expected = false;
		/*
		 * NOTE(review): snd_ctl_elem_value_alloca() is alloca()
		 * based; using it inside this block relies on the storage
		 * staying live until the function returns — TODO confirm
		 * with the supported toolchains.
		 */
		snd_ctl_elem_value_alloca(&expected_val);
		snd_ctl_elem_value_copy(expected_val, write_val);
	}

	/* Store the value before we write */
	if (snd_ctl_elem_info_is_readable(ctl->info)) {
		snd_ctl_elem_value_set_id(initial_val, ctl->id);

		err = snd_ctl_elem_read(ctl->card->handle, initial_val);
		if (err < 0) {
			ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
				       snd_strerror(err));
			return err;
		}
	}

	/*
	 * Do the write, if we have an expected value ignore the error
	 * and carry on to validate the expected value.
	 */
	err = snd_ctl_elem_write(ctl->card->handle, w_val);
	if (err < 0 && !error_expected) {
		ksft_print_msg("snd_ctl_elem_write() failed: %s\n",
			       snd_strerror(err));
		return err;
	}

	/* Can we do the verification part? */
	if (!snd_ctl_elem_info_is_readable(ctl->info))
		return err;

	snd_ctl_elem_value_set_id(read_val, ctl->id);

	err = snd_ctl_elem_read(ctl->card->handle, read_val);
	if (err < 0) {
		ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
			       snd_strerror(err));
		return err;
	}

	/*
	 * Check for an event if the value changed, or confirm that
	 * there was none if it didn't. We rely on the kernel
	 * generating the notification before it returns from the
	 * write, this is currently true, should that ever change this
	 * will most likely break and need updating.
	 */
	if (!snd_ctl_elem_info_is_volatile(ctl->info)) {
		/* Zero timeout: only events already queued are considered */
		err = wait_for_event(ctl, 0);
		if (snd_ctl_elem_value_compare(initial_val, read_val)) {
			/* Value changed, an event should have been seen */
			if (err < 1) {
				ksft_print_msg("No event generated for %s\n",
					       ctl->name);
				show_values(ctl, initial_val, read_val);
				ctl->event_missing++;
			}
		} else {
			/* Value unchanged, no event should have been seen */
			if (err != 0) {
				ksft_print_msg("Spurious event generated for %s\n",
					       ctl->name);
				show_values(ctl, initial_val, read_val);
				ctl->event_spurious++;
			}
		}
	}

	/*
	 * Use the libray to compare values, if there's a mismatch
	 * carry on and try to provide a more useful diagnostic than
	 * just "mismatch".
	 */
	if (!snd_ctl_elem_value_compare(expected_val, read_val))
		return 0;

	mismatch_shown = false;
	for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++)
		if (show_mismatch(ctl, i, read_val, expected_val))
			mismatch_shown = true;
	if (!mismatch_shown)
		ksft_print_msg("%s read and written values differ\n",
			       ctl->name);

	return -1;
}
/*
 * Make sure we can write the default value back to the control, this
 * should validate that at least some write works.
 */
static void test_ctl_write_default(struct ctl_data *ctl)
{
	int ret;

	/* If the control is turned off let's be polite */
	if (snd_ctl_elem_info_is_inactive(ctl->info)) {
		ksft_print_msg("%s is inactive\n", ctl->name);
		goto skip;
	}

	if (!snd_ctl_elem_info_is_writable(ctl->info)) {
		ksft_print_msg("%s is not writeable\n", ctl->name);
		goto skip;
	}

	/* No idea what the default was for unreadable controls */
	if (!snd_ctl_elem_info_is_readable(ctl->info)) {
		ksft_print_msg("%s couldn't read default\n", ctl->name);
		goto skip;
	}

	ret = write_and_verify(ctl, ctl->def_val, NULL);

	ksft_test_result(ret >= 0, "write_default.%d.%d\n",
			 ctl->card->card, ctl->elem);
	return;

skip:
	ksft_test_result_skip("write_default.%d.%d\n",
			      ctl->card->card, ctl->elem);
}
/* Try writing both false and true to every channel of a boolean control */
static bool test_ctl_write_valid_boolean(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	int chan, setting;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, ctl->id);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		for (setting = 0; setting <= 1; setting++) {
			snd_ctl_elem_value_set_boolean(val, chan, setting);
			if (write_and_verify(ctl, val, NULL) != 0)
				pass = false;
		}
	}

	return pass;
}
/* Try writing every valid value (stepping if required) to an integer control */
static bool test_ctl_write_valid_integer(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	long v, inc;
	int chan;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, ctl->id);

	inc = snd_ctl_elem_info_get_step(ctl->info);
	if (!inc)
		inc = 1;

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		for (v = snd_ctl_elem_info_get_min(ctl->info);
		     v <= snd_ctl_elem_info_get_max(ctl->info); v += inc) {
			snd_ctl_elem_value_set_integer(val, chan, v);
			if (write_and_verify(ctl, val, NULL) != 0)
				pass = false;
		}
	}

	return pass;
}
/* Try writing every valid value (stepping if required) to a 64 bit control */
static bool test_ctl_write_valid_integer64(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	long long v, inc;
	int chan;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, ctl->id);

	inc = snd_ctl_elem_info_get_step64(ctl->info);
	if (!inc)
		inc = 1;

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		for (v = snd_ctl_elem_info_get_min64(ctl->info);
		     v <= snd_ctl_elem_info_get_max64(ctl->info); v += inc) {
			snd_ctl_elem_value_set_integer64(val, chan, v);
			if (write_and_verify(ctl, val, NULL) != 0)
				pass = false;
		}
	}

	return pass;
}
/* Try writing every valid item to every channel of an enumerated control */
static bool test_ctl_write_valid_enumerated(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	int chan, item;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, ctl->id);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		for (item = 0; item < snd_ctl_elem_info_get_items(ctl->info); item++) {
			snd_ctl_elem_value_set_enumerated(val, chan, item);
			if (write_and_verify(ctl, val, NULL) != 0)
				pass = false;
		}
	}

	return pass;
}
/* Dispatch the per-type valid-write test and report the result */
static void test_ctl_write_valid(struct ctl_data *ctl)
{
	bool pass;

	/* If the control is turned off let's be polite */
	if (snd_ctl_elem_info_is_inactive(ctl->info)) {
		ksft_print_msg("%s is inactive\n", ctl->name);
		goto skip;
	}

	if (!snd_ctl_elem_info_is_writable(ctl->info)) {
		ksft_print_msg("%s is not writeable\n", ctl->name);
		goto skip;
	}

	switch (snd_ctl_elem_info_get_type(ctl->info)) {
	case SND_CTL_ELEM_TYPE_BOOLEAN:
		pass = test_ctl_write_valid_boolean(ctl);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER:
		pass = test_ctl_write_valid_integer(ctl);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER64:
		pass = test_ctl_write_valid_integer64(ctl);
		break;
	case SND_CTL_ELEM_TYPE_ENUMERATED:
		pass = test_ctl_write_valid_enumerated(ctl);
		break;
	default:
		/* No tests for this yet */
		goto skip;
	}

	/* Restore the default value to minimise disruption */
	write_and_verify(ctl, ctl->def_val, NULL);

	ksft_test_result(pass, "write_valid.%d.%d\n",
			 ctl->card->card, ctl->elem);
	return;

skip:
	ksft_test_result_skip("write_valid.%d.%d\n",
			      ctl->card->card, ctl->elem);
}
/*
 * Write a deliberately invalid value and return true if the device
 * accepted it without either rejecting or clamping it to a valid value.
 */
static bool test_ctl_write_invalid_value(struct ctl_data *ctl,
					 snd_ctl_elem_value_t *val)
{
	int ret;

	/* Ideally this will fail... */
	ret = snd_ctl_elem_write(ctl->card->handle, val);
	if (ret < 0)
		return false;

	/* ...but some devices will clamp to an in range value */
	ret = snd_ctl_elem_read(ctl->card->handle, val);
	if (ret < 0) {
		ksft_print_msg("%s failed to read: %s\n",
			       ctl->name, snd_strerror(ret));
		return true;
	}

	return !ctl_value_valid(ctl, val);
}
/* Booleans only allow 0 and 1, so try writing a 2 to every channel */
static bool test_ctl_write_invalid_boolean(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	int chan;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		snd_ctl_elem_value_copy(val, ctl->def_val);
		snd_ctl_elem_value_set_boolean(val, chan, 2);
		if (test_ctl_write_invalid_value(ctl, val))
			pass = false;
	}

	return pass;
}
/*
 * Probe out of range writes on an integer control: just past each end
 * of the advertised range and the extreme representable values, where
 * those differ from the advertised limits.
 */
static bool test_ctl_write_invalid_integer(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	long min = snd_ctl_elem_info_get_min(ctl->info);
	long max = snd_ctl_elem_info_get_max(ctl->info);
	int chan, i;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		long bad[4];
		int n = 0;

		if (min != LONG_MIN) {
			bad[n++] = min - 1;	/* Just under range */
			bad[n++] = LONG_MIN;	/* Minimum representable value */
		}
		if (max != LONG_MAX) {
			bad[n++] = max + 1;	/* Just over range */
			bad[n++] = LONG_MAX;	/* Maximum representable value */
		}

		for (i = 0; i < n; i++) {
			snd_ctl_elem_value_copy(val, ctl->def_val);
			snd_ctl_elem_value_set_integer(val, chan, bad[i]);
			if (test_ctl_write_invalid_value(ctl, val))
				pass = false;
		}
	}

	return pass;
}
/* As test_ctl_write_invalid_integer() but for 64 bit integer controls */
static bool test_ctl_write_invalid_integer64(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	long long min = snd_ctl_elem_info_get_min64(ctl->info);
	long long max = snd_ctl_elem_info_get_max64(ctl->info);
	int chan, i;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		long long bad[4];
		int n = 0;

		if (min != LLONG_MIN) {
			bad[n++] = min - 1;	/* Just under range */
			bad[n++] = LLONG_MIN;	/* Minimum representable value */
		}
		if (max != LLONG_MAX) {
			bad[n++] = max + 1;	/* Just over range */
			bad[n++] = LLONG_MAX;	/* Maximum representable value */
		}

		for (i = 0; i < n; i++) {
			snd_ctl_elem_value_copy(val, ctl->def_val);
			snd_ctl_elem_value_set_integer64(val, chan, bad[i]);
			if (test_ctl_write_invalid_value(ctl, val))
				pass = false;
		}
	}

	return pass;
}
/* Probe writes past the item count on an enumerated control */
static bool test_ctl_write_invalid_enumerated(struct ctl_data *ctl)
{
	snd_ctl_elem_value_t *val;
	int chan;
	bool pass = true;

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, ctl->id);

	for (chan = 0; chan < snd_ctl_elem_info_get_count(ctl->info); chan++) {
		/* One beyond maximum */
		snd_ctl_elem_value_copy(val, ctl->def_val);
		snd_ctl_elem_value_set_enumerated(val, chan,
					snd_ctl_elem_info_get_items(ctl->info));
		if (test_ctl_write_invalid_value(ctl, val))
			pass = false;

		/* Maximum representable value */
		snd_ctl_elem_value_copy(val, ctl->def_val);
		snd_ctl_elem_value_set_enumerated(val, chan, UINT_MAX);
		if (test_ctl_write_invalid_value(ctl, val))
			pass = false;
	}

	return pass;
}
/* Dispatch the per-type invalid-write test and report the result */
static void test_ctl_write_invalid(struct ctl_data *ctl)
{
	bool pass;

	/* If the control is turned off let's be polite */
	if (snd_ctl_elem_info_is_inactive(ctl->info)) {
		ksft_print_msg("%s is inactive\n", ctl->name);
		goto skip;
	}

	if (!snd_ctl_elem_info_is_writable(ctl->info)) {
		ksft_print_msg("%s is not writeable\n", ctl->name);
		goto skip;
	}

	switch (snd_ctl_elem_info_get_type(ctl->info)) {
	case SND_CTL_ELEM_TYPE_BOOLEAN:
		pass = test_ctl_write_invalid_boolean(ctl);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER:
		pass = test_ctl_write_invalid_integer(ctl);
		break;
	case SND_CTL_ELEM_TYPE_INTEGER64:
		pass = test_ctl_write_invalid_integer64(ctl);
		break;
	case SND_CTL_ELEM_TYPE_ENUMERATED:
		pass = test_ctl_write_invalid_enumerated(ctl);
		break;
	default:
		/* No tests for this yet */
		goto skip;
	}

	/* Restore the default value to minimise disruption */
	write_and_verify(ctl, ctl->def_val, NULL);

	ksft_test_result(pass, "write_invalid.%d.%d\n",
			 ctl->card->card, ctl->elem);
	return;

skip:
	ksft_test_result_skip("write_invalid.%d.%d\n",
			      ctl->card->card, ctl->elem);
}
/* Pass unless any value change during the write tests went unnotified */
static void test_ctl_event_missing(struct ctl_data *ctl)
{
	ksft_test_result(ctl->event_missing == 0, "event_missing.%d.%d\n",
			 ctl->card->card, ctl->elem);
}
/* Pass unless any event arrived without a corresponding value change */
static void test_ctl_event_spurious(struct ctl_data *ctl)
{
	ksft_test_result(ctl->event_spurious == 0, "event_spurious.%d.%d\n",
			 ctl->card->card, ctl->elem);
}
/* Enumerate every control on every card and run the full test set on each */
int main(void)
{
	struct ctl_data *ctl;

	ksft_print_header();

	/* Populates ctl_list and num_controls (defined earlier in this file) */
	find_controls();

	ksft_set_plan(num_controls * TESTS_PER_CONTROL);

	for (ctl = ctl_list; ctl != NULL; ctl = ctl->next) {
		/*
		 * Must test get_value() before we write anything, the
		 * test stores the default value for later cleanup.
		 */
		test_ctl_get_value(ctl);
		test_ctl_name(ctl);
		test_ctl_write_default(ctl);
		test_ctl_write_valid(ctl);
		test_ctl_write_invalid(ctl);
		test_ctl_event_missing(ctl);
		test_ctl_event_spurious(ctl);
	}

	ksft_exit_pass();

	return 0;
}
| linux-master | tools/testing/selftests/alsa/mixer-test.c |
// SPDX-License-Identifier: GPL-2.0
//
// kselftest configuration helpers for the hw specific configuration
//
// Original author: Jaroslav Kysela <[email protected]>
// Copyright (c) 2022 Red Hat Inc.
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <assert.h>
#include <dirent.h>
#include <regex.h>
#include <sys/stat.h>
#include "../kselftest.h"
#include "alsa-local.h"
#define SYSFS_ROOT "/sys"

/* One entry per sound card that matched a conf.d configuration file */
struct card_data {
	int card;		/* ALSA card number */
	snd_config_t *config;	/* matched card block from the config file */
	const char *filename;	/* config file path, for diagnostics */
	struct card_data *next;	/* next entry in the conf_cards list */
};

/* Head of the linked list of matched card configurations */
static struct card_data *conf_cards;
/*
 * Minimal alsa-lib configuration defining only the direct "hw" ctl and
 * pcm plugins, so the tests talk straight to the hardware regardless of
 * any system-wide or per-user ALSA configuration.
 */
static const char *alsa_config =
"ctl.hw {\n"
" @args [ CARD ]\n"
" @args.CARD.type string\n"
" type hw\n"
" card $CARD\n"
"}\n"
"pcm.hw {\n"
" @args [ CARD DEV SUBDEV ]\n"
" @args.CARD.type string\n"
" @args.DEV.type integer\n"
" @args.SUBDEV.type integer\n"
" type hw\n"
" card $CARD\n"
" device $DEV\n"
" subdevice $SUBDEV\n"
"}\n"
;
/*
 * snd_config_load_string() only appeared in alsa-lib 1.2.6; provide a
 * local fallback built from the older input/config APIs when building
 * against an earlier library.
 */
#ifdef SND_LIB_VER
#if SND_LIB_VERSION >= SND_LIB_VER(1, 2, 6)
#define LIB_HAS_LOAD_STRING
#endif
#endif

#ifndef LIB_HAS_LOAD_STRING
/*
 * Parse the string s (of length size, or strlen(s) when size is 0) into
 * a newly allocated configuration tree returned via *config.
 * Returns 0 on success or a negative error code.
 */
static int snd_config_load_string(snd_config_t **config, const char *s,
				  size_t size)
{
	snd_input_t *input;
	snd_config_t *dst;
	int err;

	assert(config && s);
	if (size == 0)
		size = strlen(s);
	err = snd_input_buffer_open(&input, s, size);
	if (err < 0)
		return err;
	err = snd_config_top(&dst);
	if (err < 0) {
		snd_input_close(input);
		return err;
	}
	err = snd_config_load(dst, input);
	snd_input_close(input);
	if (err < 0) {
		/* Parse failure: tear down the partially built tree */
		snd_config_delete(dst);
		return err;
	}
	*config = dst;
	return 0;
}
#endif
/* Parse the built-in alsa_config string; exits the test on failure */
snd_config_t *get_alsalib_config(void)
{
	snd_config_t *cfg;
	int ret;

	ret = snd_config_load_string(&cfg, alsa_config, strlen(alsa_config));
	if (ret < 0) {
		ksft_print_msg("Unable to parse custom alsa-lib configuration: %s\n",
			       snd_strerror(ret));
		ksft_exit_fail();
	}

	return cfg;
}
/*
 * Find the card_data entry for the given card number, optionally
 * logging which config file is being used. Returns NULL if none.
 */
static struct card_data *conf_data_by_card(int card, bool msg)
{
	struct card_data *entry = conf_cards;

	while (entry) {
		if (entry->card == card) {
			if (msg)
				ksft_print_msg("using hw card config %s for card %d\n",
					       entry->filename, card);
			return entry;
		}
		entry = entry->next;
	}

	return NULL;
}
/*
 * Dump a configuration tree to stdout for debugging.
 * Returns 0 on success; exits the test on failure.
 */
static int dump_config_tree(snd_config_t *top)
{
	snd_output_t *out;
	int err;

	err = snd_output_stdio_attach(&out, stdout, 0);
	if (err < 0)
		ksft_exit_fail_msg("stdout attach\n");
	if (snd_config_save(top, out))
		ksft_exit_fail_msg("config save\n");
	snd_output_close(out);

	/*
	 * Fix: the function is declared int but previously fell off the
	 * end without returning a value, which is undefined behaviour if
	 * the caller ever uses the result.
	 */
	return 0;
}
/* Parse a configuration file into a new tree; exits the test on failure */
snd_config_t *conf_load_from_file(const char *filename)
{
	snd_input_t *in;
	snd_config_t *top;
	int ret;

	ret = snd_input_stdio_open(&in, filename, "r");
	if (ret < 0)
		ksft_exit_fail_msg("Unable to parse filename %s\n", filename);

	ret = snd_config_top(&top);
	if (ret < 0)
		ksft_exit_fail_msg("Out of memory\n");

	ret = snd_config_load(top, in);
	snd_input_close(in);
	if (ret < 0)
		ksft_exit_fail_msg("Unable to parse filename %s\n", filename);

	return top;
}
/*
 * Read a sysfs attribute relative to sysfs_root and return it as a
 * newly allocated string (caller frees), or NULL if the attribute is
 * absent, is a directory, or is unreadable. A symlink yields the last
 * path component of its target. Exits the test on unexpected errors.
 */
static char *sysfs_get(const char *sysfs_root, const char *id)
{
	char path[PATH_MAX], link[PATH_MAX + 1];
	struct stat sb;
	ssize_t len;
	char *e;
	int fd;

	if (id[0] == '/')
		id++;
	snprintf(path, sizeof(path), "%s/%s", sysfs_root, id);
	if (lstat(path, &sb) != 0)
		return NULL;

	/* For a symlink report the basename of its target */
	if (S_ISLNK(sb.st_mode)) {
		len = readlink(path, link, sizeof(link) - 1);
		if (len <= 0) {
			ksft_exit_fail_msg("sysfs: cannot read link '%s': %s\n",
					   path, strerror(errno));
			return NULL;
		}
		link[len] = '\0';
		e = strrchr(link, '/');
		if (e)
			return strdup(e + 1);
		return NULL;
	}

	if (S_ISDIR(sb.st_mode))
		return NULL;
	if ((sb.st_mode & S_IRUSR) == 0)
		return NULL;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT)
			return NULL;
		ksft_exit_fail_msg("sysfs: open failed for '%s': %s\n",
				   path, strerror(errno));
	}
	len = read(fd, path, sizeof(path)-1);
	if (len < 0) {
		/*
		 * Fix: errno (an int) was previously passed straight to a
		 * %s conversion; save it before close() can clobber it and
		 * format it with strerror() like the other error paths.
		 */
		int saved_errno = errno;

		close(fd);
		ksft_exit_fail_msg("sysfs: unable to read value '%s': %s\n",
				   path, strerror(saved_errno));
	}
	close(fd);

	/* Strip trailing newlines before duplicating the value */
	while (len > 0 && path[len-1] == '\n')
		len--;
	path[len] = '\0';
	e = strdup(path);
	if (e == NULL)
		ksft_exit_fail_msg("Out of memory\n");
	return e;
}
/*
 * Evaluate a compound of { path, regex } conditions against sysfs.
 * Returns true only if the block is non-empty and every listed sysfs
 * value exists and matches its extended regex; exits the test on a
 * malformed block or regex.
 */
static bool sysfs_match(const char *sysfs_root, snd_config_t *config)
{
	snd_config_t *node, *path_config, *regex_config;
	snd_config_iterator_t i, next;
	const char *path_string, *regex_string;
	char *v;
	regex_t re;
	regmatch_t match[1];
	int iter = 0, ret;

	snd_config_for_each(i, next, config) {
		node = snd_config_iterator_entry(i);
		if (snd_config_search(node, "path", &path_config))
			ksft_exit_fail_msg("Missing path field in the sysfs block\n");
		if (snd_config_search(node, "regex", &regex_config))
			ksft_exit_fail_msg("Missing regex field in the sysfs block\n");
		if (snd_config_get_string(path_config, &path_string))
			ksft_exit_fail_msg("Path field in the sysfs block is not a string\n");
		if (snd_config_get_string(regex_config, &regex_string))
			ksft_exit_fail_msg("Regex field in the sysfs block is not a string\n");
		iter++;
		v = sysfs_get(sysfs_root, path_string);
		if (!v)
			return false;
		if (regcomp(&re, regex_string, REG_EXTENDED))
			ksft_exit_fail_msg("Wrong regex '%s'\n", regex_string);
		ret = regexec(&re, v, 1, match, 0);
		regfree(&re);
		/* Fix: the strdup'd sysfs value was leaked on every
		 * iteration and on the mismatch return path. */
		free(v);
		if (ret)
			return false;
	}
	return iter > 0;
}
/*
 * Load the config file and, if its global sysfs conditions match this
 * system, look for a card block whose own sysfs conditions match the
 * given card. On a match the card block is recorded in conf_cards and
 * true is returned.
 *
 * NOTE(review): the parsed config tree is never freed when no block
 * matches, and only the matched subtree is released by conf_free() —
 * presumably acceptable for a short-lived selftest; TODO confirm.
 */
static bool test_filename1(int card, const char *filename, const char *sysfs_card_root)
{
	struct card_data *data, *data2;
	snd_config_t *config, *sysfs_config, *card_config, *sysfs_card_config, *node;
	snd_config_iterator_t i, next;

	config = conf_load_from_file(filename);
	if (snd_config_search(config, "sysfs", &sysfs_config) ||
	    snd_config_get_type(sysfs_config) != SND_CONFIG_TYPE_COMPOUND)
		ksft_exit_fail_msg("Missing global sysfs block in filename %s\n", filename);
	if (snd_config_search(config, "card", &card_config) ||
	    snd_config_get_type(card_config) != SND_CONFIG_TYPE_COMPOUND)
		ksft_exit_fail_msg("Missing global card block in filename %s\n", filename);
	if (!sysfs_match(SYSFS_ROOT, sysfs_config))
		return false;
	snd_config_for_each(i, next, card_config) {
		node = snd_config_iterator_entry(i);
		if (snd_config_search(node, "sysfs", &sysfs_card_config) ||
		    snd_config_get_type(sysfs_card_config) != SND_CONFIG_TYPE_COMPOUND)
			ksft_exit_fail_msg("Missing card sysfs block in filename %s\n", filename);
		if (!sysfs_match(sysfs_card_root, sysfs_card_config))
			continue;
		data = malloc(sizeof(*data));
		if (!data)
			ksft_exit_fail_msg("Out of memory\n");
		/* Each card may be claimed by at most one config file */
		data2 = conf_data_by_card(card, false);
		if (data2)
			ksft_exit_fail_msg("Duplicate card '%s' <-> '%s'\n", filename, data2->filename);
		data->card = card;
		data->filename = filename;	/* ownership transferred from conf_load() */
		data->config = node;
		data->next = conf_cards;
		conf_cards = data;
		return true;
	}
	return false;
}
/*
 * Try to match the given config file against every present card,
 * returning true as soon as one card accepts it.
 */
static bool test_filename(const char *filename)
{
	char path[128];
	int card;

	for (card = 0; card < 32; card++) {
		snprintf(path, sizeof(path), "%s/class/sound/card%d",
			 SYSFS_ROOT, card);
		if (access(path, R_OK) != 0)
			continue;
		if (test_filename1(card, filename, path))
			return true;
	}

	return false;
}
/* scandir() filter: accept non-directory entries named *.conf */
static int filename_filter(const struct dirent *dirent)
{
	size_t len;

	if (!dirent || dirent->d_type == DT_DIR)
		return 0;

	/* Need at least one character before the ".conf" suffix */
	len = strlen(dirent->d_name);
	if (len > 5 && strncmp(&dirent->d_name[len - 5], ".conf", 5) == 0)
		return 1;

	return 0;
}
/*
 * Scan the conf.d directory for *.conf files and register any whose
 * sysfs conditions match a card present on this system.
 */
void conf_load(void)
{
	const char *fn = "conf.d";
	struct dirent **namelist;
	int n, j;

	n = scandir(fn, &namelist, filename_filter, alphasort);
	if (n < 0)
		ksft_exit_fail_msg("scandir: %s\n", strerror(errno));
	for (j = 0; j < n; j++) {
		/* +2 for the '/' separator and the terminating NUL */
		size_t sl = strlen(fn) + strlen(namelist[j]->d_name) + 2;
		char *filename = malloc(sl);
		if (filename == NULL)
			ksft_exit_fail_msg("Out of memory\n");
		sprintf(filename, "%s/%s", fn, namelist[j]->d_name);
		/*
		 * On a match test_filename() stores the filename pointer
		 * in the conf_cards list, i.e. ownership moves there;
		 * clear the local pointer so the free() below is a no-op.
		 */
		if (test_filename(filename))
			filename = NULL;
		free(filename);
		free(namelist[j]);
	}
	free(namelist);
}
/* Release everything registered by conf_load() */
void conf_free(void)
{
	struct card_data *conf;

	while (conf_cards) {
		conf = conf_cards;
		conf_cards = conf->next;
		snd_config_delete(conf->config);
		/*
		 * Fix: the list node and the filename whose ownership was
		 * taken over in conf_load() were previously leaked.
		 */
		free((char *)conf->filename);
		free(conf);
	}
}
/* Return the matched config block for a card, or NULL if none matched */
snd_config_t *conf_by_card(int card)
{
	struct card_data *entry;

	entry = conf_data_by_card(card, true);
	return entry ? entry->config : NULL;
}
/*
 * Look up an optional two-level key path; key1 and/or key2 may be NULL
 * to skip that level. On success (>= 0) *result is set to the found
 * node (or root itself when both keys are NULL); -ENOENT and other
 * negative codes are passed through from snd_config_search().
 */
static int conf_get_by_keys(snd_config_t *root, const char *key1,
			    const char *key2, snd_config_t **result)
{
	/* Fix: ret was read uninitialized when both keys are NULL */
	int ret = 0;

	if (key1) {
		ret = snd_config_search(root, key1, &root);
		if (ret != -ENOENT && ret < 0)
			return ret;
	}
	if (key2)
		ret = snd_config_search(root, key2, &root);
	if (ret >= 0)
		*result = root;
	return ret;
}
/*
 * Return the subtree at key1.key2, or NULL when root is NULL or the
 * key is absent; exits the test on other lookup errors.
 */
snd_config_t *conf_get_subtree(snd_config_t *root, const char *key1, const char *key2)
{
	int ret;

	if (!root)
		return NULL;

	ret = conf_get_by_keys(root, key1, key2, &root);
	if (ret < 0) {
		if (ret == -ENOENT)
			return NULL;
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
	}

	return root;
}
/*
 * Return the number of children of the compound at key1.key2, or -1
 * when root is NULL or the key is absent; exits the test on errors or
 * a non-compound node.
 */
int conf_get_count(snd_config_t *root, const char *key1, const char *key2)
{
	snd_config_t *cfg;
	snd_config_iterator_t it, next;
	int total, ret;

	if (!root)
		return -1;

	ret = conf_get_by_keys(root, key1, key2, &cfg);
	if (ret == -ENOENT)
		return -1;
	if (ret < 0)
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
	if (snd_config_get_type(cfg) != SND_CONFIG_TYPE_COMPOUND)
		ksft_exit_fail_msg("key '%s'.'%s' is not a compound\n", key1, key2);

	total = 0;
	snd_config_for_each(it, next, cfg)
		total++;

	return total;
}
/*
 * Return the string at key1.key2, or def when root is NULL or the key
 * is absent; exits the test on errors or a non-string value.
 */
const char *conf_get_string(snd_config_t *root, const char *key1, const char *key2, const char *def)
{
	snd_config_t *cfg;
	const char *value;
	int ret;

	if (!root)
		return def;

	ret = conf_get_by_keys(root, key1, key2, &cfg);
	if (ret == -ENOENT)
		return def;
	if (ret < 0)
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));

	if (snd_config_get_string(cfg, &value))
		ksft_exit_fail_msg("key '%s'.'%s' is not a string\n", key1, key2);

	return value;
}
/*
 * Return the integer at key1.key2, or def when root is NULL or the key
 * is absent; exits the test on errors or a non-integer value.
 */
long conf_get_long(snd_config_t *root, const char *key1, const char *key2, long def)
{
	snd_config_t *cfg;
	long value;
	int ret;

	if (!root)
		return def;

	ret = conf_get_by_keys(root, key1, key2, &cfg);
	if (ret == -ENOENT)
		return def;
	if (ret < 0)
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));

	if (snd_config_get_integer(cfg, &value))
		ksft_exit_fail_msg("key '%s'.'%s' is not an integer\n", key1, key2);

	return value;
}
/*
 * Return the boolean (as 0/1) at key1.key2, or def when root is NULL
 * or the key is absent; exits the test on errors or a non-boolean.
 */
int conf_get_bool(snd_config_t *root, const char *key1, const char *key2, int def)
{
	snd_config_t *cfg;
	int ret;

	if (!root)
		return def;
	ret = conf_get_by_keys(root, key1, key2, &cfg);
	if (ret == -ENOENT)
		return def;
	if (ret < 0)
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
	ret = snd_config_get_bool(cfg);
	if (ret < 0)
		/* Fix: message previously read "is not an bool" */
		ksft_exit_fail_msg("key '%s'.'%s' is not a bool\n", key1, key2);
	return !!ret;
}
/*
 * Fill array[0..array_size-1] with the strings of the compound at
 * key1.key2, using def for any missing entry. When the key (or the
 * whole root) is absent every slot gets def.
 */
void conf_get_string_array(snd_config_t *root, const char *key1, const char *key2,
			   const char **array, int array_size, const char *def)
{
	snd_config_t *cfg;
	char buf[16];
	int ret, index;

	/*
	 * Fix: every other conf_get_*() helper tolerates a NULL root and
	 * falls back to the default; do the same here rather than passing
	 * NULL into the config search.
	 */
	if (!root)
		ret = -ENOENT;
	else
		ret = conf_get_by_keys(root, key1, key2, &cfg);
	if (ret == -ENOENT)
		cfg = NULL;
	else if (ret < 0)
		ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));

	for (index = 0; index < array_size; index++) {
		if (cfg == NULL) {
			array[index] = def;
		} else {
			/* Entries are keyed by their decimal index */
			sprintf(buf, "%i", index);
			array[index] = conf_get_string(cfg, buf, NULL, def);
		}
	}
}
| linux-master | tools/testing/selftests/alsa/conf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* video_device_test - Video Device Test
*
* Copyright (c) 2016 Shuah Khan <[email protected]>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
*/
/*
* This file adds a test for Video Device. This test should not be included
* in the Kselftest run. This test should be run when hardware and driver
* that makes use of V4L2 API is present.
*
* This test opens user specified Video Device and calls video ioctls in a
* loop once every 10 seconds.
*
* Usage:
* sudo ./video_device_test -d /dev/videoX
*
* While test is running, remove the device or unbind the driver and
* ensure there are no use after free errors and other Oops in the
* dmesg.
* When possible, enable KaSan kernel config option for use-after-free
* error detection.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <time.h>
#include <linux/videodev2.h>
#define PRIORITY_MAX 4
/*
 * Exercise VIDIOC_G_PRIORITY/VIDIOC_S_PRIORITY on fd: read the
 * current priority, switch to a different one, read it back to
 * confirm the change, then restore the original priority.
 * Returns 0 on success, -1 on any failure.
 */
int priority_test(int fd)
{
	enum v4l2_priority saved, updated, readback;
	int status = 0;

	if (ioctl(fd, VIDIOC_G_PRIORITY, &saved) < 0) {
		printf("Failed to get priority: %s\n", strerror(errno));
		return -1;
	}

	updated = (saved + 1) % PRIORITY_MAX;
	if (ioctl(fd, VIDIOC_S_PRIORITY, &updated) < 0) {
		printf("Failed to set priority: %s\n", strerror(errno));
		return -1;
	}

	if (ioctl(fd, VIDIOC_G_PRIORITY, &readback) < 0) {
		printf("Failed to get new priority: %s\n", strerror(errno));
		status = -1;
		goto cleanup;
	}

	if (readback != updated) {
		printf("Priority wasn't set - test failed\n");
		status = -1;
	}

cleanup:
	/* Always attempt to put back the priority we found on entry. */
	if (ioctl(fd, VIDIOC_S_PRIORITY, &saved) < 0) {
		printf("Failed to restore priority: %s\n", strerror(errno));
		return -1;
	}

	return status;
}
/*
 * Issue VIDIOC_QUERYCAP and VIDIOC_G_TUNER on fd once every 10
 * seconds, for a random number of iterations.  ioctl failures are
 * only logged: the point of the test is that the kernel survives
 * the device being removed while the loop is running.
 */
int loop_test(int fd)
{
	int count;
	struct v4l2_tuner vtuner;
	struct v4l2_capability vcap;
	int ret;

	/* Generate random number of iterations */
	srand((unsigned int) time(NULL));
	count = rand();

	printf("\nNote:\n"
	       "While test is running, remove the device or unbind\n"
	       "driver and ensure there are no use after free errors\n"
	       "and other Oops in the dmesg. When possible, enable KaSan\n"
	       "kernel config option for use-after-free error detection.\n\n");

	while (count > 0) {
		/* Query capabilities; expected to start failing after unbind. */
		ret = ioctl(fd, VIDIOC_QUERYCAP, &vcap);
		if (ret < 0)
			printf("VIDIOC_QUERYCAP errno %s\n", strerror(errno));
		else
			printf("Video device driver %s\n", vcap.driver);

		ret = ioctl(fd, VIDIOC_G_TUNER, &vtuner);
		if (ret < 0)
			printf("VIDIOC_G_TUNER, errno %s\n", strerror(errno));
		else
			printf("type %d rangelow %d rangehigh %d\n",
				vtuner.type, vtuner.rangelow, vtuner.rangehigh);
		sleep(10);
		count--;
	}
	return 0;
}
/*
 * Entry point: parse -d </dev/videoX>, open the device, run the
 * priority round-trip test once, then run the long ioctl loop so
 * the operator can hot-remove the device underneath it.
 */
int main(int argc, char **argv)
{
	int opt;
	char video_dev[256] = "";	/* empty until -d is parsed */
	int fd;
	int test_result;

	if (argc < 2) {
		printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
		exit(-1);
	}

	/* Process arguments */
	while ((opt = getopt(argc, argv, "d:")) != -1) {
		switch (opt) {
		case 'd':
			strncpy(video_dev, optarg, sizeof(video_dev) - 1);
			video_dev[sizeof(video_dev) - 1] = '\0';
			break;
		default:
			printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
			exit(-1);
		}
	}

	/* Fix: video_dev was previously used uninitialized if the
	 * arguments did not include -d (e.g. a stray positional arg).
	 */
	if (video_dev[0] == '\0') {
		printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
		exit(-1);
	}

	/* Open Video device and keep it open */
	fd = open(video_dev, O_RDWR);
	if (fd == -1) {
		printf("Video Device open errno %s\n", strerror(errno));
		exit(-1);
	}

	test_result = priority_test(fd);
	if (!test_result)
		printf("Priority test - PASSED\n");
	else
		printf("Priority test - FAILED\n");

	loop_test(fd);

	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/media_tests/video_device_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* media_device_open.c - Media Controller Device Open Test
*
* Copyright (c) 2016 Shuah Khan <[email protected]>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
*/
/*
* This file adds a test for Media Controller API.
* This test should be run as root and should not be
* included in the Kselftest run. This test should be
* run when hardware and driver that makes use Media
* Controller API are present in the system.
*
* This test opens user specified Media Device and calls
* MEDIA_IOC_DEVICE_INFO ioctl, closes the file, and exits.
*
* Usage:
* sudo ./media_device_open -d /dev/mediaX
*
* Run this test is a loop and run bind/unbind on the driver.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/media.h>
#include "../kselftest.h"
/*
 * Entry point: parse -d </dev/mediaX>, open the media device,
 * issue MEDIA_IOC_DEVICE_INFO once, report the result, close the
 * device, and exit (matching the behavior the file header
 * documents).
 */
int main(int argc, char **argv)
{
	int opt;
	char media_device[256] = "";	/* empty until -d is parsed */
	struct media_device_info mdi;
	int ret;
	int fd;

	if (argc < 2) {
		printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
		exit(-1);
	}

	/* Process arguments */
	while ((opt = getopt(argc, argv, "d:")) != -1) {
		switch (opt) {
		case 'd':
			strncpy(media_device, optarg, sizeof(media_device) - 1);
			media_device[sizeof(media_device) - 1] = '\0';
			break;
		default:
			printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
			exit(-1);
		}
	}

	/* Fix: media_device was previously used uninitialized if the
	 * arguments did not include -d.
	 */
	if (media_device[0] == '\0') {
		printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
		exit(-1);
	}

	if (getuid() != 0)
		ksft_exit_skip("Please run the test as root - Exiting.\n");

	/* Open Media device, query it, then close it */
	fd = open(media_device, O_RDWR);
	if (fd == -1) {
		printf("Media Device open errno %s\n", strerror(errno));
		exit(-1);
	}

	ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi);
	if (ret < 0)
		printf("Media Device Info errno %s\n", strerror(errno));
	else
		printf("Media device model %s driver %s\n",
		       mdi.model, mdi.driver);

	/* Fix: the fd was previously leaked on exit */
	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/media_tests/media_device_open.c |
// SPDX-License-Identifier: GPL-2.0
/*
* media_device_test.c - Media Controller Device ioctl loop Test
*
* Copyright (c) 2016 Shuah Khan <[email protected]>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
*/
/*
* This file adds a test for Media Controller API.
* This test should be run as root and should not be
* included in the Kselftest run. This test should be
* run when hardware and driver that makes use Media
* Controller API are present in the system.
*
* This test opens user specified Media Device and calls
* MEDIA_IOC_DEVICE_INFO ioctl in a loop once every 10
* seconds.
*
* Usage:
* sudo ./media_device_test -d /dev/mediaX
*
* While test is running, remove the device and
* ensure there are no use after free errors and
* other Oops in the dmesg. Enable KaSan kernel
* config option for use-after-free error detection.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <time.h>
#include <linux/media.h>
#include "../kselftest.h"
/*
 * Entry point: parse -d </dev/mediaX>, open the device, then
 * issue MEDIA_IOC_DEVICE_INFO once every 10 seconds for a random
 * number of iterations while the operator removes the device.
 */
int main(int argc, char **argv)
{
	int opt;
	char media_device[256] = "";	/* empty until -d is parsed */
	int count;
	struct media_device_info mdi;
	int ret;
	int fd;

	if (argc < 2) {
		printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
		exit(-1);
	}

	/* Process arguments */
	while ((opt = getopt(argc, argv, "d:")) != -1) {
		switch (opt) {
		case 'd':
			strncpy(media_device, optarg, sizeof(media_device) - 1);
			media_device[sizeof(media_device) - 1] = '\0';
			break;
		default:
			printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
			exit(-1);
		}
	}

	/* Fix: media_device was previously used uninitialized if the
	 * arguments did not include -d.
	 */
	if (media_device[0] == '\0') {
		printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
		exit(-1);
	}

	if (getuid() != 0)
		ksft_exit_skip("Please run the test as root - Exiting.\n");

	/* Generate random number of iterations */
	srand((unsigned int) time(NULL));
	count = rand();

	/* Open Media device and keep it open */
	fd = open(media_device, O_RDWR);
	if (fd == -1) {
		printf("Media Device open errno %s\n", strerror(errno));
		exit(-1);
	}

	printf("\nNote:\n"
	       "While test is running, remove the device and\n"
	       "ensure there are no use after free errors and\n"
	       "other Oops in the dmesg. Enable KaSan kernel\n"
	       "config option for use-after-free error detection.\n\n");
	printf("Running test for %d iterations\n", count);

	while (count > 0) {
		ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi);
		if (ret < 0)
			printf("Media Device Info errno %s\n", strerror(errno));
		else
			printf("Media device model %s driver %s - count %d\n",
			       mdi.model, mdi.driver, count);
		sleep(10);
		count--;
	}

	/* Fix: release the device fd before exiting */
	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/media_tests/media_device_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014 Sony Mobile Communications Inc.
*
* Selftest for runtime system size
*
* Prints the amount of RAM that the currently running system is using.
*
* This program tries to be as small as possible itself, to
* avoid perturbing the system memory utilization with its
* own execution. It also attempts to have as few dependencies
* on kernel features as possible.
*
* It should be statically linked, with startup libs avoided. It uses
* no library calls except the syscall() function for the following 3
* syscalls:
* sysinfo(), write(), and _exit()
*
* For output, it avoids printf (which in some C libraries
* has large external dependencies) by implementing it's own
* number output and print routines, and using __builtin_strlen()
*
* The test may crash if any of the above syscalls fails because in some
* libc implementations (e.g. the GNU C Library) errno is saved in
* thread-local storage, which does not get initialized due to avoiding
* startup libs.
*/
#include <sys/sysinfo.h>
#include <unistd.h>
#include <sys/syscall.h>
#define STDOUT_FILENO 1
/*
 * Write the NUL-terminated string s to stdout through the raw
 * write syscall.  Returns write()'s result (bytes written, or a
 * negative value on failure).  The length is computed inline to
 * avoid a libc strlen dependency.
 */
static int print(const char *s)
{
	const char *end = s;

	while (*end)
		end++;
	return syscall(SYS_write, STDOUT_FILENO, s, end - s);
}
/*
 * Render num as a decimal string into buf of length len, filling
 * from the back of the buffer.  Returns a pointer to the first
 * digit (NOT necessarily buf).  buf must be large enough to hold
 * every digit plus the terminating NUL.
 */
static inline char *num_to_str(unsigned long num, char *buf, int len)
{
	char *pos = buf + len - 1;

	*pos = '\0';
	do {
		*(--pos) = (char)('0' + num % 10);
		num /= 10;
	} while (num);
	return pos;
}
/* Print num in decimal to stdout; returns the print() status. */
static int print_num(unsigned long num)
{
	char digits[30];

	return print(num_to_str(num, digits, sizeof(digits)));
}
/*
 * Print label s followed by (num * units) converted to kilobytes.
 * The multiplication is carried out in 64 bits so it does not
 * overflow on 32-bit systems.  Returns the number-print status.
 */
static int print_k_value(const char *s, unsigned long num, unsigned long units)
{
	unsigned long long kb = ((unsigned long long)num * units) / 1024;
	int status;

	print(s);
	status = print_num((unsigned long)kb);
	print("\n");
	return status;
}
/* this program has no main(), as startup libraries are not used */
/*
 * Entry point.  Emits a one-test TAP report containing the
 * system's memory usage as reported by sysinfo(), in kilobytes.
 * All exits go through the raw exit syscall, since there is no C
 * runtime to return into.
 */
void _start(void)
{
	int ccode;
	struct sysinfo info;
	unsigned long used;
	static const char *test_name = " get runtime memory use\n";

	print("TAP version 13\n");
	print("# Testing system size.\n");

	/* Raw syscall: libc's sysinfo() wrapper would touch errno/TLS. */
	ccode = syscall(SYS_sysinfo, &info);
	if (ccode < 0) {
		print("not ok 1");
		print(test_name);
		print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
		syscall(SYS_exit, ccode);
	}
	print("ok 1");
	print(test_name);

	/* ignore cache complexities for now */
	used = info.totalram - info.freeram - info.bufferram;
	print("# System runtime memory report (units in Kilobytes):\n");
	print(" ---\n");
	print_k_value(" Total: ", info.totalram, info.mem_unit);
	print_k_value(" Free: ", info.freeram, info.mem_unit);
	print_k_value(" Buffer: ", info.bufferram, info.mem_unit);
	print_k_value(" In use: ", used, info.mem_unit);
	print(" ...\n");
	print("1..1\n");

	syscall(SYS_exit, 0);
}
| linux-master | tools/testing/selftests/size/get_size.c |
/*
* sync stress test: parallelism
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pthread.h>
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/*
 * Shared state for the two-thread shared-timeline stress test:
 *   iterations - number of rounds each thread performs
 *   timeline   - the single sw_sync timeline both threads use
 *   counter    - incremented alternately; checked for trampling
 */
static struct {
	int iterations;
	int timeline;
	int counter;
} test_data_two_threads;
/*
 * Worker for the shared-timeline stress test.  The two threads
 * (thread_id 0 and 1) ping-pong: on round i each waits on sync
 * point i*2 + thread_id, verifies and increments the shared
 * counter, then advances the timeline by one to release the
 * other thread.
 */
static int test_stress_two_threads_shared_timeline_thread(void *d)
{
	int thread_id = (long)d;
	int timeline = test_data_two_threads.timeline;
	int iterations = test_data_two_threads.iterations;
	int fence, valid, ret, i;

	for (i = 0; i < iterations; i++) {
		fence = sw_sync_fence_create(timeline, "fence",
					     i * 2 + thread_id);
		valid = sw_sync_fence_is_valid(fence);
		ASSERT(valid, "Failure allocating fence\n");

		/* Wait on the prior thread to complete */
		ret = sync_wait(fence, -1);
		ASSERT(ret > 0, "Problem occurred on prior thread\n");

		/*
		 * Confirm the previous thread's writes are visible
		 * and then increment
		 */
		ASSERT(test_data_two_threads.counter == i * 2 + thread_id,
		       "Counter got damaged!\n");
		test_data_two_threads.counter++;

		/* Kick off the other thread */
		ret = sw_sync_timeline_inc(timeline, 1);
		ASSERT(ret == 0, "Advancing timeline failed\n");

		sw_sync_fence_destroy(fence);
	}

	return 0;
}
/*
 * Stress test: two threads hammer a shared counter, serialized
 * only by fences on a single sw_sync timeline.  Verifies the
 * final counter value is exactly 2 * iterations.
 */
int test_stress_two_threads_shared_timeline(void)
{
	pthread_t a, b;
	int valid;
	int timeline = sw_sync_timeline_create();

	valid = sw_sync_timeline_is_valid(timeline);
	ASSERT(valid, "Failure allocating timeline\n");

	test_data_two_threads.iterations = 1 << 16;
	test_data_two_threads.counter = 0;
	test_data_two_threads.timeline = timeline;

	/*
	 * Use a single timeline to synchronize two threads
	 * hammering on the same counter.
	 */
	/* NOTE(review): casting an int-returning function to
	 * void *(*)(void *) for pthread_create is technically
	 * undefined behavior; kept as-is to preserve behavior.
	 */
	pthread_create(&a, NULL, (void *(*)(void *))
		       test_stress_two_threads_shared_timeline_thread,
		       (void *)0);
	pthread_create(&b, NULL, (void *(*)(void *))
		       test_stress_two_threads_shared_timeline_thread,
		       (void *)1);

	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* make sure the threads did not trample on one another */
	ASSERT(test_data_two_threads.counter ==
	       test_data_two_threads.iterations * 2,
	       "Counter has unexpected value\n");

	sw_sync_timeline_destroy(timeline);

	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_stress_parallelism.c |
/*
* sync allocation tests
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/* Create a sw_sync timeline, check it is valid, and destroy it. */
int test_alloc_timeline(void)
{
	int tl;

	tl = sw_sync_timeline_create();
	ASSERT(sw_sync_timeline_is_valid(tl),
	       "Failure allocating timeline\n");
	sw_sync_timeline_destroy(tl);
	return 0;
}
/* Create a fence on a fresh timeline, then tear both down. */
int test_alloc_fence(void)
{
	int tl, f;

	tl = sw_sync_timeline_create();
	ASSERT(sw_sync_timeline_is_valid(tl),
	       "Failure allocating timeline\n");

	f = sw_sync_fence_create(tl, "allocFence", 1);
	ASSERT(sw_sync_fence_is_valid(f), "Failure allocating fence\n");

	sw_sync_fence_destroy(f);
	sw_sync_timeline_destroy(tl);
	return 0;
}
/* Creating a fence on an invalid timeline fd must fail. */
int test_alloc_fence_negative(void)
{
	int tl, f;

	tl = sw_sync_timeline_create();
	ASSERT(tl > 0, "Failure allocating timeline\n");

	f = sw_sync_fence_create(-1, "fence", 1);
	ASSERT(f < 0, "Success allocating negative fence\n");

	sw_sync_fence_destroy(f);
	sw_sync_timeline_destroy(tl);
	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_alloc.c |
/*
* sync stress test: merging
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/*
 * Stress test: repeatedly merge fences taken at random sync
 * points on a fixed set of 32 timelines, tracking the maximum
 * sync point per timeline in fence_map (-1 = unused).  Then
 * verify the merged fence has exactly one point per participating
 * timeline and only signals once every timeline has been advanced
 * to its recorded maximum.
 */
int test_merge_stress_random_merge(void)
{
	int i, size, ret;
	int timeline_count = 32;
	int merge_count = 1024 * 32;
	int timelines[timeline_count];
	int fence_map[timeline_count];
	int fence, tmpfence, merged, valid;
	int timeline, timeline_offset, sync_point;

	srand(time(NULL));

	for (i = 0; i < timeline_count; i++)
		timelines[i] = sw_sync_timeline_create();

	fence = sw_sync_fence_create(timelines[0], "fence", 0);
	valid = sw_sync_fence_is_valid(fence);
	ASSERT(valid, "Failure creating fence\n");

	/* -1 marks a timeline with no sync point merged in yet. */
	memset(fence_map, -1, sizeof(fence_map));
	fence_map[0] = 0;

	/*
	 * Randomly create sync_points out of a fixed set of timelines,
	 * and merge them together
	 */
	for (i = 0; i < merge_count; i++) {
		/* Generate sync_point. */
		timeline_offset = rand() % timeline_count;
		timeline = timelines[timeline_offset];
		sync_point = rand();

		/* Keep track of the latest sync_point in each timeline. */
		if (fence_map[timeline_offset] == -1)
			fence_map[timeline_offset] = sync_point;
		else if (fence_map[timeline_offset] < sync_point)
			fence_map[timeline_offset] = sync_point;

		/* Merge */
		tmpfence = sw_sync_fence_create(timeline, "fence", sync_point);
		merged = sync_merge("merge", tmpfence, fence);
		sw_sync_fence_destroy(tmpfence);
		sw_sync_fence_destroy(fence);
		fence = merged;

		valid = sw_sync_fence_is_valid(merged);
		ASSERT(valid, "Failure creating fence i\n");
	}

	/* Count how many timelines actually contributed a point. */
	size = 0;
	for (i = 0; i < timeline_count; i++)
		if (fence_map[i] != -1)
			size++;

	/* Confirm our map matches the fence. */
	ASSERT(sync_fence_size(fence) == size,
	       "Quantity of elements not matching\n");

	/* Trigger the merged fence */
	for (i = 0; i < timeline_count; i++) {
		if (fence_map[i] != -1) {
			/* Must still be pending before each advance. */
			ret = sync_wait(fence, 0);
			ASSERT(ret == 0,
			       "Failure waiting on fence until timeout\n");
			/* Increment the timeline to the last sync_point */
			sw_sync_timeline_inc(timelines[i], fence_map[i]);
		}
	}

	/* Check that the fence is triggered. */
	ret = sync_wait(fence, 0);
	ASSERT(ret > 0, "Failure triggering fence\n");

	sw_sync_fence_destroy(fence);

	for (i = 0; i < timeline_count; i++)
		sw_sync_timeline_destroy(timelines[i]);

	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_stress_merge.c |
/*
* sync stress test: producer/consumer
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pthread.h>
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/* IMPORTANT NOTE: if you see this test failing on your system, it may be
* due to a shortage of file descriptors. Please ensure your system has
* a sensible limit for this test to finish correctly.
*/
/* Returns 1 on error, 0 on success */
/*
 * Spin until no fence point on fence is still active.  Aborts via
 * ASSERT if any point reports an error; returns 0 otherwise.
 */
static int busy_wait_on_fence(int fence)
{
	int pending;

	do {
		ASSERT(sync_fence_count_with_status(fence,
						    FENCE_STATUS_ERROR) == 0,
		       "Error occurred on fence\n");
		pending = sync_fence_count_with_status(fence,
						       FENCE_STATUS_ACTIVE);
	} while (pending);
	return 0;
}
/*
 * Shared state for the multi-producer/single-consumer test:
 *   iterations         - rounds performed by each producer
 *   threads            - number of producer threads
 *   counter            - incremented by producers, verified by consumer
 *   consumer_timeline  - producers wait on it; consumer advances it
 *   producer_timelines - one per producer; consumer waits on them
 *   lock               - serializes counter increments
 */
static struct {
	int iterations;
	int threads;
	int counter;
	int consumer_timeline;
	int *producer_timelines;
	pthread_mutex_t lock;
} test_data_mpsc;
/*
 * Producer thread (id passed via d).  Each round it waits for the
 * consumer timeline to reach the round number (alternating between
 * sync_wait and a busy-wait to exercise both paths), increments
 * the shared counter under the lock, then advances its own
 * producer timeline so the consumer can observe the increment.
 */
static int mpsc_producer_thread(void *d)
{
	int id = (long)d;
	int fence, valid, i;
	int *producer_timelines = test_data_mpsc.producer_timelines;
	int consumer_timeline = test_data_mpsc.consumer_timeline;
	int iterations = test_data_mpsc.iterations;

	for (i = 0; i < iterations; i++) {
		fence = sw_sync_fence_create(consumer_timeline, "fence", i);
		valid = sw_sync_fence_is_valid(fence);
		ASSERT(valid, "Failure creating fence\n");

		/*
		 * Wait for the consumer to finish. Use alternate
		 * means of waiting on the fence
		 */
		if ((iterations + id) % 8 != 0) {
			ASSERT(sync_wait(fence, -1) > 0,
			       "Failure waiting on fence\n");
		} else {
			ASSERT(busy_wait_on_fence(fence) == 0,
			       "Failure waiting on fence\n");
		}

		/*
		 * Every producer increments the counter, the consumer
		 * checks and erases it
		 */
		pthread_mutex_lock(&test_data_mpsc.lock);
		test_data_mpsc.counter++;
		pthread_mutex_unlock(&test_data_mpsc.lock);

		ASSERT(sw_sync_timeline_inc(producer_timelines[id], 1) == 0,
		       "Error advancing producer timeline\n");

		sw_sync_fence_destroy(fence);
	}

	return 0;
}
/*
 * Consumer loop (runs on the main thread).  For each round it
 * merges one fence per producer timeline, waits until every
 * producer has advanced (again alternating wait strategies),
 * verifies the counter equals producers * round, then advances
 * the consumer timeline to release the producers.
 * NOTE(review): "mpcs" in the name appears to be a transposition
 * of "mpsc"; kept as-is since the caller uses this spelling.
 */
static int mpcs_consumer_thread(void)
{
	int fence, merged, tmp, valid, it, i;
	int *producer_timelines = test_data_mpsc.producer_timelines;
	int consumer_timeline = test_data_mpsc.consumer_timeline;
	int iterations = test_data_mpsc.iterations;
	int n = test_data_mpsc.threads;

	for (it = 1; it <= iterations; it++) {
		/* Merge one fence per producer timeline at point 'it'. */
		fence = sw_sync_fence_create(producer_timelines[0], "name", it);
		for (i = 1; i < n; i++) {
			tmp = sw_sync_fence_create(producer_timelines[i],
						   "name", it);
			merged = sync_merge("name", tmp, fence);
			sw_sync_fence_destroy(tmp);
			sw_sync_fence_destroy(fence);
			fence = merged;
		}

		valid = sw_sync_fence_is_valid(fence);
		ASSERT(valid, "Failure merging fences\n");

		/*
		 * Make sure we see an increment from every producer thread.
		 * Vary the means by which we wait.
		 */
		if (iterations % 8 != 0) {
			ASSERT(sync_wait(fence, -1) > 0,
			       "Producers did not increment as expected\n");
		} else {
			ASSERT(busy_wait_on_fence(fence) == 0,
			       "Producers did not increment as expected\n");
		}

		ASSERT(test_data_mpsc.counter == n * it,
		       "Counter value mismatch!\n");

		/* Release the producer threads */
		ASSERT(sw_sync_timeline_inc(consumer_timeline, 1) == 0,
		       "Failure releasing producer threads\n");

		sw_sync_fence_destroy(fence);
	}

	return 0;
}
/*
 * Multi-producer/single-consumer stress: spawns n producer
 * threads and runs the consumer loop on this thread for 2^12
 * rounds, all synchronized through sw_sync timelines.  Returns
 * the consumer's result (0 on success).
 */
int test_consumer_stress_multi_producer_single_consumer(void)
{
	int iterations = 1 << 12;
	int n = 5;
	long i, ret;
	int producer_timelines[n];
	int consumer_timeline;
	pthread_t threads[n];

	consumer_timeline = sw_sync_timeline_create();
	for (i = 0; i < n; i++)
		producer_timelines[i] = sw_sync_timeline_create();

	test_data_mpsc.producer_timelines = producer_timelines;
	test_data_mpsc.consumer_timeline = consumer_timeline;
	test_data_mpsc.iterations = iterations;
	test_data_mpsc.threads = n;
	test_data_mpsc.counter = 0;
	pthread_mutex_init(&test_data_mpsc.lock, NULL);

	/* NOTE(review): casting an int-returning function for
	 * pthread_create is technically UB; kept to preserve behavior.
	 */
	for (i = 0; i < n; i++) {
		pthread_create(&threads[i], NULL, (void * (*)(void *))
			       mpsc_producer_thread, (void *)i);
	}

	/* Consumer thread runs here */
	ret = mpcs_consumer_thread();

	for (i = 0; i < n; i++)
		pthread_join(threads[i], NULL);

	return ret;
}
| linux-master | tools/testing/selftests/sync/sync_stress_consumer.c |
/*
* sync fence tests with one timeline
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/*
 * Basic wait semantics on a single timeline: a fence taken at
 * sync point 5 must time out while the timeline is below 5,
 * signal at 5, and keep signaling after the timeline moves past.
 */
int test_fence_one_timeline_wait(void)
{
	int timeline, fence, ret;

	timeline = sw_sync_timeline_create();
	ASSERT(sw_sync_timeline_is_valid(timeline),
	       "Failure allocating timeline\n");

	fence = sw_sync_fence_create(timeline, "allocFence", 5);
	ASSERT(sw_sync_fence_is_valid(fence),
	       "Failure allocating fence\n");

	/* Timeline at 0: polling the fence must time out. */
	ret = sync_wait(fence, 0);
	ASSERT(ret == 0, "Failure waiting on fence until timeout\n");

	/* Advance timeline 0 -> 1: still short of point 5. */
	ASSERT(sw_sync_timeline_inc(timeline, 1) == 0,
	       "Failure advancing timeline\n");
	ret = sync_wait(fence, 0);
	ASSERT(ret == 0, "Failure waiting on fence until timeout\n");

	/* Advance to 5: the fence signals. */
	ASSERT(sw_sync_timeline_inc(timeline, 4) == 0,
	       "Failure signaling the fence\n");
	ret = sync_wait(fence, 0);
	ASSERT(ret > 0, "Failure waiting on fence\n");

	/* Moving further past the point must not unsignal it. */
	ASSERT(sw_sync_timeline_inc(timeline, 10) == 0,
	       "Failure going further\n");
	ret = sync_wait(fence, 0);
	ASSERT(ret > 0, "Failure waiting ahead\n");

	sw_sync_fence_destroy(fence);
	sw_sync_timeline_destroy(timeline);

	return 0;
}
/*
 * Merge three fences (points 1, 2, 3) from one timeline into d
 * and verify d only signals once the timeline reaches the maximum
 * point of its inputs.
 */
int test_fence_one_timeline_merge(void)
{
	int a, b, c, d, tmp, valid;
	int timeline = sw_sync_timeline_create();

	/* create fence a,b,c and then merge them all into fence d */
	a = sw_sync_fence_create(timeline, "allocFence", 1);
	b = sw_sync_fence_create(timeline, "allocFence", 2);
	c = sw_sync_fence_create(timeline, "allocFence", 3);

	valid = sw_sync_fence_is_valid(a) &&
		sw_sync_fence_is_valid(b) &&
		sw_sync_fence_is_valid(c);
	ASSERT(valid, "Failure allocating fences\n");

	tmp = sync_merge("mergeFence", b, a);
	d = sync_merge("mergeFence", c, tmp);
	/* fix: destroy the intermediate merge fd instead of leaking it */
	sw_sync_fence_destroy(tmp);
	valid = sw_sync_fence_is_valid(d);
	ASSERT(valid, "Failure merging fences\n");

	/*
	 * confirm all fences have one active point (even d)
	 * fix: these checks previously all inspected fence 'a'
	 */
	ASSERT(sync_fence_count_with_status(a, FENCE_STATUS_ACTIVE) == 1,
	       "a has too many active fences!\n");
	ASSERT(sync_fence_count_with_status(b, FENCE_STATUS_ACTIVE) == 1,
	       "b has too many active fences!\n");
	ASSERT(sync_fence_count_with_status(c, FENCE_STATUS_ACTIVE) == 1,
	       "c has too many active fences!\n");
	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
	       "d has too many active fences!\n");

	/* confirm that d is not signaled until the max of a,b,c */
	sw_sync_timeline_inc(timeline, 1);
	ASSERT(sync_fence_count_with_status(a, FENCE_STATUS_SIGNALED) == 1,
	       "a did not signal!\n");
	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
	       "d signaled too early!\n");

	sw_sync_timeline_inc(timeline, 1);
	ASSERT(sync_fence_count_with_status(b, FENCE_STATUS_SIGNALED) == 1,
	       "b did not signal!\n");
	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
	       "d signaled too early!\n");

	sw_sync_timeline_inc(timeline, 1);
	ASSERT(sync_fence_count_with_status(c, FENCE_STATUS_SIGNALED) == 1,
	       "c did not signal!\n");
	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 0 &&
	       sync_fence_count_with_status(d, FENCE_STATUS_SIGNALED) == 1,
	       "d did not signal!\n");

	sw_sync_fence_destroy(d);
	sw_sync_fence_destroy(c);
	sw_sync_fence_destroy(b);
	sw_sync_fence_destroy(a);
	sw_sync_timeline_destroy(timeline);

	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_fence.c |
/*
* sync / sw_sync abstraction
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <fcntl.h>
#include <malloc.h>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "sync.h"
#include "sw_sync.h"
#include <linux/sync_file.h>
/* SW_SYNC ioctls */
/*
 * Argument block for SW_SYNC_IOC_CREATE_FENCE:
 *   value - sync point on the timeline the new fence waits for
 *   name  - debug name for the fence
 *   fence - out: fd of the created fence
 */
struct sw_sync_create_fence_data {
	__u32 value;
	char name[32];
	__s32 fence;
};
#define SW_SYNC_IOC_MAGIC 'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
/*
 * Wait up to timeout ms (negative = block forever) for fd to
 * become readable or report an error.  Returns poll()'s result:
 * > 0 signaled, 0 timed out, < 0 poll error.
 */
int sync_wait(int fd, int timeout)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLERR,
	};

	return poll(&pfd, 1, timeout);
}
/*
 * Merge the sync files fd1 and fd2 into a new fence named name.
 * Returns the new fence fd, or the negative ioctl result on
 * failure.
 */
int sync_merge(const char *name, int fd1, int fd2)
{
	struct sync_merge_data req = { .fd2 = fd2 };
	int rc;

	strncpy(req.name, name, sizeof(req.name) - 1);
	req.name[sizeof(req.name) - 1] = '\0';

	rc = ioctl(fd1, SYNC_IOC_MERGE, &req);
	if (rc < 0)
		return rc;
	return req.fence;
}
/*
 * Query SYNC_IOC_FILE_INFO for fd.  The first ioctl (num_fences
 * left at 0) only reports the fence count; when points exist, a
 * second ioctl fills the allocated sync_fence_info array, whose
 * address is stored in info->sync_fence_info.  Returns NULL on
 * any failure; the caller frees the result with
 * sync_file_info_free().
 */
static struct sync_file_info *sync_file_info(int fd)
{
	struct sync_file_info *info;
	struct sync_fence_info *fences = NULL;
	int count;

	info = calloc(1, sizeof(*info));
	if (info == NULL)
		return NULL;

	if (ioctl(fd, SYNC_IOC_FILE_INFO, info) < 0)
		goto fail_info;

	count = info->num_fences;
	if (count) {
		info->flags = 0;
		info->num_fences = count;
		fences = calloc(count, sizeof(*fences));
		if (fences == NULL)
			goto fail_info;
		info->sync_fence_info = (uint64_t)(unsigned long)fences;
		if (ioctl(fd, SYNC_IOC_FILE_INFO, info) < 0)
			goto fail_fences;
	}

	return info;

fail_fences:
	free(fences);
fail_info:
	free(info);
	return NULL;
}
/* Release an info struct (and its fence array) from sync_file_info(). */
static void sync_file_info_free(struct sync_file_info *info)
{
	void *fences = (void *)(unsigned long)info->sync_fence_info;

	free(fences);
	free(info);
}
/* Return the number of fences backing sync file @fd, or 0 on error. */
int sync_fence_size(int fd)
{
	struct sync_file_info *info = sync_file_info(fd);
	int num;

	if (!info)
		return 0;

	num = info->num_fences;
	sync_file_info_free(info);

	return num;
}
/*
 * Count how many of @fd's component fences currently report @status.
 * Returns the count, or -1 if the fence info could not be read.
 */
int sync_fence_count_with_status(int fd, int status)
{
	struct sync_file_info *info = sync_file_info(fd);
	struct sync_fence_info *fences;
	unsigned int idx, matches = 0;

	if (!info)
		return -1;

	fences = (struct sync_fence_info *)(unsigned long)info->sync_fence_info;
	for (idx = 0; idx < info->num_fences; idx++) {
		if (fences[idx].status == status)
			matches++;
	}

	sync_file_info_free(info);

	return matches;
}
/*
 * Open a fresh sw_sync timeline via debugfs.  Returns its fd, or -1
 * if the node is absent or inaccessible.
 */
int sw_sync_timeline_create(void)
{
	static const char node[] = "/sys/kernel/debug/sync/sw_sync";

	return open(node, O_RDWR);
}
/*
 * Advance timeline @fd by @count steps, signalling any fences whose
 * point has been reached.  Returns the ioctl()'s result.
 */
int sw_sync_timeline_inc(int fd, unsigned int count)
{
	__u32 delta = count;

	return ioctl(fd, SW_SYNC_IOC_INC, &delta);
}
/* Return nonzero iff @fd is a live file descriptor (probed via fcntl). */
int sw_sync_timeline_is_valid(int fd)
{
	if (fd == -1)
		return 0;

	return fcntl(fd, F_GETFD, 0) >= 0;
}
/* Close the timeline @fd if it is still valid; no-op otherwise. */
void sw_sync_timeline_destroy(int fd)
{
	if (!sw_sync_timeline_is_valid(fd))
		return;

	close(fd);
}
int sw_sync_fence_create(int fd, const char *name, unsigned int value)
{
struct sw_sync_create_fence_data data = {};
int err;
data.value = value;
strncpy(data.name, name, sizeof(data.name) - 1);
data.name[sizeof(data.name) - 1] = '\0';
err = ioctl(fd, SW_SYNC_IOC_CREATE_FENCE, &data);
if (err < 0)
return err;
return data.fence;
}
/* Fence fds and timeline fds share the same fd-validity check. */
int sw_sync_fence_is_valid(int fd)
{
	return sw_sync_timeline_is_valid(fd);
}
/* Close the fence @fd if it is still valid; no-op otherwise. */
void sw_sync_fence_destroy(int fd)
{
	if (!sw_sync_fence_is_valid(fd))
		return;

	close(fd);
}
| linux-master | tools/testing/selftests/sync/sync.c |
/*
* sync test runner
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <errno.h>
#include <string.h>
#include "../kselftest.h"
#include "synctest.h"
/*
 * Fork and run one test case in a child process, reporting the result
 * through the kselftest API.  Returns the test's exit status (0 on
 * pass), or 1 if the child terminated abnormally or fork() failed.
 */
static int run_test(int (*test)(void), char *name)
{
	int result;
	pid_t childpid;
	int ret;

	fflush(stdout);
	childpid = fork();
	if (childpid < 0) {
		/* Fix: the original treated a failed fork() as "parent",
		 * called waitpid(-1, ...) with no children, and then read
		 * an uninitialized status word. */
		ksft_test_result_fail("[RUN]\t%s\n", name);
		return 1;
	}

	if (childpid) {
		waitpid(childpid, &result, 0);
		if (WIFEXITED(result)) {
			ret = WEXITSTATUS(result);
			if (!ret)
				ksft_test_result_pass("[RUN]\t%s\n", name);
			else
				ksft_test_result_fail("[RUN]\t%s\n", name);
			return ret;
		}
		return 1;
	}

	exit(test());
}
/*
 * Probe for the sw_sync debugfs node.  Skips the whole suite if the
 * kernel lacks the sync framework (ENOENT) or we lack permission
 * (EACCES); any other stat() failure is fatal.
 */
static void sync_api_supported(void)
{
	struct stat sbuf;
	int ret;

	ret = stat("/sys/kernel/debug/sync/sw_sync", &sbuf);
	if (!ret)
		return;

	if (errno == ENOENT)
		ksft_exit_skip("Sync framework not supported by kernel\n");

	if (errno == EACCES)
		ksft_exit_skip("Run Sync test as root.\n");

	/* Fix: message was missing the trailing newline that every
	 * other message printed by this test carries. */
	ksft_exit_fail_msg("stat failed on /sys/kernel/debug/sync/sw_sync: %s\n",
			   strerror(errno));
}
/*
 * Entry point: verify the sync framework is usable, then run the
 * allocation, functional, and stress test cases in forked children
 * via RUN_TEST, reporting through the kselftest API.
 */
int main(void)
{
	int err;

	ksft_print_header();

	/* Skips (or fails) the whole run if sw_sync is unusable. */
	sync_api_supported();

	/* 3 allocation tests + 7 functional/stress tests below. */
	ksft_set_plan(3 + 7);

	ksft_print_msg("[RUN]\tTesting sync framework\n");

	RUN_TEST(test_alloc_timeline);
	RUN_TEST(test_alloc_fence);
	RUN_TEST(test_alloc_fence_negative);

	RUN_TEST(test_fence_one_timeline_wait);
	RUN_TEST(test_fence_one_timeline_merge);
	RUN_TEST(test_fence_merge_same_fence);
	RUN_TEST(test_fence_multi_timeline_wait);
	RUN_TEST(test_stress_two_threads_shared_timeline);
	RUN_TEST(test_consumer_stress_multi_producer_single_consumer);
	RUN_TEST(test_merge_stress_random_merge);

	err = ksft_get_fail_cnt();
	if (err)
		ksft_exit_fail_msg("%d out of %d sync tests failed\n",
					err, ksft_test_num());

	/* need this return to keep gcc happy */
	return ksft_exit_pass();
}
| linux-master | tools/testing/selftests/sync/sync_test.c |
/*
* sync fence merge tests
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/*
 * Merging a fence with itself must yield a valid merged fence that
 * tracks the same timeline point as the original.
 */
int test_fence_merge_same_fence(void)
{
	int fence, valid, merged;
	int timeline = sw_sync_timeline_create();

	valid = sw_sync_timeline_is_valid(timeline);
	ASSERT(valid, "Failure allocating timeline\n");

	fence = sw_sync_fence_create(timeline, "allocFence", 5);
	valid = sw_sync_fence_is_valid(fence);
	ASSERT(valid, "Failure allocating fence\n");

	merged = sync_merge("mergeFence", fence, fence);
	/* Fix: validate the fd produced by the merge, not the input
	 * fence that was already validated above. */
	valid = sw_sync_fence_is_valid(merged);
	ASSERT(valid, "Failure merging fence\n");

	ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 0,
	       "fence signaled too early!\n");

	sw_sync_timeline_inc(timeline, 5);
	ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 1,
	       "fence did not signal!\n");

	sw_sync_fence_destroy(merged);
	sw_sync_fence_destroy(fence);
	sw_sync_timeline_destroy(timeline);

	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_merge.c |
/*
* sync fence wait tests
* Copyright 2015-2016 Collabora Ltd.
*
* Based on the implementation from the Android Open Source Project,
*
* Copyright 2012 Google, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sync.h"
#include "sw_sync.h"
#include "synctest.h"
/*
 * Merge three fences from three independent timelines and verify the
 * merged fence only fully signals once each timeline has advanced to
 * its fence point, and that sync_wait() behaves accordingly.
 */
int test_fence_multi_timeline_wait(void)
{
	int timelineA, timelineB, timelineC;
	int fenceA, fenceB, fenceC, mergedAB, merged;
	int valid, active, signaled, ret;

	timelineA = sw_sync_timeline_create();
	timelineB = sw_sync_timeline_create();
	timelineC = sw_sync_timeline_create();

	fenceA = sw_sync_fence_create(timelineA, "fenceA", 5);
	fenceB = sw_sync_fence_create(timelineB, "fenceB", 5);
	fenceC = sw_sync_fence_create(timelineC, "fenceC", 5);

	/* Fix: keep the intermediate A+B merge in its own variable so
	 * its fd can be closed below; the original overwrote it with the
	 * second merge's result and leaked it. */
	mergedAB = sync_merge("mergeFence", fenceB, fenceA);
	merged = sync_merge("mergeFence", fenceC, mergedAB);

	valid = sw_sync_fence_is_valid(merged);
	ASSERT(valid, "Failure merging fence from various timelines\n");

	/* Confirm fence isn't signaled */
	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
	ASSERT(active == 3, "Fence signaled too early!\n");

	ret = sync_wait(merged, 0);
	ASSERT(ret == 0,
	       "Failure waiting on fence until timeout\n");

	ret = sw_sync_timeline_inc(timelineA, 5);
	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
	ASSERT(active == 2 && signaled == 1,
	       "Fence did not signal properly!\n");

	ret = sw_sync_timeline_inc(timelineB, 5);
	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
	ASSERT(active == 1 && signaled == 2,
	       "Fence did not signal properly!\n");

	ret = sw_sync_timeline_inc(timelineC, 5);
	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
	ASSERT(active == 0 && signaled == 3,
	       "Fence did not signal properly!\n");

	/* confirm you can successfully wait */
	ret = sync_wait(merged, 100);
	ASSERT(ret > 0, "Failure waiting on signaled fence\n");

	sw_sync_fence_destroy(merged);
	sw_sync_fence_destroy(mergedAB);
	sw_sync_fence_destroy(fenceC);
	sw_sync_fence_destroy(fenceB);
	sw_sync_fence_destroy(fenceA);
	sw_sync_timeline_destroy(timelineC);
	sw_sync_timeline_destroy(timelineB);
	sw_sync_timeline_destroy(timelineA);

	return 0;
}
| linux-master | tools/testing/selftests/sync/sync_wait.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 Collabora Ltd.
*
* Test code for syscall user dispatch
*/
#define _GNU_SOURCE
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <sys/syscall.h>
#include <signal.h>
#include <asm/unistd.h>
#include "../kselftest_harness.h"
/* Fallback definitions for uapi headers that predate syscall user dispatch. */
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
# define PR_SYS_DISPATCH_ON 1
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
#endif

/* si_code reported in SIGSYS for dispatched syscalls. */
#ifndef SYS_USER_DISPATCH
# define SYS_USER_DISPATCH 2
#endif

#ifdef __NR_syscalls
# define MAGIC_SYSCALL_1 (__NR_syscalls + 1) /* Bad Linux syscall number */
#else
# define MAGIC_SYSCALL_1 (0xff00) /* Bad Linux syscall number */
#endif

/* Arm/disarm interception by writing the selector byte. */
#define SYSCALL_DISPATCH_ON(x) ((x) = SYSCALL_DISPATCH_FILTER_BLOCK)
#define SYSCALL_DISPATCH_OFF(x) ((x) = SYSCALL_DISPATCH_FILTER_ALLOW)
/* Test Summary:
*
* - dispatch_trigger_sigsys: Verify if PR_SET_SYSCALL_USER_DISPATCH is
* able to trigger SIGSYS on a syscall.
*
* - bad_selector: Test that a bad selector value triggers SIGSYS with
* si_errno EINVAL.
*
* - bad_prctl_param: Test that the API correctly rejects invalid
* parameters on prctl
*
* - dispatch_and_return: Test that a syscall is selectively dispatched
* to userspace depending on the value of selector.
*
* - disable_dispatch: Test that the PR_SYS_DISPATCH_OFF correctly
* disables the dispatcher
*
* - direct_dispatch_range: Test that a syscall within the allowed range
* can bypass the dispatcher.
*/
/*
 * Enable syscall user dispatch and set the selector to BLOCK: the very
 * next syscall must raise SIGSYS (the harness expects this test to die
 * with SIGSYS).
 */
TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
{
	char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
	struct sysinfo info;
	int ret;

	/* Baseline: sysinfo() works before dispatch is enabled. */
	ret = sysinfo(&info);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &sel);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
	}

	/* Flip the selector to BLOCK: this sysinfo() must trap. */
	SYSCALL_DISPATCH_ON(sel);

	sysinfo(&info);

	/* Not reached if dispatch works. */
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}
/*
 * Exercise prctl() parameter validation: invalid ops, non-zero
 * offset/len/selector for DISPATCH_OFF, and malformed dispatcher
 * ranges or selectors for DISPATCH_ON must all be rejected with
 * EINVAL/EFAULT, while well-formed calls must succeed.
 */
TEST(bad_prctl_param)
{
	char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
	int op;

	/* Invalid op */
	op = -1;
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0, 0, &sel);
	ASSERT_EQ(EINVAL, errno);

	/* PR_SYS_DISPATCH_OFF */
	op = PR_SYS_DISPATCH_OFF;

	/* offset != 0 */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, 0);
	EXPECT_EQ(EINVAL, errno);

	/* len != 0 */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0xff, 0);
	EXPECT_EQ(EINVAL, errno);

	/* sel != NULL */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, &sel);
	EXPECT_EQ(EINVAL, errno);

	/* Valid parameter */
	errno = 0;
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, 0x0);
	EXPECT_EQ(0, errno);

	/* PR_SYS_DISPATCH_ON */
	op = PR_SYS_DISPATCH_ON;

	/* Dispatcher region is bad (offset > 0 && len == 0) */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, &sel);
	EXPECT_EQ(EINVAL, errno);
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, -1L, 0x0, &sel);
	EXPECT_EQ(EINVAL, errno);

	/* Invalid selector */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x1, (void *) -1);
	ASSERT_EQ(EFAULT, errno);

	/*
	 * Dispatcher range overflows unsigned long
	 */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 1, -1L, &sel);
	ASSERT_EQ(EINVAL, errno) {
		TH_LOG("Should reject bad syscall range");
	}

	/*
	 * Allowed range overflows unsigned long
	 */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel);
	ASSERT_EQ(EINVAL, errno) {
		TH_LOG("Should reject bad syscall range");
	}
}
/*
 * Use global selector for handle_sigsys tests, to avoid passing
 * selector to signal handler
 */
char glob_sel;			/* dispatch selector byte shared with the kernel */
int nr_syscalls_emulated;	/* MAGIC_SYSCALL_1 traps seen by the handler */
int si_code;			/* si_code captured from the last SIGSYS */
int si_errno;			/* si_errno captured from the last SIGSYS */
/*
 * SIGSYS handler: record si_code/si_errno for the test body to check,
 * count emulated MAGIC_SYSCALL_1 traps, and unblock dispatch so the
 * sigreturn path is not itself intercepted.
 */
static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
{
	si_code = info->si_code;
	si_errno = info->si_errno;

	if (info->si_syscall == MAGIC_SYSCALL_1)
		nr_syscalls_emulated++;

	/* In preparation for sigreturn. */
	SYSCALL_DISPATCH_OFF(glob_sel);
}
/*
 * Verify that a syscall is delivered to the SIGSYS handler only while
 * the selector is BLOCK, and that si_code/si_errno carry the expected
 * SYS_USER_DISPATCH values.
 */
TEST(dispatch_and_return)
{
	long ret;
	struct sigaction act;
	sigset_t mask;

	glob_sel = 0;
	nr_syscalls_emulated = 0;
	si_code = 0;
	si_errno = 0;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);

	act.sa_sigaction = handle_sigsys;
	act.sa_flags = SA_SIGINFO;
	act.sa_mask = mask;

	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret);

	/* Make sure selector is good prior to prctl. */
	SYSCALL_DISPATCH_OFF(glob_sel);

	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &glob_sel);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
	}

	/* MAGIC_SYSCALL_1 doesn't exist: with dispatch off it must fail
	 * natively (-1) rather than trap. */
	SYSCALL_DISPATCH_OFF(glob_sel);
	ret = syscall(MAGIC_SYSCALL_1);
	EXPECT_EQ(-1, ret) {
		TH_LOG("Dispatch triggered unexpectedly");
	}

	/* MAGIC_SYSCALL_1 should be emulated. */
	nr_syscalls_emulated = 0;
	SYSCALL_DISPATCH_ON(glob_sel);

	ret = syscall(MAGIC_SYSCALL_1);
	EXPECT_EQ(MAGIC_SYSCALL_1, ret) {
		TH_LOG("Failed to intercept syscall");
	}
	EXPECT_EQ(1, nr_syscalls_emulated) {
		TH_LOG("Failed to emulate syscall");
	}
	ASSERT_EQ(SYS_USER_DISPATCH, si_code) {
		TH_LOG("Bad si_code in SIGSYS");
	}
	ASSERT_EQ(0, si_errno) {
		TH_LOG("Bad si_errno in SIGSYS");
	}
}
/*
 * A selector byte holding anything other than ALLOW (0) or BLOCK (1)
 * is invalid; the resulting SIGSYS is supposed to be uncatchable, so
 * the installed handler must not run and the harness expects this
 * test to die with SIGSYS.
 */
TEST_SIGNAL(bad_selector, SIGSYS)
{
	long ret;
	struct sigaction act;
	sigset_t mask;
	struct sysinfo info;

	glob_sel = SYSCALL_DISPATCH_FILTER_ALLOW;
	nr_syscalls_emulated = 0;
	si_code = 0;
	si_errno = 0;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);

	act.sa_sigaction = handle_sigsys;
	act.sa_flags = SA_SIGINFO;
	act.sa_mask = mask;

	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret);

	/* Make sure selector is good prior to prctl. */
	SYSCALL_DISPATCH_OFF(glob_sel);

	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &glob_sel);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
	}

	/* -1 is neither ALLOW nor BLOCK. */
	glob_sel = -1;

	sysinfo(&info);

	/* Even though it is ready to catch SIGSYS, the signal is
	 * supposed to be uncatchable.
	 */
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}
/*
 * After PR_SYS_DISPATCH_OFF, flipping the (now-stale) selector must
 * have no effect and real syscalls must run natively again.
 */
TEST(disable_dispatch)
{
	int ret;
	struct sysinfo info;
	char sel = 0;

	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &sel);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
	}

	/* Keep dispatch disarmed while turning the feature off.
	 * NOTE(review): this toggles glob_sel although dispatch was
	 * registered with the local 'sel' above — looks like a
	 * copy-paste leftover; confirm intent. */
	SYSCALL_DISPATCH_OFF(glob_sel);

	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_OFF, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Failed to unset syscall user dispatch");
	}

	/* Shouldn't have any effect... */
	SYSCALL_DISPATCH_ON(glob_sel);

	ret = syscall(__NR_sysinfo, &info);
	EXPECT_EQ(0, ret) {
		TH_LOG("Dispatch triggered unexpectedly");
	}
}
/*
 * Syscalls issued from inside the allowed [offset, offset+len) region
 * passed to prctl() must bypass the dispatcher even while the selector
 * is BLOCK.  Offset 0 with len -1L covers the whole address space.
 */
TEST(direct_dispatch_range)
{
	int ret = 0;
	struct sysinfo info;
	char sel = SYSCALL_DISPATCH_FILTER_ALLOW;

	/*
	 * Instead of calculating libc addresses; allow the entire
	 * memory map and lock the selector.
	 */
	ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, -1L, &sel);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
	}

	SYSCALL_DISPATCH_ON(sel);

	ret = sysinfo(&info);
	ASSERT_EQ(0, ret) {
		TH_LOG("Dispatch triggered unexpectedly");
	}
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/syscall_user_dispatch/sud_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 Collabora Ltd.
*
* Benchmark and test syscall user dispatch
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <sys/sysinfo.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
# define PR_SYS_DISPATCH_ON 1
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
#endif
#ifdef __NR_syscalls
# define MAGIC_SYSCALL_1 (__NR_syscalls + 1) /* Bad Linux syscall number */
#else
# define MAGIC_SYSCALL_1 (0xff00) /* Bad Linux syscall number */
#endif
/*
* To test returning from a sigsys with selector blocked, the test
* requires some per-architecture support (i.e. knowledge about the
* signal trampoline address). On i386, we know it is on the vdso, and
* a small trampoline is open-coded for x86_64. Other architectures
* that have a trampoline in the vdso will support TEST_BLOCKED_RETURN
* out of the box, but don't enable them until they support syscall user
* dispatch.
*/
#if defined(__x86_64__) || defined(__i386__)
#define TEST_BLOCKED_RETURN
#endif
#ifdef __x86_64__
/*
 * On x86_64 these labels are emitted inside handle_sigsys()'s inline
 * asm and bound the open-coded return trampoline; elsewhere they are
 * plain zeros so the prctl() below gets an empty allowed region.
 */
void* (syscall_dispatcher_start)(void);
void* (syscall_dispatcher_end)(void);
#else
unsigned long syscall_dispatcher_start = 0;
unsigned long syscall_dispatcher_end = 0;
#endif

/* Counters updated from the SIGSYS handler. */
unsigned long trapped_call_count = 0;
unsigned long native_call_count = 0;

/* Dispatch selector byte registered with the kernel via prctl(). */
char selector;
#define SYSCALL_BLOCK (selector = SYSCALL_DISPATCH_FILTER_BLOCK)
#define SYSCALL_UNBLOCK (selector = SYSCALL_DISPATCH_FILTER_ALLOW)

#define CALIBRATION_STEP 100000
#define CALIBRATE_TO_SECS 5
/* Iteration multiplier chosen by calibrate_set(). */
int factor;
/*
 * Time CALIBRATION_STEP back-to-back sysinfo() calls and return the
 * elapsed monotonic wall-clock time in seconds.
 */
static double one_sysinfo_step(void)
{
	struct timespec start, stop;
	struct sysinfo info;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < CALIBRATION_STEP; i++)
		sysinfo(&info);
	clock_gettime(CLOCK_MONOTONIC, &stop);

	return (stop.tv_sec - start.tv_sec) +
	       1.0e-9 * (stop.tv_nsec - start.tv_nsec);
}
/*
 * Grow the global iteration factor until a full perf_syscall() run is
 * expected to take roughly CALIBRATE_TO_SECS seconds.
 */
static void calibrate_set(void)
{
	double spent = 0;

	printf("Calibrating test set to last ~%d seconds...\n", CALIBRATE_TO_SECS);

	while (spent < 1) {
		spent += one_sysinfo_step();
		factor += CALIBRATE_TO_SECS;
	}

	printf("test iterations = %d\n", CALIBRATION_STEP * factor);
}
/*
 * Average per-syscall cost in seconds, measured over
 * CALIBRATION_STEP * factor sysinfo() calls.
 */
static double perf_syscall(void)
{
	double avg = 0;
	unsigned int step;

	for (step = 0; step < factor; ++step)
		avg += one_sysinfo_step() / (CALIBRATION_STEP * factor);

	return avg;
}
/*
 * Benchmark SIGSYS handler: counts trapped vs. native calls, then,
 * when TEST_BLOCKED_RETURN is set, re-blocks the selector and returns
 * to userspace through a hand-rolled x86_64 trampoline so the final
 * return syscall is issued from inside the allowed dispatcher region
 * (syscall_dispatcher_start..end) and is not itself trapped.
 */
static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
{
	char buf[1024];
	int len;

	SYSCALL_UNBLOCK;

	/* printf and friends are not signal-safe. */
	len = snprintf(buf, 1024, "Caught sys_%x\n", info->si_syscall);
	write(1, buf, len);

	if (info->si_syscall == MAGIC_SYSCALL_1)
		trapped_call_count++;
	else
		native_call_count++;

#ifdef TEST_BLOCKED_RETURN
	SYSCALL_BLOCK;
#endif

#ifdef __x86_64__
	/* Load 0xf (rt_sigreturn on x86_64) into rax, unwind this frame
	 * by hand, and issue the syscall from between the dispatcher
	 * labels so it bypasses user dispatch. */
	__asm__ volatile("movq $0xf, %rax");
	__asm__ volatile("leaveq");
	__asm__ volatile("add $0x8, %rsp");
	__asm__ volatile("syscall_dispatcher_start:");
	__asm__ volatile("syscall");
	/* nop keeps a landing pad inside the dispatcher region. */
	__asm__ volatile("nop"); /* Landing pad within dispatcher area */
	__asm__ volatile("syscall_dispatcher_end:");
#endif
}
/*
 * Benchmark flow: measure native sysinfo() cost, enable syscall user
 * dispatch allowing only the trampoline region, confirm interception
 * actually happens, then measure again and report the overhead.
 */
int main(void)
{
	struct sigaction act;
	double time1, time2;
	int ret;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);

	act.sa_sigaction = handle_sigsys;
	act.sa_flags = SA_SIGINFO;
	act.sa_mask = mask;

	calibrate_set();

	time1 = perf_syscall();
	printf("Avg syscall time %.0lfns.\n", time1 * 1.0e9);

	ret = sigaction(SIGSYS, &act, NULL);
	if (ret) {
		perror("Error sigaction:");
		exit(-1);
	}

	fprintf(stderr, "Enabling syscall trapping.\n");

	/* Allow only the window between the dispatcher labels;
	 * everything else is gated by &selector. */
	if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		  syscall_dispatcher_start,
		  (syscall_dispatcher_end - syscall_dispatcher_start + 1),
		  &selector)) {
		perror("prctl failed\n");
		exit(-1);
	}

	SYSCALL_BLOCK;
	syscall(MAGIC_SYSCALL_1);

#ifdef TEST_BLOCKED_RETURN
	/* handle_sigsys() re-blocked the selector before returning; if
	 * it reads ALLOW here, the blocked-return path did not work. */
	if (selector == SYSCALL_DISPATCH_FILTER_ALLOW) {
		fprintf(stderr, "Failed to return with selector blocked.\n");
		exit(-1);
	}
#endif

	SYSCALL_UNBLOCK;

	if (!trapped_call_count) {
		fprintf(stderr, "syscall trapping does not work.\n");
		exit(-1);
	}

	time2 = perf_syscall();

	if (native_call_count) {
		perror("syscall trapping intercepted more syscalls than expected\n");
		exit(-1);
	}

	printf("trapped_call_count %lu, native_call_count %lu.\n",
	       trapped_call_count, native_call_count);
	printf("Avg syscall time %.0lfns.\n", time2 * 1.0e9);
	printf("Interception overhead: %.1lf%% (+%.0lfns).\n",
	       100.0 * (time2 / time1 - 1.0), 1.0e9 * (time2 - time1));

	return 0;
}
| linux-master | tools/testing/selftests/syscall_user_dispatch/sud_benchmark.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/mman.h>
#include <testcases/mmap_test.h>
#include "../../kselftest_harness.h"
/*
 * With an unlimited stack rlimit the mmap layout is expected to be
 * bottom-up; verify that, and that each hinted mmap() result stays
 * below the bound implied by its hint (1<<38, 1<<47, 1<<56).
 * Meaningful only on 64-bit (__riscv_xlen == 64).
 */
TEST(infinite_rlimit)
{
// Only works on 64 bit
#if __riscv_xlen == 64
	struct addresses mmap_addresses;

	EXPECT_EQ(BOTTOM_UP, memory_layout());

	do_mmaps(&mmap_addresses);

	EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);

	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
	EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
#endif
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/mman.h>
#include <testcases/mmap_test.h>
#include "../../kselftest_harness.h"
/*
 * With the default (finite) stack rlimit the mmap layout is expected
 * to be top-down; verify that, and that each hinted mmap() result
 * stays below the bound implied by its hint (1<<38, 1<<47, 1<<56).
 * Meaningful only on 64-bit (__riscv_xlen == 64).
 */
TEST(default_rlimit)
{
// Only works on 64 bit
#if __riscv_xlen == 64
	struct addresses mmap_addresses;

	EXPECT_EQ(TOP_DOWN, memory_layout());

	do_mmaps(&mmap_addresses);

	EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
	EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);

	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
	EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
	EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
	EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
#endif
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/riscv/mm/testcases/mmap_default.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <asm/hwprobe.h>
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
* contain the call.
*/
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpu_count, unsigned long *cpus, unsigned int flags);
/*
 * Basic conformance test for the riscv_hwprobe() syscall: base keys
 * are recognized, CPU-set argument validation works, known keys are
 * echoed back, and unknown keys are flagged with key = -1 without
 * blocking later pairs.  Exit code identifies the failing check.
 */
int main(int argc, char **argv)
{
	struct riscv_hwprobe pairs[8];
	unsigned long cpus;
	long out;

	/* Fake the CPU_SET ops. */
	cpus = -1;

	/*
	 * Just run a basic test: pass enough pairs to get up to the base
	 * behavior, and then check to make sure it's sane.
	 */
	for (long i = 0; i < 8; i++)
		pairs[i].key = i;

	out = riscv_hwprobe(pairs, 8, 1, &cpus, 0);
	if (out != 0)
		return -1;
	for (long i = 0; i < 4; ++i) {
		/* Fail if the kernel claims not to recognize a base key.
		 * NOTE(review): the 'i < 4' guard is redundant inside a
		 * loop already bounded by i < 4. */
		if ((i < 4) && (pairs[i].key != i))
			return -2;

		if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
			continue;

		if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
			continue;

		return -3;
	}

	/*
	 * This should also work with a NULL CPU set, but should not work
	 * with an improperly supplied CPU set.
	 */
	out = riscv_hwprobe(pairs, 8, 0, 0, 0);
	if (out != 0)
		return -4;

	out = riscv_hwprobe(pairs, 8, 0, &cpus, 0);
	if (out == 0)
		return -5;

	out = riscv_hwprobe(pairs, 8, 1, 0, 0);
	if (out == 0)
		return -6;

	/*
	 * Check that keys work by providing one that we know exists, and
	 * checking to make sure the resulting pair is what we asked for.
	 */
	pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
	out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
	if (out != 0)
		return -7;
	if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
		return -8;

	/*
	 * Check that an unknown key gets overwritten with -1,
	 * but doesn't block elements after it.
	 */
	pairs[0].key = 0x5555;
	pairs[1].key = 1;
	pairs[1].value = 0xAAAA;

	out = riscv_hwprobe(pairs, 2, 0, 0, 0);
	if (out != 0)
		return -9;

	if (pairs[0].key != -1)
		return -10;

	if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA))
		return -11;

	return 0;
}
| linux-master | tools/testing/selftests/riscv/hwprobe/hwprobe.c |
// SPDX-License-Identifier: GPL-2.0-only
#define THIS_PROGRAM "./vstate_exec_nolibc"
/*
 * Nolibc helper run by the vstate_prctl test: reports the current
 * vector control word (PR_RISCV_V_GET_CONTROL) through its exit
 * status.  With no argument it forks a child that executes a vector
 * instruction to probe whether V is actually enabled; with any
 * argument it re-execs itself to check control-word inheritance
 * across execve().
 */
int main(int argc, char **argv)
{
	int rc, pid, status, test_inherit = 0;
	long ctrl, ctrl_c;
	char *exec_argv[2], *exec_envp[2];

	if (argc > 1)
		test_inherit = 1;

	ctrl = my_syscall1(__NR_prctl, PR_RISCV_V_GET_CONTROL);
	if (ctrl < 0) {
		puts("PR_RISCV_V_GET_CONTROL is not supported\n");
		return ctrl;
	}

	if (test_inherit) {
		pid = fork();
		if (pid == -1) {
			puts("fork failed\n");
			exit(-1);
		}

		/* child */
		if (!pid) {
			exec_argv[0] = THIS_PROGRAM;
			exec_argv[1] = NULL;
			exec_envp[0] = NULL;
			exec_envp[1] = NULL;
			/* launch the program again to check inherit */
			rc = execve(THIS_PROGRAM, exec_argv, exec_envp);
			if (rc) {
				puts("child execve failed\n");
				exit(-1);
			}
		}
	} else {
		pid = fork();
		if (pid == -1) {
			puts("fork failed\n");
			exit(-1);
		}

		if (!pid) {
			/* Forked child must see the same control word. */
			rc = my_syscall1(__NR_prctl, PR_RISCV_V_GET_CONTROL);
			if (rc != ctrl) {
				puts("child's vstate_ctrl not equal to parent's\n");
				exit(-1);
			}

			/* Touch the V unit; a SIGILL here means vector
			 * is disabled for this task. */
			asm volatile (".option push\n\t"
				      ".option arch, +v\n\t"
				      "vsetvli x0, x0, e32, m8, ta, ma\n\t"
				      ".option pop\n\t"
				      );
			exit(ctrl);
		}
	}

	rc = waitpid(-1, &status, 0);

	/*
	 * NOTE(review): WEXITSTATUS() yields 0..255, so comparing it to
	 * -1 can never be true; a child exit(-1) arrives here as 255.
	 * Confirm whether this check was meant to catch that.
	 */
	if (WIFEXITED(status) && WEXITSTATUS(status) == -1) {
		puts("child exited abnormally\n");
		exit(-1);
	}

	if (WIFSIGNALED(status)) {
		if (WTERMSIG(status) != SIGILL) {
			puts("child was terminated by unexpected signal\n");
			exit(-1);
		}

		if ((ctrl & PR_RISCV_V_VSTATE_CTRL_CUR_MASK) != PR_RISCV_V_VSTATE_CTRL_OFF) {
			puts("child signaled by illegal V access but vstate_ctrl is not off\n");
			exit(-1);
		}

		/* child terminated, and its vstate_ctrl is off */
		exit(ctrl);
	}

	ctrl_c = WEXITSTATUS(status);
	if (test_inherit) {
		if (ctrl & PR_RISCV_V_VSTATE_CTRL_INHERIT) {
			if (!(ctrl_c & PR_RISCV_V_VSTATE_CTRL_INHERIT)) {
				puts("parent has inherit bit, but child has not\n");
				exit(-1);
			}
		}

		/* Compare the parent's "next" setting against the
		 * child's "current" setting after execve. */
		rc = (ctrl & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2;
		if (rc != PR_RISCV_V_VSTATE_CTRL_DEFAULT) {
			if (rc != (ctrl_c & PR_RISCV_V_VSTATE_CTRL_CUR_MASK)) {
				puts("parent's next setting does not equal to child's\n");
				exit(-1);
			}

			if (!(ctrl & PR_RISCV_V_VSTATE_CTRL_INHERIT)) {
				if ((ctrl_c & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) !=
				    PR_RISCV_V_VSTATE_CTRL_DEFAULT) {
					puts("must clear child's next vstate_ctrl if !inherit\n");
					exit(-1);
				}
			}
		}
	}

	return ctrl;
}
| linux-master | tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/prctl.h>
#include <unistd.h>
#include <asm/hwprobe.h>
#include <errno.h>
#include <sys/wait.h>
#include "../../kselftest.h"
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
* contain the call.
*/
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpu_count, unsigned long *cpus, unsigned int flags);
#define NEXT_PROGRAM "./vstate_exec_nolibc"
/*
 * Fork+exec NEXT_PROGRAM (with an extra argv entry when @test_inherit
 * is set, so the child verifies inheritance) and return the child's
 * exit status, which encodes its vector control word.  Returns a
 * negative value on infrastructure failure.
 */
static int launch_test(int test_inherit)
{
	char *exec_argv[3], *exec_envp[1];
	int rc, pid, status;

	pid = fork();
	if (pid < 0) {
		/* Fix: message was missing its trailing newline. */
		ksft_test_result_fail("fork failed %d\n", pid);
		return -1;
	}

	if (!pid) {
		exec_argv[0] = NEXT_PROGRAM;
		exec_argv[1] = test_inherit != 0 ? "x" : NULL;
		exec_argv[2] = NULL;
		exec_envp[0] = NULL;
		/* launch the program again to check inherit */
		rc = execve(NEXT_PROGRAM, exec_argv, exec_envp);
		if (rc) {
			perror("execve");
			ksft_test_result_fail("child execve failed %d\n", rc);
			exit(-1);
		}
	}

	rc = waitpid(-1, &status, 0);
	if (rc < 0) {
		ksft_test_result_fail("waitpid failed\n");
		return -3;
	}

	/*
	 * Fix: WEXITSTATUS() yields 0..255 and can never equal -1, so
	 * the original "== -1" check was dead code.  A child exit(-1)
	 * shows up here as 255.
	 */
	if ((WIFEXITED(status) && WEXITSTATUS(status) == 255) ||
	    WIFSIGNALED(status)) {
		ksft_test_result_fail("child exited abnormally\n");
		return -4;
	}

	return WEXITSTATUS(status);
}
/*
 * Set the current vector control word to @provided via prctl(), run
 * the child helper, and verify the control word it reports equals
 * @expected.  Returns 0 on success, a negative value on failure.
 */
int test_and_compare_child(long provided, long expected, int inherit)
{
	int rc = prctl(PR_RISCV_V_SET_CONTROL, provided);

	if (rc != 0) {
		ksft_test_result_fail("prctl with provided arg %lx failed with code %d\n",
				      provided, rc);
		return -1;
	}

	rc = launch_test(inherit);
	if (rc == expected)
		return 0;

	ksft_test_result_fail("Test failed, check %d != %d\n", rc, expected);
	return -2;
}
#define PR_RISCV_V_VSTATE_CTRL_CUR_SHIFT 0
#define PR_RISCV_V_VSTATE_CTRL_NEXT_SHIFT 2
/*
 * Exercise the riscv PR_RISCV_V_SET_CONTROL / PR_RISCV_V_GET_CONTROL
 * prctls: behavior without the V extension, current/next enable/disable
 * rules, inheritance across execve, and rejection of invalid arguments.
 */
int main(void)
{
	struct riscv_hwprobe pair;
	long flag, expected;
	long rc;

	pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
	rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
	if (rc < 0) {
		/* 'rc' is long: use %ld (the original %d was mismatched). */
		ksft_test_result_fail("hwprobe() failed with %ld\n", rc);
		return -1;
	}

	if (pair.key != RISCV_HWPROBE_KEY_IMA_EXT_0) {
		ksft_test_result_fail("hwprobe cannot probe RISCV_HWPROBE_KEY_IMA_EXT_0\n");
		return -2;
	}

	if (!(pair.value & RISCV_HWPROBE_IMA_V)) {
		/* Without V, both prctls must fail with EINVAL. */
		rc = prctl(PR_RISCV_V_GET_CONTROL);
		if (rc != -1 || errno != EINVAL) {
			ksft_test_result_fail("GET_CONTROL should fail on kernel/hw without V\n");
			return -3;
		}

		rc = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
		if (rc != -1 || errno != EINVAL) {
			/* This branch tests SET_CONTROL; message said GET. */
			ksft_test_result_fail("SET_CONTROL should fail on kernel/hw without V\n");
			return -4;
		}

		ksft_test_result_skip("Vector not supported\n");
		return 0;
	}

	flag = PR_RISCV_V_VSTATE_CTRL_ON;
	rc = prctl(PR_RISCV_V_SET_CONTROL, flag);
	if (rc != 0) {
		ksft_test_result_fail("Enabling V for current should always success\n");
		return -5;
	}

	/* V cannot be disabled for the current task once live. */
	flag = PR_RISCV_V_VSTATE_CTRL_OFF;
	rc = prctl(PR_RISCV_V_SET_CONTROL, flag);
	if (rc != -1 || errno != EPERM) {
		ksft_test_result_fail("Disabling current's V alive must fail with EPERM(%d)\n",
				      errno);
		return -5;
	}

	/* Turn on next's vector explicitly and test */
	flag = PR_RISCV_V_VSTATE_CTRL_ON << PR_RISCV_V_VSTATE_CTRL_NEXT_SHIFT;
	if (test_and_compare_child(flag, PR_RISCV_V_VSTATE_CTRL_ON, 0))
		return -6;

	/* Turn off next's vector explicitly and test */
	flag = PR_RISCV_V_VSTATE_CTRL_OFF << PR_RISCV_V_VSTATE_CTRL_NEXT_SHIFT;
	if (test_and_compare_child(flag, PR_RISCV_V_VSTATE_CTRL_OFF, 0))
		return -7;

	/* Turn on next's vector explicitly and test inherit */
	flag = PR_RISCV_V_VSTATE_CTRL_ON << PR_RISCV_V_VSTATE_CTRL_NEXT_SHIFT;
	flag |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
	expected = flag | PR_RISCV_V_VSTATE_CTRL_ON;
	if (test_and_compare_child(flag, expected, 0))
		return -8;
	if (test_and_compare_child(flag, expected, 1))
		return -9;

	/* Turn off next's vector explicitly and test inherit */
	flag = PR_RISCV_V_VSTATE_CTRL_OFF << PR_RISCV_V_VSTATE_CTRL_NEXT_SHIFT;
	flag |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
	expected = flag | PR_RISCV_V_VSTATE_CTRL_OFF;
	if (test_and_compare_child(flag, expected, 0))
		return -10;
	if (test_and_compare_child(flag, expected, 1))
		return -11;

	/* arguments should fail with EINVAL */
	rc = prctl(PR_RISCV_V_SET_CONTROL, 0xff0);
	if (rc != -1 || errno != EINVAL) {
		ksft_test_result_fail("Undefined control argument should return EINVAL\n");
		return -12;
	}

	rc = prctl(PR_RISCV_V_SET_CONTROL, 0x3);
	if (rc != -1 || errno != EINVAL) {
		ksft_test_result_fail("Undefined control argument should return EINVAL\n");
		return -12;
	}

	/* The original checked 0xc twice; once is enough. */
	rc = prctl(PR_RISCV_V_SET_CONTROL, 0xc);
	if (rc != -1 || errno != EINVAL) {
		ksft_test_result_fail("Undefined control argument should return EINVAL\n");
		return -12;
	}

	ksft_test_result_pass("tests for riscv_v_vstate_ctrl pass\n");
	ksft_exit_pass();
	return 0;
}
| linux-master | tools/testing/selftests/riscv/vector/vstate_prctl.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "../../kselftest.h"
#define MAX_VSIZE (8192 * 32)
/*
 * Hex-dump @size bytes starting at @ptr, 16 bytes per line with an extra
 * gap after every 8 bytes.
 */
void dump(char *ptr, int size)
{
	int i = 0;

	for (i = 0; i < size; i++) {
		if (i != 0) {
			if (i % 16 == 0)
				printf("\n");
			else if (i % 8 == 0)
				printf(" ");
		}
		/*
		 * Cast through unsigned char: with a signed 'char', a byte
		 * such as 0xff would be sign-extended by the default
		 * argument promotion and print as "ffffffff".
		 */
		printf("%02x ", (unsigned char)ptr[i]);
	}
	printf("\n");
}
/*
 * Check that the vector register file a new process sees is cleanly
 * initialized: spill v0-v31 to memory and verify every byte is the same
 * and equals either 0x00 or 0xff (the two permitted init patterns).
 */
int main(void)
{
	int i;
	unsigned long vl;
	char *datap, *tmp;

	datap = malloc(MAX_VSIZE);
	if (!datap) {
		/* MAX_VSIZE is an int expression; cast to match %lu. */
		ksft_test_result_fail("fail to allocate memory for size = %lu\n",
				      (unsigned long)MAX_VSIZE);
		exit(-1);
	}

	/* Store all four register groups (v0/v8/v16/v24) back to back. */
	tmp = datap;
	asm volatile (
		".option push\n\t"
		".option arch, +v\n\t"
		"vsetvli	%0, x0, e8, m8, ta, ma\n\t"
		"vse8.v		v0, (%2)\n\t"
		"add		%1, %2, %0\n\t"
		"vse8.v		v8, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vse8.v		v16, (%1)\n\t"
		"add		%1, %1, %0\n\t"
		"vse8.v		v24, (%1)\n\t"
		".option pop\n\t"
		: "=&r" (vl), "=r" (tmp) : "r" (datap) : "memory");

	ksft_print_msg("vl = %lu\n", vl);

	/*
	 * Compare as unsigned char: plain 'char' may be signed, in which
	 * case datap[0] could never equal 0xff and a 0xff-initialized
	 * register file would be falsely reported as a failure.
	 */
	if ((unsigned char)datap[0] != 0x00 && (unsigned char)datap[0] != 0xff) {
		ksft_test_result_fail("v-registers are not properly initialized\n");
		dump(datap, vl * 4);
		exit(-1);
	}

	for (i = 1; i < vl * 4; i++) {
		if (datap[i] != datap[0]) {
			ksft_test_result_fail("detect stale values on v-registers\n");
			dump(datap, vl * 4);
			exit(-2);
		}
	}

	free(datap);
	ksft_exit_pass();
	return 0;
}
| linux-master | tools/testing/selftests/riscv/vector/v_initval_nolibc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <fcntl.h>
#include <time.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/io.h>
#include <sys/ioctl.h>
#include <sys/reboot.h>
#include <sys/utsname.h>
#include <sys/sendfile.h>
#include <sys/sysmacros.h>
#include <sys/random.h>
#include <linux/random.h>
#include <linux/version.h>
/* Flush all output and power the VM off; never returns. */
__attribute__((noreturn)) static void poweroff(void)
{
	fflush(stdout);
	fflush(stderr);
	reboot(RB_AUTOBOOT);
	/* reboot(2) should not return; if it somehow does, bail loudly. */
	sleep(30);
	fputs("\x1b[37m\x1b[41m\x1b[1mFailed to power off!!!\x1b[0m\n", stderr);
	exit(1);
}
/* Print a big red error banner including errno's message, then power off. */
static void panic(const char *reason)
{
	fprintf(stderr,
		"\n\n\x1b[37m\x1b[41m\x1b[1mSOMETHING WENT HORRIBLY WRONG\x1b[0m\n\n \x1b[31m\x1b[1m%s: %s\x1b[0m\n\n\x1b[37m\x1b[44m\x1b[1mPower off...\x1b[0m\n\n",
		reason, strerror(errno));
	poweroff();
}
#define pretty_message(msg) puts("\x1b[32m\x1b[1m" msg "\x1b[0m")
/*
 * Print a colored banner naming the kernel (sysname/release/machine),
 * framed above and below by blank colored lines of matching width
 * ("%*.s" with an empty string prints 'len' spaces).
 */
static void print_banner(void)
{
	struct utsname utsname;
	int len;

	if (uname(&utsname) < 0)
		panic("uname");

	/* Width of the middle text line, used to size the padding lines. */
	len = strlen("  WireGuard Test Suite on       ") + strlen(utsname.sysname) + strlen(utsname.release) + strlen(utsname.machine);
	printf("\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\x1b[45m\x1b[33m\x1b[1m  WireGuard Test Suite on %s %s %s  \x1b[0m\n\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\n", len, "", utsname.sysname, utsname.release, utsname.machine, len, "");
}
/*
 * If the kernel RNG is not yet initialized, credit it with fake entropy
 * so the test VM does not stall waiting for real randomness.
 */
static void seed_rng(void)
{
	int bits = 256, fd;

	/* getrandom(NULL, 0, ...) returns 0 once the RNG is ready. */
	if (!getrandom(NULL, 0, GRND_NONBLOCK))
		return;
	pretty_message("[+] Fake seeding RNG...");
	fd = open("/dev/random", O_WRONLY);
	if (fd < 0)
		panic("open(random)");
	if (ioctl(fd, RNDADDTOENTCNT, &bits) < 0)
		panic("ioctl(RNDADDTOENTCNT)");
	close(fd);
}
/*
 * If the clock reads zero (test VM has no RTC), set an arbitrary fixed
 * date so time-dependent code has something sane to work with.
 */
static void set_time(void)
{
	if (time(NULL))
		return;
	pretty_message("[+] Setting fake time...");
	/* stime() is a legacy interface but sufficient for this init. */
	if (stime(&(time_t){1433512680}) < 0)
		panic("settimeofday()");
}
/*
 * Create and mount the pseudo-filesystems the test environment needs.
 * Everything except debugfs is mandatory; debugfs may be missing from
 * the kernel config. Also provides the /var/run and /dev/fd symlinks
 * that userspace tools conventionally expect.
 */
static void mount_filesystems(void)
{
	pretty_message("[+] Mounting filesystems...");
	mkdir("/dev", 0755);
	mkdir("/proc", 0755);
	mkdir("/sys", 0755);
	mkdir("/tmp", 0755);
	mkdir("/run", 0755);
	mkdir("/var", 0755);
	if (mount("none", "/dev", "devtmpfs", 0, NULL))
		panic("devtmpfs mount");
	if (mount("none", "/proc", "proc", 0, NULL))
		panic("procfs mount");
	if (mount("none", "/sys", "sysfs", 0, NULL))
		panic("sysfs mount");
	if (mount("none", "/tmp", "tmpfs", 0, NULL))
		panic("tmpfs mount");
	if (mount("none", "/run", "tmpfs", 0, NULL))
		panic("tmpfs mount");
	if (mount("none", "/sys/kernel/debug", "debugfs", 0, NULL))
		; /* Not a problem if it fails.*/
	if (symlink("/run", "/var/run"))
		panic("run symlink");
	if (symlink("/proc/self/fd", "/dev/fd"))
		panic("fd symlink");
}
/*
 * Crank the console log level to maximum and enable exception traces.
 * Either knob may be absent (open failure is tolerated), but a failed
 * write to an open knob is fatal.
 */
static void enable_logging(void)
{
	int fd;

	pretty_message("[+] Enabling logging...");
	fd = open("/proc/sys/kernel/printk", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "9\n", 2) != 2)
			panic("write(printk)");
		close(fd);
	}
	fd = open("/proc/sys/debug/exception-trace", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1\n", 2) != 2)
			panic("write(exception-trace)");
		close(fd);
	}
}
/*
 * Scrape /proc/kmsg (non-blocking, so we stop when the buffer is drained)
 * for the "wireguard: " self-test lines emitted at module load. Lines
 * ending in ": pass" print green, anything else prints red and marks the
 * run failed; the module's sign-off banner (contains its URL) ends the
 * scan. Powers the VM off on any failure.
 */
static void kmod_selftests(void)
{
	FILE *file;
	char line[2048], *start, *pass;
	bool success = true;

	pretty_message("[+] Module self-tests:");
	file = fopen("/proc/kmsg", "r");
	if (!file)
		panic("fopen(kmsg)");
	if (fcntl(fileno(file), F_SETFL, O_NONBLOCK) < 0)
		panic("fcntl(kmsg, nonblock)");
	while (fgets(line, sizeof(line), file)) {
		start = strstr(line, "wireguard: ");
		if (!start)
			continue;
		start += 11;	/* skip the "wireguard: " prefix */
		*strchrnul(start, '\n') = '\0';
		if (strstr(start, "www.wireguard.com"))
			break;
		pass = strstr(start, ": pass");
		/* ": pass" must be the very end of the line to count. */
		if (!pass || pass[6] != '\0') {
			success = false;
			printf(" \x1b[31m*  %s\x1b[0m\n", start);
		} else
			printf(" \x1b[32m*  %s\x1b[0m\n", start);
	}
	fclose(file);
	if (!success) {
		puts("\x1b[31m\x1b[1m[-] Tests failed! \u2639\x1b[0m");
		poweroff();
	}
}
/*
 * Fork/exec the test driver (/init.sh) and report its result. On success,
 * find "wg.success=<dev>" on the kernel command line and write "success"
 * to /dev/<dev> so the host-side harness can observe the outcome.
 */
static void launch_tests(void)
{
	char cmdline[4096], *success_dev;
	int status, fd;
	pid_t pid;

	pretty_message("[+] Launching tests...");
	pid = fork();
	if (pid == -1)
		panic("fork");
	else if (pid == 0) {
		execl("/init.sh", "init", NULL);
		panic("exec");
	}
	if (waitpid(pid, &status, 0) < 0)
		panic("waitpid");
	if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
		pretty_message("[+] Tests successful! :-)");
		fd = open("/proc/cmdline", O_RDONLY);
		if (fd < 0)
			panic("open(/proc/cmdline)");
		if (read(fd, cmdline, sizeof(cmdline) - 1) <= 0)
			panic("read(/proc/cmdline)");
		cmdline[sizeof(cmdline) - 1] = '\0';
		for (success_dev = strtok(cmdline, " \n"); success_dev; success_dev = strtok(NULL, " \n")) {
			if (strncmp(success_dev, "wg.success=", 11))
				continue;
			/*
			 * Overwrite the 5 bytes before the value in place,
			 * turning "...wg.success=vdb" into "/dev/vdb".
			 */
			memcpy(success_dev + 11 - 5, "/dev/", 5);
			success_dev += 11 - 5;
			break;
		}
		if (!success_dev || !strlen(success_dev))
			panic("Unable to find success device");

		fd = open(success_dev, O_WRONLY);
		if (fd < 0)
			panic("open(success_dev)");
		if (write(fd, "success\n", 8) != 8)
			panic("write(success_dev)");
		close(fd);
	} else {
		/* Child died abnormally or with a non-zero code: report why. */
		const char *why = "unknown cause";
		int what = -1;

		if (WIFEXITED(status)) {
			why = "exit code";
			what = WEXITSTATUS(status);
		} else if (WIFSIGNALED(status)) {
			why = "signal";
			what = WTERMSIG(status);
		}
		printf("\x1b[31m\x1b[1m[-] Tests failed with %s %d! \u2639\x1b[0m\n", why, what);
	}
}
/*
 * Wire /dev/console up as stdin/stdout/stderr, retrying for a while since
 * devtmpfs may race with early boot. Panics if the console never becomes
 * writable.
 */
static void ensure_console(void)
{
	unsigned int attempt = 0;

	while (attempt++ < 1000) {
		int console = open("/dev/console", O_RDWR);

		if (console < 0) {
			usleep(50000);
			continue;
		}
		dup2(console, 0);
		dup2(console, 1);
		dup2(console, 2);
		close(console);
		/* A successful test write proves the console is usable. */
		if (write(1, "\0\0\0\0\n", 5) == 5)
			return;
	}
	panic("Unable to open console device");
}
/*
 * If kmemleak is available, reset its list of previously-recorded leaks
 * so a later scan only reports leaks produced by this test run. Silently
 * a no-op when debugfs/kmemleak is absent.
 */
static void clear_leaks(void)
{
	int fd;

	fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
	if (fd < 0)
		return;
	pretty_message("[+] Starting memory leak detection...");
	/*
	 * Write the whole "clear\n" command; the original passed a length
	 * of 5 and silently dropped the trailing newline of the 6-byte
	 * literal (cf. check_leaks(), which writes its command in full).
	 */
	write(fd, "clear\n", sizeof("clear\n") - 1);
	close(fd);
}
/*
 * Trigger a kmemleak scan and dump its report to stdout. If the report is
 * non-empty (sendfile copies at least one byte), treat it as fatal.
 * Silently a no-op when debugfs/kmemleak is absent.
 */
static void check_leaks(void)
{
	int fd;

	fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
	if (fd < 0)
		return;
	pretty_message("[+] Scanning for memory leaks...");
	sleep(2); /* Wait for any grace periods. */
	write(fd, "scan\n", 5);
	close(fd);

	fd = open("/sys/kernel/debug/kmemleak", O_RDONLY);
	if (fd < 0)
		return;
	/* 0x7ffff000 is the max sendfile count; copies the whole report. */
	if (sendfile(1, fd, NULL, 0x7ffff000) > 0)
		panic("Memory leaks encountered");
	close(fd);
}
/*
 * PID-1 entry point for the WireGuard test VM: bring up the console and
 * filesystems, seed the RNG and clock, run the module self-tests and the
 * userspace test driver, check for kernel memory leaks, then power off.
 */
int main(int argc, char *argv[])
{
	ensure_console();
	print_banner();
	mount_filesystems();
	seed_rng();
	set_time();
	kmod_selftests();
	enable_logging();
	clear_leaks();
	launch_tests();
	check_leaks();
	poweroff();
	return 1;	/* poweroff() never returns */
}
| linux-master | tools/testing/selftests/wireguard/qemu/init.c |
// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <stdnoreturn.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <getopt.h>
#include <signal.h>
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/limits.h>
static unsigned int ifindex;
static __u32 attached_prog_id;
static bool attached_tc;
/*
 * SIGINT/SIGTERM handler: detach whatever this process attached earlier
 * and exit. In --tc mode the TC ingress hook is destroyed; otherwise the
 * XDP program is detached with XDP_FLAGS_REPLACE so we only remove our
 * own program -- unless its fd can no longer be obtained, in which case
 * we fall back to an unconditional detach.
 */
static void noreturn cleanup(int sig)
{
	LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
	int prog_fd;
	int err;

	/* Nothing attached by us; nothing to undo. */
	if (attached_prog_id == 0)
		exit(0);
	if (attached_tc) {
		LIBBPF_OPTS(bpf_tc_hook, hook,
			    .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);

		err = bpf_tc_hook_destroy(&hook);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_tc_hook_destroy: %s\n", strerror(-err));
			fprintf(stderr, "Failed to destroy the TC hook\n");
			exit(1);
		}
		exit(0);
	}
	prog_fd = bpf_prog_get_fd_by_id(attached_prog_id);
	if (prog_fd < 0) {
		/* Can't pin down our program; detach whatever is there. */
		fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
		err = bpf_xdp_attach(ifindex, -1, 0, NULL);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n", strerror(-err));
			fprintf(stderr, "Failed to detach XDP program\n");
			exit(1);
		}
	} else {
		opts.old_prog_fd = prog_fd;
		err = bpf_xdp_attach(ifindex, -1, XDP_FLAGS_REPLACE, &opts);
		close(prog_fd);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_set_link_xdp_fd_opts: %s\n", strerror(-err));
			/* Not an error if already replaced by someone else. */
			if (err != -EEXIST) {
				fprintf(stderr, "Failed to detach XDP program\n");
				exit(1);
			}
		}
	}
	exit(0);
}
/*
 * Print the command-line synopsis to stderr and exit(1). Used for --help
 * and for every argument-validation failure.
 */
static noreturn void usage(const char *progname)
{
	fprintf(stderr, "Usage: %s [--iface <iface>|--prog <prog_id>] [--mss4 <mss ipv4> --mss6 <mss ipv6> --wscale <wscale> --ttl <ttl>] [--ports <port1>,<port2>,...] [--single] [--tc]\n",
		progname);
	exit(1);
}
/*
 * Parse @arg as a base-10 unsigned long. Any parse error, trailing
 * garbage, empty input, or a value above @limit exits via usage().
 */
static unsigned long parse_arg_ul(const char *progname, const char *arg, unsigned long limit)
{
	unsigned long value;
	char *end;

	errno = 0;
	value = strtoul(arg, &end, 10);
	if (errno != 0 || *end != '\0' || arg[0] == '\0' || value > limit)
		usage(progname);

	return value;
}
/*
 * Parse command-line arguments into the out-parameters. Exactly one of
 * --iface (*ifindex) or --prog (*prog_id) must be given. The four TCP/IP
 * option values (--mss4/--mss6/--wscale/--ttl) must be supplied all
 * together or not at all; when present they are packed into *tcpipopts
 * as mss6<<32 | ttl<<24 | wscale<<16 | mss4. Exits via usage() on any
 * invalid combination.
 */
static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *prog_id,
			  __u64 *tcpipopts, char **ports, bool *single, bool *tc)
{
	static struct option long_options[] = {
		{ "help", no_argument, NULL, 'h' },
		{ "iface", required_argument, NULL, 'i' },
		{ "prog", required_argument, NULL, 'x' },
		{ "mss4", required_argument, NULL, 4 },
		{ "mss6", required_argument, NULL, 6 },
		{ "wscale", required_argument, NULL, 'w' },
		{ "ttl", required_argument, NULL, 't' },
		{ "ports", required_argument, NULL, 'p' },
		{ "single", no_argument, NULL, 's' },
		{ "tc", no_argument, NULL, 'c' },
		{ NULL, 0, NULL, 0 },
	};
	unsigned long mss4, wscale, ttl;
	unsigned long long mss6;
	/* One bit per TCP/IP option seen: mss4, mss6, wscale, ttl. */
	unsigned int tcpipopts_mask = 0;

	if (argc < 2)
		usage(argv[0]);

	*ifindex = 0;
	*prog_id = 0;
	*tcpipopts = 0;
	*ports = NULL;
	*single = false;
	*tc = false;

	while (true) {
		int opt;

		opt = getopt_long(argc, argv, "", long_options, NULL);
		if (opt == -1)
			break;

		switch (opt) {
		case 'h':
			usage(argv[0]);
			break;
		case 'i':
			*ifindex = if_nametoindex(optarg);
			if (*ifindex == 0)
				usage(argv[0]);
			break;
		case 'x':
			*prog_id = parse_arg_ul(argv[0], optarg, UINT32_MAX);
			if (*prog_id == 0)
				usage(argv[0]);
			break;
		case 4:
			mss4 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
			tcpipopts_mask |= 1 << 0;
			break;
		case 6:
			mss6 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
			tcpipopts_mask |= 1 << 1;
			break;
		case 'w':
			/* 14 is the maximum valid TCP window scale. */
			wscale = parse_arg_ul(argv[0], optarg, 14);
			tcpipopts_mask |= 1 << 2;
			break;
		case 't':
			ttl = parse_arg_ul(argv[0], optarg, UINT8_MAX);
			tcpipopts_mask |= 1 << 3;
			break;
		case 'p':
			*ports = optarg;
			break;
		case 's':
			*single = true;
			break;
		case 'c':
			*tc = true;
			break;
		default:
			usage(argv[0]);
		}
	}

	if (optind < argc)
		usage(argv[0]);

	/* All four TCP/IP options, or none. */
	if (tcpipopts_mask == 0xf) {
		if (mss4 == 0 || mss6 == 0 || wscale == 0 || ttl == 0)
			usage(argv[0]);
		*tcpipopts = (mss6 << 32) | (ttl << 24) | (wscale << 16) | mss4;
	} else if (tcpipopts_mask != 0) {
		usage(argv[0]);
	}

	/* --iface and --prog are mutually exclusive, and one is required. */
	if (*ifindex != 0 && *prog_id != 0)
		usage(argv[0]);
	if (*ifindex == 0 && *prog_id == 0)
		usage(argv[0]);
}
/*
 * Load the companion BPF object ("<argv0>_kern.bpf.o") and attach either
 * its TC or its XDP syncookie program to the global ifindex. On success,
 * records the attached program id and installs SIGINT/SIGTERM handlers
 * that detach it. Returns 0 or a negative error; the BPF object is always
 * closed before returning (the kernel keeps the attached program alive).
 */
static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	char xdp_filename[PATH_MAX];
	struct bpf_program *prog;
	struct bpf_object *obj;
	int prog_fd;
	int err;

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.bpf.o", argv0);
	obj = bpf_object__open_file(xdp_filename, NULL);
	err = libbpf_get_error(obj);
	if (err < 0) {
		fprintf(stderr, "Error: bpf_object__open_file: %s\n", strerror(-err));
		return err;
	}

	err = bpf_object__load(obj);
	if (err < 0) {
		/* Original message wrongly named bpf_object__open_file here. */
		fprintf(stderr, "Error: bpf_object__load: %s\n", strerror(-err));
		goto out;	/* was: return err (leaked obj) */
	}

	prog = bpf_object__find_program_by_name(obj, tc ? "syncookie_tc" : "syncookie_xdp");
	if (!prog) {
		fprintf(stderr, "Error: bpf_object__find_program_by_name: program was not found\n");
		err = -ENOENT;
		goto out;	/* was: return -ENOENT (leaked obj) */
	}

	prog_fd = bpf_program__fd(prog);

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (err < 0) {
		fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n",
			strerror(-err));
		goto out;
	}
	attached_tc = tc;
	attached_prog_id = info.id;
	/* From here on, cleanup() knows how to detach on a signal. */
	signal(SIGINT, cleanup);
	signal(SIGTERM, cleanup);
	if (tc) {
		LIBBPF_OPTS(bpf_tc_hook, hook,
			    .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
		LIBBPF_OPTS(bpf_tc_opts, opts,
			    .handle = 1,
			    .priority = 1,
			    .prog_fd = prog_fd);

		err = bpf_tc_hook_create(&hook);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_tc_hook_create: %s\n",
				strerror(-err));
			goto fail;
		}
		err = bpf_tc_attach(&hook, &opts);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_tc_attach: %s\n",
				strerror(-err));
			goto fail;
		}

	} else {
		err = bpf_xdp_attach(ifindex, prog_fd,
				     XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n",
				strerror(-err));
			goto fail;
		}
	}
	err = 0;
out:
	bpf_object__close(obj);
	return err;
fail:
	/* Undo the handler installation; nothing is attached after all. */
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	attached_prog_id = 0;
	goto out;
}
/*
 * Given an attached program id, locate its "values" and "allowed_ports"
 * maps by name (the program exposes at most 8 maps) and return open fds
 * for them via the out-parameters. Returns 0 on success; on failure a
 * negative error is returned, both out fds are left at -1, and any fds
 * opened along the way are closed.
 */
static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports_map_fd)
{
	struct bpf_prog_info prog_info;
	__u32 map_ids[8];
	__u32 info_len;
	int prog_fd;
	int err;
	int i;

	*values_map_fd = -1;
	*ports_map_fd = -1;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
		return prog_fd;
	}

	prog_info = (struct bpf_prog_info) {
		.nr_map_ids = 8,
		.map_ids = (__u64)(unsigned long)map_ids,
	};
	info_len = sizeof(prog_info);

	err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
	if (err != 0) {
		fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n",
			strerror(-err));
		goto out;
	}

	if (prog_info.nr_map_ids < 2) {
		fprintf(stderr, "Error: Found %u BPF maps, expected at least 2\n",
			prog_info.nr_map_ids);
		err = -ENOENT;
		goto out;
	}

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		struct bpf_map_info map_info = {};
		int map_fd;

		err = bpf_map_get_fd_by_id(map_ids[i]);
		if (err < 0) {
			fprintf(stderr, "Error: bpf_map_get_fd_by_id: %s\n", strerror(-err));
			goto err_close_map_fds;
		}
		map_fd = err;

		info_len = sizeof(map_info);
		err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
		if (err != 0) {
			fprintf(stderr, "Error: bpf_map_get_info_by_fd: %s\n",
				strerror(-err));
			close(map_fd);
			goto err_close_map_fds;
		}
		/* Keep the two maps we care about; close everything else. */
		if (strcmp(map_info.name, "values") == 0) {
			*values_map_fd = map_fd;
			continue;
		}
		if (strcmp(map_info.name, "allowed_ports") == 0) {
			*ports_map_fd = map_fd;
			continue;
		}
		close(map_fd);
	}

	if (*values_map_fd != -1 && *ports_map_fd != -1) {
		err = 0;
		goto out;
	}

	err = -ENOENT;

err_close_map_fds:
	if (*values_map_fd != -1)
		close(*values_map_fd);
	if (*ports_map_fd != -1)
		close(*ports_map_fd);
	*values_map_fd = -1;
	*ports_map_fd = -1;

out:
	close(prog_fd);
	return err;
}
/*
 * Attach (or find an already-attached) syncookie program, optionally
 * program the allowed-ports list and packed TCP/IP option values into its
 * maps, then print the SYNACK counter: once with --single, otherwise
 * once per second until interrupted (cleanup() detaches on SIGINT/TERM).
 */
int main(int argc, char *argv[])
{
	int values_map_fd, ports_map_fd;
	__u64 tcpipopts;
	bool firstiter;
	__u64 prevcnt;
	__u32 prog_id;
	char *ports;
	bool single;
	int err = 0;
	bool tc;

	parse_options(argc, argv, &ifindex, &prog_id, &tcpipopts, &ports,
		      &single, &tc);

	if (prog_id == 0) {
		/* --iface mode: reuse an already-attached XDP program if any. */
		if (!tc) {
			err = bpf_xdp_query_id(ifindex, 0, &prog_id);
			if (err < 0) {
				fprintf(stderr, "Error: bpf_get_link_xdp_id: %s\n",
					strerror(-err));
				goto out;
			}
		}
		if (prog_id == 0) {
			err = syncookie_attach(argv[0], ifindex, tc);
			if (err < 0)
				goto out;
			prog_id = attached_prog_id;
		}
	}

	err = syncookie_open_bpf_maps(prog_id, &values_map_fd, &ports_map_fd);
	if (err < 0)
		goto out;

	if (ports) {
		__u16 port_last = 0;
		__u32 port_idx = 0;
		char *p = ports;

		fprintf(stderr, "Replacing allowed ports\n");

		/* Comma-separated list; each entry becomes one map slot. */
		while (p && *p != '\0') {
			char *token = strsep(&p, ",");
			__u16 port;

			port = parse_arg_ul(argv[0], token, UINT16_MAX);
			err = bpf_map_update_elem(ports_map_fd, &port_idx, &port, BPF_ANY);
			if (err != 0) {
				fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
				fprintf(stderr, "Failed to add port %u (index %u)\n",
					port, port_idx);
				goto out_close_maps;
			}
			fprintf(stderr, "Added port %u\n", port);
			port_idx++;
		}
		/* The in-map port list is zero-terminated. */
		err = bpf_map_update_elem(ports_map_fd, &port_idx, &port_last, BPF_ANY);
		if (err != 0) {
			fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
			fprintf(stderr, "Failed to add the terminator value 0 (index %u)\n",
				port_idx);
			goto out_close_maps;
		}
	}

	if (tcpipopts) {
		__u32 key = 0;

		fprintf(stderr, "Replacing TCP/IP options\n");

		err = bpf_map_update_elem(values_map_fd, &key, &tcpipopts, BPF_ANY);
		if (err != 0) {
			fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
			goto out_close_maps;
		}
	}

	/* Pure configuration run against a pre-attached program: done. */
	if ((ports || tcpipopts) && attached_prog_id == 0 && !single)
		goto out_close_maps;

	prevcnt = 0;
	firstiter = true;
	while (true) {
		__u32 key = 1;
		__u64 value;

		/* Key 1 of "values" holds the total generated-SYNACK count. */
		err = bpf_map_lookup_elem(values_map_fd, &key, &value);
		if (err != 0) {
			fprintf(stderr, "Error: bpf_map_lookup_elem: %s\n", strerror(-err));
			goto out_close_maps;
		}
		if (firstiter) {
			prevcnt = value;
			firstiter = false;
		}
		if (single) {
			printf("Total SYNACKs generated: %llu\n", value);
			break;
		}
		printf("SYNACKs generated: %llu (total %llu)\n", value - prevcnt, value);
		prevcnt = value;
		sleep(1);
	}

out_close_maps:
	close(values_map_fd);
	close(ports_map_fd);
out:
	return err == 0 ? 0 : 1;
}
| linux-master | tools/testing/selftests/bpf/xdp_synproxy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Randomized tests for eBPF longest-prefix-match maps
*
* This program runs randomized tests against the lpm-bpf-map. It implements a
* "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
* lists. The implementation should be pretty straightforward.
*
* Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
* the trie-based bpf-map implementation behaves the same way as tlpm.
*/
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bpf.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
/*
 * One prefix entry of the trivial LPM reference implementation: a singly
 * linked list node holding a prefix of @n_bits bits in the flexible
 * @key byte array ((n_bits + 7) / 8 bytes are allocated).
 */
struct tlpm_node {
	struct tlpm_node *next;
	size_t n_bits;
	uint8_t key[];
};
static struct tlpm_node *tlpm_match(struct tlpm_node *list,
const uint8_t *key,
size_t n_bits);
/*
 * Insert @key/@n_bits into @list, overwriting an existing entry with the
 * exact same prefix length if one matches; returns the (possibly new)
 * list head.
 */
static struct tlpm_node *tlpm_add(struct tlpm_node *list,
				  const uint8_t *key,
				  size_t n_bits)
{
	size_t n_bytes = (n_bits + 7) / 8;
	struct tlpm_node *entry;

	/* 'overwrite' an equivalent entry if one already exists */
	entry = tlpm_match(list, key, n_bits);
	if (entry && entry->n_bits == n_bits) {
		memcpy(entry->key, key, n_bytes);
		return list;
	}

	/* otherwise prepend a fresh node and make it the new head */
	entry = malloc(sizeof(*entry) + n_bytes);
	assert(entry);
	entry->next = list;
	entry->n_bits = n_bits;
	memcpy(entry->key, key, n_bytes);

	return entry;
}
/* Free every node of @list. */
static void tlpm_clear(struct tlpm_node *list)
{
	while (list) {
		struct tlpm_node *next = list->next;

		free(list);
		list = next;
	}
}
/*
 * Longest-prefix match of @key/@n_bits against @list: return the entry
 * whose whole prefix matches the leading bits of @key and is the longest
 * such prefix, or NULL when nothing matches.
 */
static struct tlpm_node *tlpm_match(struct tlpm_node *list,
				    const uint8_t *key,
				    size_t n_bits)
{
	struct tlpm_node *best = NULL;
	struct tlpm_node *node;

	for (node = list; node; node = node->next) {
		size_t bit = 0;

		/* count leading bits shared by @key and this entry */
		while (bit < n_bits && bit < node->n_bits) {
			uint8_t mask = 1 << (7 - bit % 8);

			if ((key[bit / 8] & mask) != (node->key[bit / 8] & mask))
				break;
			bit++;
		}

		/* whole prefix matched; keep it if it is the longest yet */
		if (bit >= node->n_bits && (!best || bit > best->n_bits))
			best = node;
	}

	return best;
}
/*
 * Remove the entry exactly matching @key/@n_bits from @list, if present,
 * and return the new list head. A non-existent prefix leaves the list
 * unchanged.
 */
static struct tlpm_node *tlpm_delete(struct tlpm_node *list,
				     const uint8_t *key,
				     size_t n_bits)
{
	struct tlpm_node *target = tlpm_match(list, key, n_bits);
	struct tlpm_node *prev;

	/* only delete on an exact-length match */
	if (!target || target->n_bits != n_bits)
		return list;

	if (target == list) {
		struct tlpm_node *head = target->next;

		free(target);
		return head;
	}

	for (prev = list; prev; prev = prev->next) {
		if (prev->next == target) {
			prev->next = target->next;
			free(target);
			return list;
		}
	}

	/* should never get here */
	assert(0);
	return list;
}
/*
 * Static sanity checks of the tlpm reference implementation itself:
 * matching against longer/shorter/differing keys, shadowing by a more
 * specific prefix, and deletion restoring the broader match.
 */
static void test_lpm_basic(void)
{
	struct tlpm_node *list = NULL, *t1, *t2;

	/* very basic, static tests to verify tlpm works as expected */

	assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));

	t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
	assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
	assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
	/* a 7-bit key cannot satisfy an 8-bit prefix */
	assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));

	t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
	assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
	assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));

	/* deleting the /16 re-exposes the /8 for full-length lookups */
	list = tlpm_delete(list, (uint8_t[]){ 0xff, 0xff }, 16);
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));

	list = tlpm_delete(list, (uint8_t[]){ 0xff }, 8);
	assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));

	tlpm_clear(list);
}
/*
 * Verify the tlpm implementation is order-independent: build one list
 * from random prefixes, a second list with the same data in reverse
 * insertion order, then check random lookups return equivalent entries.
 */
static void test_lpm_order(void)
{
	struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
	size_t i, j;

	/* Verify the tlpm implementation works correctly regardless of the
	 * order of entries. Insert a random set of entries into @l1, and copy
	 * the same data in reverse order into @l2. Then verify a lookup of
	 * random keys will yield the same result in both sets.
	 */

	for (i = 0; i < (1 << 12); ++i)
		l1 = tlpm_add(l1, (uint8_t[]){
					rand() % 0xff,
					rand() % 0xff,
				}, rand() % 16 + 1);

	/* walking l1 head-to-tail while prepending reverses the order */
	for (t1 = l1; t1; t1 = t1->next)
		l2 = tlpm_add(l2, t1->key, t1->n_bits);

	for (i = 0; i < (1 << 8); ++i) {
		uint8_t key[] = { rand() % 0xff, rand() % 0xff };

		t1 = tlpm_match(l1, key, 16);
		t2 = tlpm_match(l2, key, 16);

		/* nodes differ between lists, so compare length and bits */
		assert(!t1 == !t2);
		if (t1) {
			assert(t1->n_bits == t2->n_bits);
			for (j = 0; j < t1->n_bits; ++j)
				assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
				       (t2->key[j / 8] & (1 << (7 - j % 8))));
		}
	}

	tlpm_clear(l1);
	tlpm_clear(l2);
}
/*
 * Differential test: insert the same random prefixes into the tlpm
 * reference list and a BPF_MAP_TYPE_LPM_TRIE map of @keysize-byte keys,
 * run random full-length lookups against both, then delete half the
 * entries from each and repeat the comparison. Map values are the prefix
 * bytes followed by the prefix length, so results can be bit-compared.
 */
static void test_lpm_map(int keysize)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	/* volatile: keep the counters even though only the commented-out
	 * statistics printf at the bottom would consume them. */
	volatile size_t n_matches, n_matches_after_delete;
	size_t i, j, n_nodes, n_lookups;
	struct tlpm_node *t, *list = NULL;
	struct bpf_lpm_trie_key *key;
	uint8_t *data, *value;
	int r, map;

	/* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
	 * prefixes and insert it into both tlpm and bpf-lpm. Then run some
	 * randomized lookups and verify both maps return the same result.
	 */

	n_matches = 0;
	n_matches_after_delete = 0;
	n_nodes = 1 << 8;
	n_lookups = 1 << 16;

	data = alloca(keysize);
	memset(data, 0, keysize);

	value = alloca(keysize + 1);
	memset(value, 0, keysize + 1);

	key = alloca(sizeof(*key) + keysize);
	memset(key, 0, sizeof(*key) + keysize);

	map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
			     sizeof(*key) + keysize,
			     keysize + 1,
			     4096,
			     &opts);
	assert(map >= 0);

	for (i = 0; i < n_nodes; ++i) {
		/* value = random prefix bytes + prefix length in last byte */
		for (j = 0; j < keysize; ++j)
			value[j] = rand() & 0xff;
		value[keysize] = rand() % (8 * keysize + 1);

		/* add to tlpm first */
		list = tlpm_add(list, value, value[keysize]);

		/* then add to bpf-map */
		key->prefixlen = value[keysize];
		memcpy(key->data, value, keysize);
		r = bpf_map_update_elem(map, key, value, 0);
		assert(!r);
	}

	for (i = 0; i < n_lookups; ++i) {
		for (j = 0; j < keysize; ++j)
			data[j] = rand() & 0xff;

		/* check an exact-size lookup in both implementations */
		t = tlpm_match(list, data, 8 * keysize);

		key->prefixlen = 8 * keysize;
		memcpy(key->data, data, keysize);
		r = bpf_map_lookup_elem(map, key, value);
		assert(!r || errno == ENOENT);
		assert(!t == !!r);

		if (t) {
			++n_matches;
			/* matched prefix must be bit-identical in both */
			assert(t->n_bits == value[keysize]);
			for (j = 0; j < t->n_bits; ++j)
				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
				       (value[j / 8] & (1 << (7 - j % 8))));
		}
	}

	/* Remove the first half of the elements in the tlpm and the
	 * corresponding nodes from the bpf-lpm.  Then run the same
	 * large number of random lookups in both and make sure they match.
	 * Note: we need to count the number of nodes actually inserted
	 * since there may have been duplicates.
	 */
	for (i = 0, t = list; t; i++, t = t->next)
		;
	for (j = 0; j < i / 2; ++j) {
		key->prefixlen = list->n_bits;
		memcpy(key->data, list->key, keysize);
		r = bpf_map_delete_elem(map, key);
		assert(!r);

		list = tlpm_delete(list, list->key, list->n_bits);
		assert(list);
	}

	for (i = 0; i < n_lookups; ++i) {
		for (j = 0; j < keysize; ++j)
			data[j] = rand() & 0xff;

		t = tlpm_match(list, data, 8 * keysize);

		key->prefixlen = 8 * keysize;
		memcpy(key->data, data, keysize);
		r = bpf_map_lookup_elem(map, key, value);
		assert(!r || errno == ENOENT);
		assert(!t == !!r);

		if (t) {
			++n_matches_after_delete;
			assert(t->n_bits == value[keysize]);
			for (j = 0; j < t->n_bits; ++j)
				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
				       (value[j / 8] & (1 << (7 - j % 8))));
		}
	}

	close(map);
	tlpm_clear(list);

	/* With 255 random nodes in the map, we are pretty likely to match
	 * something on every lookup. For statistics, use this:
	 *
	 *   printf("          nodes: %zu\n"
	 *          "        lookups: %zu\n"
	 *          "        matches: %zu\n"
	 *          "matches(delete): %zu\n",
	 *          n_nodes, n_lookups, n_matches, n_matches_after_delete);
	 */
}
/* Test the implementation with some 'real world' examples */
static void test_lpm_ipaddr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
struct bpf_lpm_trie_key *key_ipv4;
struct bpf_lpm_trie_key *key_ipv6;
size_t key_size_ipv4;
size_t key_size_ipv6;
int map_fd_ipv4;
int map_fd_ipv6;
__u64 value;
key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
key_ipv4 = alloca(key_size_ipv4);
key_ipv6 = alloca(key_size_ipv6);
map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
key_size_ipv4, sizeof(value),
100, &opts);
assert(map_fd_ipv4 >= 0);
map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
key_size_ipv6, sizeof(value),
100, &opts);
assert(map_fd_ipv6 >= 0);
/* Fill data some IPv4 and IPv6 address ranges */
value = 1;
key_ipv4->prefixlen = 16;
inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
value = 2;
key_ipv4->prefixlen = 24;
inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
value = 3;
key_ipv4->prefixlen = 24;
inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
value = 5;
key_ipv4->prefixlen = 24;
inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
value = 4;
key_ipv4->prefixlen = 23;
inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
value = 0xdeadbeef;
key_ipv6->prefixlen = 64;
inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);
/* Set tprefixlen to maximum for lookups */
key_ipv4->prefixlen = 32;
key_ipv6->prefixlen = 128;
/* Test some lookups that should come back with a value */
inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
assert(value == 3);
inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
assert(value == 2);
inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
assert(value == 0xdeadbeef);
inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
assert(value == 0xdeadbeef);
/* Test some lookups that should not match any entry */
inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT);
close(map_fd_ipv4);
close(map_fd_ipv6);
}
/* Exercise element deletion in an LPM trie: deleting non-existent keys,
 * a plain leaf, a leaf whose removal collapses an intermediate node, the
 * root, and the last remaining node — verifying longest-prefix-match
 * lookups after every step.
 */
static void test_lpm_delete(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct bpf_lpm_trie_key *key;
	size_t key_size;
	int map_fd;
	__u64 value;
	/* key = common header + 4 bytes of IPv4 address data */
	key_size = sizeof(*key) + sizeof(__u32);
	key = alloca(key_size);
	map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
				key_size, sizeof(value),
				100, &opts);
	assert(map_fd >= 0);
	/* Add nodes:
	 * 192.168.0.0/16   (1)
	 * 192.168.0.0/24   (2)
	 * 192.168.128.0/24 (3)
	 * 192.168.1.0/24   (4)
	 *
	 *         (1)
	 *        /   \
	 *     (IM)    (3)
	 *    /   \
	 *   (2)  (4)
	 */
	value = 1;
	key->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
	value = 2;
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
	value = 3;
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.128.0", key->data);
	assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
	value = 4;
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.1.0", key->data);
	assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
	/* remove non-existent node */
	key->prefixlen = 32;
	inet_pton(AF_INET, "10.0.0.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
	key->prefixlen = 30; // unused prefix so far
	inet_pton(AF_INET, "192.255.0.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
	key->prefixlen = 16; // same prefix as the root node
	inet_pton(AF_INET, "192.255.0.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
	/* assert initial lookup */
	key->prefixlen = 32;
	inet_pton(AF_INET, "192.168.0.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
	assert(value == 2);
	/* remove leaf node */
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == 0);
	/* after deleting /24 the lookup falls back to the /16 node (1) */
	key->prefixlen = 32;
	inet_pton(AF_INET, "192.168.0.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
	assert(value == 1);
	/* remove leaf (and intermediary) node */
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.1.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == 0);
	key->prefixlen = 32;
	inet_pton(AF_INET, "192.168.1.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
	assert(value == 1);
	/* remove root node */
	key->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == 0);
	key->prefixlen = 32;
	inet_pton(AF_INET, "192.168.128.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
	assert(value == 3);
	/* remove last node */
	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.128.0", key->data);
	assert(bpf_map_delete_elem(map_fd, key) == 0);
	key->prefixlen = 32;
	inet_pton(AF_INET, "192.168.128.1", key->data);
	assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
	close(map_fd);
}
/* Verify bpf_map_get_next_key() iteration over an LPM trie.  Keys are
 * expected back in post-order (most-specific prefixes first, the root
 * last); an empty trie or exhausted iteration returns -ENOENT, and a
 * non-matching cursor key restarts from the first post-order element.
 */
static void test_lpm_get_next_key(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct bpf_lpm_trie_key *key_p, *next_key_p;
	size_t key_size;
	__u32 value = 0;
	int map_fd;
	key_size = sizeof(*key_p) + sizeof(__u32);
	key_p = alloca(key_size);
	next_key_p = alloca(key_size);
	map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts);
	assert(map_fd >= 0);
	/* empty tree. get_next_key should return ENOENT */
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT);
	/* get and verify the first key, get the second one should fail. */
	key_p->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key_p->data);
	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
	memset(key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
	/* no exact matching key should get the first one in post order. */
	key_p->prefixlen = 8;
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168);
	/* add one more element (total two) */
	key_p->prefixlen = 24;
	inet_pton(AF_INET, "192.168.128.0", key_p->data);
	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
	memset(key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168 && key_p->data[2] == 128);
	memset(next_key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
	/* Add one more element (total three) */
	key_p->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key_p->data);
	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
	memset(key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168 && key_p->data[2] == 0);
	memset(next_key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
	/* Add one more element (total four) */
	key_p->prefixlen = 24;
	inet_pton(AF_INET, "192.168.1.0", key_p->data);
	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
	memset(key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168 && key_p->data[2] == 0);
	memset(next_key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
	/* Add one more element (total five) */
	key_p->prefixlen = 28;
	inet_pton(AF_INET, "192.168.1.128", key_p->data);
	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
	memset(key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
	       key_p->data[1] == 168 && key_p->data[2] == 0);
	memset(next_key_p, 0, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1 &&
	       next_key_p->data[3] == 128);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168);
	memcpy(key_p, next_key_p, key_size);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
	/* no exact matching key should return the first one in post order */
	key_p->prefixlen = 22;
	inet_pton(AF_INET, "192.168.1.0", key_p->data);
	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
	       next_key_p->data[1] == 168 && next_key_p->data[2] == 0);
	close(map_fd);
}
#define MAX_TEST_KEYS 4
/* Work description handed to each stress thread in test_lpm_multi_thread().
 * All four threads get an identical copy except for ->cmd.
 */
struct lpm_mt_test_info {
	int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */
	int iter;   /* number of passes over key[] */
	int map_fd; /* shared LPM trie fd */
	struct {
		__u32 prefixlen;
		__u32 data; /* IPv4 address in network byte order */
	} key[MAX_TEST_KEYS];
};
/* Thread body for test_lpm_multi_thread(): repeatedly issue one map
 * command (selected by info->cmd) over the shared key set, walking the
 * keys forward for the first half of the iterations and backward for the
 * second half.  Returns @arg via pthread_exit() so the joiner can verify
 * which thread finished.
 *
 * Fix vs. the original: next_key_p was alloca()'d *inside* the loop;
 * alloca memory is only released on function return, so the get_next_key
 * thread grew its stack by key_size on every one of iter * MAX_TEST_KEYS
 * iterations.  The buffer is now allocated once, up front.
 */
static void *lpm_test_command(void *arg)
{
	struct lpm_mt_test_info *info = arg;
	struct bpf_lpm_trie_key *key_p, *next_key_p;
	int key_size, iter, pos, idx, ret;

	key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
	key_p = alloca(key_size);
	next_key_p = alloca(key_size);
	for (iter = 0; iter < info->iter; iter++) {
		for (pos = 0; pos < MAX_TEST_KEYS; pos++) {
			/* first half of iterations in forward order,
			 * and second half in backward order.
			 */
			idx = (iter < (info->iter / 2)) ? pos : MAX_TEST_KEYS - pos - 1;
			key_p->prefixlen = info->key[idx].prefixlen;
			memcpy(key_p->data, &info->key[idx].data, sizeof(__u32));
			switch (info->cmd) {
			case 0: {
				__u32 value = idx;

				/* update must succeed */
				assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0);
				break;
			}
			case 1:
				/* key may have been deleted by a prior pass */
				ret = bpf_map_delete_elem(info->map_fd, key_p);
				assert(ret == 0 || errno == ENOENT);
				break;
			case 2: {
				__u32 value;

				/* key may not (yet) exist under concurrency */
				ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
				assert(ret == 0 || errno == ENOENT);
				break;
			}
			default:
				ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
				assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
				break;
			}
		}
	}
	// Pass successful exit info back to the main thread
	pthread_exit((void *)info);
}
static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd)
{
info->iter = 2000;
info->map_fd = map_fd;
info->key[0].prefixlen = 16;
inet_pton(AF_INET, "192.168.0.0", &info->key[0].data);
info->key[1].prefixlen = 24;
inet_pton(AF_INET, "192.168.0.0", &info->key[1].data);
info->key[2].prefixlen = 24;
inet_pton(AF_INET, "192.168.128.0", &info->key[2].data);
info->key[3].prefixlen = 24;
inet_pton(AF_INET, "192.168.1.0", &info->key[3].data);
}
/* Hammer a single LPM trie from four concurrent threads, each running a
 * different command (update, delete, lookup, get_next_key) over the same
 * key set, to shake out races in the trie implementation.
 *
 * Fix vs. the original: the bpf_map_create() result was never checked,
 * so a creation failure handed a negative fd to all four threads.
 */
static void test_lpm_multi_thread(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct lpm_mt_test_info info[4];
	size_t key_size, value_size;
	pthread_t thread_id[4];
	int i, map_fd;
	void *ret;

	/* create a trie */
	value_size = sizeof(__u32);
	key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
	map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
	/* fail fast instead of handing a bad fd to the worker threads */
	assert(map_fd >= 0);
	/* create 4 threads to test update, delete, lookup and get_next_key */
	setup_lpm_mt_test_info(&info[0], map_fd);
	for (i = 0; i < 4; i++) {
		if (i != 0)
			memcpy(&info[i], &info[0], sizeof(info[i]));
		info[i].cmd = i;
		assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0);
	}
	/* each thread must exit successfully and return its own info slot */
	for (i = 0; i < 4; i++)
		assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]);
	close(map_fd);
}
/* Driver: run every LPM trie selftest with a fixed PRNG seed so runs are
 * reproducible, and report success on stdout.
 */
int main(void)
{
	int keylen;

	/* we want predictable, pseudo random tests */
	srand(0xf00ba1);
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	test_lpm_basic();
	test_lpm_order();
	/* Test with 8, 16, 24, 32, ... 128 bit prefix length */
	for (keylen = 1; keylen <= 16; keylen++)
		test_lpm_map(keylen);
	test_lpm_ipaddr();
	test_lpm_delete();
	test_lpm_get_next_key();
	test_lpm_multi_thread();
	printf("test_lpm: OK\n");
	return 0;
}
| linux-master | tools/testing/selftests/bpf/test_lpm_map.c |
// SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <bpf/bpf.h>
#include <linux/filter.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
/* End-to-end test for (per-cpu) cgroup local storage.
 *
 * Loads a hand-assembled egress program that bumps a per-cpu counter and
 * atomically bumps a shared counter in cgroup storage, attaches it to a
 * fresh test cgroup, generates traffic with ping, and verifies both
 * counters from userspace.  The program returns (shared counter & 1), so
 * every second egress packet is dropped.
 *
 * Fixes vs. the original: the "goto out" failure paths (map/program
 * creation) leaked percpu_value, and the cgroup_setup_and_join() result
 * was never checked.  free() now sits under the "out" label so every
 * path releases the buffer (free(NULL) is a no-op).
 */
int main(int argc, char **argv)
{
	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */
		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_get_local_storage),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_get_local_storage),
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = ARRAY_SIZE(prog);
	int error = EXIT_FAILURE;
	int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
	struct bpf_cgroup_storage_key key;
	unsigned long long value;
	unsigned long long *percpu_value;
	int cpu, nproc;

	nproc = bpf_num_possible_cpus();
	percpu_value = malloc(sizeof(*percpu_value) * nproc);
	if (!percpu_value) {
		printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
		goto out;
	}
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
				sizeof(value), 0, NULL);
	if (map_fd < 0) {
		printf("Failed to create map: %s\n", strerror(errno));
		goto out;
	}
	percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
				       sizeof(key), sizeof(value), 0, NULL);
	if (percpu_map_fd < 0) {
		printf("Failed to create map: %s\n", strerror(errno));
		goto out;
	}
	/* patch the two map fds into the BPF_LD_MAP_FD pseudo instructions */
	prog[0].imm = percpu_map_fd;
	prog[7].imm = map_fd;
	prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
					prog, insns_cnt, "GPL", 0,
					bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (prog_fd < 0) {
		printf("Failed to load bpf program: %s\n", bpf_log_buf);
		goto out;
	}
	cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
	if (cgroup_fd < 0)
		goto err;
	/* Attach the bpf program */
	if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
		printf("Failed to attach bpf program\n");
		goto err;
	}
	/* attaching creates the storage; its key must now be enumerable */
	if (bpf_map_get_next_key(map_fd, NULL, &key)) {
		printf("Failed to get the first key in cgroup storage\n");
		goto err;
	}
	if (bpf_map_lookup_elem(map_fd, &key, &value)) {
		printf("Failed to lookup cgroup storage 0\n");
		goto err;
	}
	/* seed the per-cpu counters so the final sum is predictable */
	for (cpu = 0; cpu < nproc; cpu++)
		percpu_value[cpu] = 1000;
	if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) {
		printf("Failed to update the data in the cgroup storage\n");
		goto err;
	}
	/* Every second packet should be dropped */
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
	/* Check the counter in the cgroup local storage */
	if (bpf_map_lookup_elem(map_fd, &key, &value)) {
		printf("Failed to lookup cgroup storage\n");
		goto err;
	}
	if (value != 3) {
		printf("Unexpected data in the cgroup storage: %llu\n", value);
		goto err;
	}
	/* Bump the counter in the cgroup local storage */
	value++;
	if (bpf_map_update_elem(map_fd, &key, &value, 0)) {
		printf("Failed to update the data in the cgroup storage\n");
		goto err;
	}
	/* Every second packet should be dropped */
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
	assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
	/* Check the final value of the counter in the cgroup local storage */
	if (bpf_map_lookup_elem(map_fd, &key, &value)) {
		printf("Failed to lookup the cgroup storage\n");
		goto err;
	}
	if (value != 7) {
		printf("Unexpected data in the cgroup storage: %llu\n", value);
		goto err;
	}
	/* Check the final value of the counter in the percpu local storage */
	for (cpu = 0; cpu < nproc; cpu++)
		percpu_value[cpu] = 0;
	if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) {
		printf("Failed to lookup the per-cpu cgroup storage\n");
		goto err;
	}
	value = 0;
	for (cpu = 0; cpu < nproc; cpu++)
		value += percpu_value[cpu];
	/* 1000 seeded per cpu + 6 packets counted by the program */
	if (value != nproc * 1000 + 6) {
		printf("Unexpected data in the per-cpu cgroup storage\n");
		goto err;
	}
	error = 0;
	printf("test_cgroup_storage:PASS\n");
err:
	cleanup_cgroup_environment();
out:
	free(percpu_value); /* free(NULL) is a no-op, safe on every path */
	return error;
}
| linux-master | tools/testing/selftests/bpf/test_cgroup_storage.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Testsuite for eBPF maps
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "test_maps.h"
#include "testing_helpers.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
int skips;
static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) };
/* Basic coverage for BPF_MAP_TYPE_HASH with max_entries == 2:
 * update flag semantics (BPF_ANY/BPF_NOEXIST/BPF_EXIST and an invalid
 * flag), lookup, lookup-and-delete, the E2BIG limit, key iteration via
 * get_next_key, and deletion down to an empty map.
 */
static void test_hashmap(unsigned int task, void *data)
{
	long long key, next_key, first_key, value;
	int fd;
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), 2, &map_opts);
	if (fd < 0) {
		printf("Failed to create hashmap '%s'!\n", strerror(errno));
		exit(1);
	}
	key = 1;
	value = 1234;
	/* Insert key=1 element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
	value = 0;
	/* BPF_NOEXIST means add new element if it doesn't exist. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       /* key=1 already exists. */
	       errno == EEXIST);
	/* -1 is an invalid flag. */
	assert(bpf_map_update_elem(fd, &key, &value, -1) < 0 &&
	       errno == EINVAL);
	/* Check that key=1 can be found. */
	assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
	key = 2;
	value = 1234;
	/* Insert key=2 element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
	/* Check that key=2 matches the value and delete it */
	assert(bpf_map_lookup_and_delete_elem(fd, &key, &value) == 0 && value == 1234);
	/* Check that key=2 is not found. */
	assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
	/* BPF_EXIST means update existing element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
	       /* key=2 is not there. */
	       errno == ENOENT);
	/* Insert key=2 element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
	/* key=1 and key=2 were inserted, check that key=0 cannot be
	 * inserted due to max_entries limit.
	 */
	key = 0;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       errno == E2BIG);
	/* Update existing element, though the map is full. */
	key = 1;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
	key = 2;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
	key = 3;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       errno == E2BIG);
	/* Check that key = 0 doesn't exist. */
	key = 0;
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
	/* Iterate over two elements. */
	/* hash iteration order is unspecified, so accept either key first */
	assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
	       (first_key == 1 || first_key == 2));
	assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
	       (next_key == first_key));
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
	       (next_key == 1 || next_key == 2) &&
	       (next_key != first_key));
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
	       errno == ENOENT);
	/* Delete both elements. */
	key = 1;
	assert(bpf_map_delete_elem(fd, &key) == 0);
	key = 2;
	assert(bpf_map_delete_elem(fd, &key) == 0);
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
	key = 0;
	/* Check that map is empty. */
	assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
	       errno == ENOENT);
	assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
	       errno == ENOENT);
	close(fd);
}
/* Sweep hashmap creation across power-of-two key sizes (1..512) and
 * value sizes (1..256KiB); bail out quietly once the kernel reports
 * ENOMEM, fail hard on any other error.
 */
static void test_hashmap_sizes(unsigned int task, void *data)
{
	int key_size, value_size;
	int fd;

	for (key_size = 1; key_size <= 512; key_size <<= 1) {
		for (value_size = 1; value_size <= 1 << 18; value_size <<= 1) {
			fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, key_size,
					    value_size, 2, &map_opts);
			if (fd < 0) {
				if (errno == ENOMEM)
					return;
				printf("Failed to create hashmap key=%d value=%d '%s'\n",
				       key_size, value_size, strerror(errno));
				exit(1);
			}
			close(fd);
			usleep(10); /* give kernel time to destroy */
		}
	}
}
/* BPF_MAP_TYPE_PERCPU_HASH coverage: per-cpu value layout via the
 * BPF_DECLARE_PERCPU/bpf_percpu helpers, update flag semantics, the
 * E2BIG limit, iteration with per-cpu value verification, and deletion.
 * expected_key_mask tracks which keys (1 and/or 2) are currently in the
 * map so iteration can be checked without assuming an order.
 */
static void test_hashmap_percpu(unsigned int task, void *data)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	BPF_DECLARE_PERCPU(long, value);
	long long key, next_key, first_key;
	int expected_key_mask = 0;
	int fd, i;
	fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, NULL, sizeof(key),
			    sizeof(bpf_percpu(value, 0)), 2, &map_opts);
	if (fd < 0) {
		printf("Failed to create hashmap '%s'!\n", strerror(errno));
		exit(1);
	}
	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(value, i) = i + 100;
	key = 1;
	/* Insert key=1 element. */
	assert(!(expected_key_mask & key));
	assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
	/* Lookup and delete elem key=1 and check value. */
	assert(bpf_map_lookup_and_delete_elem(fd, &key, value) == 0 &&
	       bpf_percpu(value,0) == 100);
	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(value,i) = i + 100;
	/* Insert key=1 element which should not exist. */
	assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
	expected_key_mask |= key;
	/* BPF_NOEXIST means add new element if it doesn't exist. */
	assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
	       /* key=1 already exists. */
	       errno == EEXIST);
	/* -1 is an invalid flag. */
	assert(bpf_map_update_elem(fd, &key, value, -1) < 0 &&
	       errno == EINVAL);
	/* Check that key=1 can be found. Value could be 0 if the lookup
	 * was run from a different CPU.
	 */
	bpf_percpu(value, 0) = 1;
	assert(bpf_map_lookup_elem(fd, &key, value) == 0 &&
	       bpf_percpu(value, 0) == 100);
	key = 2;
	/* Check that key=2 is not found. */
	assert(bpf_map_lookup_elem(fd, &key, value) < 0 && errno == ENOENT);
	/* BPF_EXIST means update existing element. */
	assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) < 0 &&
	       /* key=2 is not there. */
	       errno == ENOENT);
	/* Insert key=2 element. */
	assert(!(expected_key_mask & key));
	assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
	expected_key_mask |= key;
	/* key=1 and key=2 were inserted, check that key=0 cannot be
	 * inserted due to max_entries limit.
	 */
	key = 0;
	assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) < 0 &&
	       errno == E2BIG);
	/* Check that key = 0 doesn't exist. */
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
	/* Iterate over two elements. */
	assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 &&
	       ((expected_key_mask & first_key) == first_key));
	/* walk the map, ticking each seen key off expected_key_mask */
	while (!bpf_map_get_next_key(fd, &key, &next_key)) {
		if (first_key) {
			assert(next_key == first_key);
			first_key = 0;
		}
		assert((expected_key_mask & next_key) == next_key);
		expected_key_mask &= ~next_key;
		assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
		for (i = 0; i < nr_cpus; i++)
			assert(bpf_percpu(value, i) == i + 100);
		key = next_key;
	}
	assert(errno == ENOENT);
	/* Update with BPF_EXIST. */
	key = 1;
	assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
	/* Delete both elements. */
	key = 1;
	assert(bpf_map_delete_elem(fd, &key) == 0);
	key = 2;
	assert(bpf_map_delete_elem(fd, &key) == 0);
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == ENOENT);
	key = 0;
	/* Check that map is empty. */
	assert(bpf_map_get_next_key(fd, NULL, &next_key) < 0 &&
	       errno == ENOENT);
	assert(bpf_map_get_next_key(fd, &key, &next_key) < 0 &&
	       errno == ENOENT);
	close(fd);
}
#define VALUE_SIZE 3
/* Create a hashmap with @max_entries slots and fill it completely with
 * key == i, value[0] == i for i in [0, max_entries).  Returns the map fd;
 * aborts via CHECK() on any failure.
 *
 * Fix vs. the original: bpf_map_update_elem() under LIBBPF_STRICT_ALL
 * returns a *negative* errno, so strerror(ret) printed "Unknown error -N";
 * pass the negated code instead.
 */
static int helper_fill_hashmap(int max_entries)
{
	int i, fd, ret;
	long long key, value[VALUE_SIZE] = {};
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
			    max_entries, &map_opts);
	CHECK(fd < 0,
	      "failed to create hashmap",
	      "err: %s, flags: 0x%x\n", strerror(errno), map_opts.map_flags);
	for (i = 0; i < max_entries; i++) {
		key = i; value[0] = key;
		ret = bpf_map_update_elem(fd, &key, value, BPF_NOEXIST);
		CHECK(ret != 0,
		      "can't update hashmap",
		      "err: %s\n", strerror(-ret));
	}
	return fd;
}
/* Walk a fully-populated 10000-entry hashmap three times with
 * get_next_key: first a plain count+lookup pass, then a pass that bumps
 * every value while iterating (update with BPF_EXIST), then a final pass
 * verifying each value was incremented exactly once.
 */
static void test_hashmap_walk(unsigned int task, void *data)
{
	int fd, i, max_entries = 10000;
	long long key, value[VALUE_SIZE], next_key;
	bool next_key_valid = true;
	fd = helper_fill_hashmap(max_entries);
	/* pass 1: every key must be visitable and look-up-able */
	for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
					 &next_key) == 0; i++) {
		key = next_key;
		assert(bpf_map_lookup_elem(fd, &key, value) == 0);
	}
	assert(i == max_entries);
	/* pass 2: increment value[0] of every element in place */
	assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
	for (i = 0; next_key_valid; i++) {
		next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
		assert(bpf_map_lookup_elem(fd, &key, value) == 0);
		value[0]++;
		assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
		key = next_key;
	}
	assert(i == max_entries);
	/* pass 3: helper filled value[0] == key, so now value[0] == key + 1 */
	for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
					 &next_key) == 0; i++) {
		key = next_key;
		assert(bpf_map_lookup_elem(fd, &key, value) == 0);
		assert(value[0] - 1 == key);
	}
	assert(i == max_entries);
	close(fd);
}
/* With BPF_F_ZERO_SEED two identically-filled hashmaps must hash — and
 * therefore iterate — identically; walk both maps in lockstep and check
 * that get_next_key yields the same key sequence.
 */
static void test_hashmap_zero_seed(void)
{
	int i, first, second, old_flags;
	long long key, next_first, next_second;
	/* temporarily force a deterministic hash seed for both maps */
	old_flags = map_opts.map_flags;
	map_opts.map_flags |= BPF_F_ZERO_SEED;
	first = helper_fill_hashmap(3);
	second = helper_fill_hashmap(3);
	for (i = 0; ; i++) {
		void *key_ptr = !i ? NULL : &key;
		if (bpf_map_get_next_key(first, key_ptr, &next_first) != 0)
			break;
		CHECK(bpf_map_get_next_key(second, key_ptr, &next_second) != 0,
		      "next_key for second map must succeed",
		      "key_ptr: %p", key_ptr);
		CHECK(next_first != next_second,
		      "keys must match",
		      "i: %d first: %lld second: %lld\n", i,
		      next_first, next_second);
		key = next_first;
	}
	map_opts.map_flags = old_flags;
	close(first);
	close(second);
}
/* BPF_MAP_TYPE_ARRAY coverage with 2 slots: arrays pre-exist all keys
 * (so BPF_NOEXIST fails with EEXIST and unset slots read back as 0),
 * out-of-range keys fail with E2BIG on update / ENOENT on lookup,
 * iteration is in index order, and deletion is unsupported (EINVAL).
 */
static void test_arraymap(unsigned int task, void *data)
{
	int key, next_key, fd;
	long long value;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value), 2, NULL);
	if (fd < 0) {
		printf("Failed to create arraymap '%s'!\n", strerror(errno));
		exit(1);
	}
	key = 1;
	value = 1234;
	/* Insert key=1 element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
	value = 0;
	/* array slots always "exist", so BPF_NOEXIST can never succeed */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       errno == EEXIST);
	/* Check that key=1 can be found. */
	assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
	key = 0;
	/* Check that key=0 is also found and zero initialized. */
	assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
	/* key=0 and key=1 were inserted, check that key=2 cannot be inserted
	 * due to max_entries limit.
	 */
	key = 2;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) < 0 &&
	       errno == E2BIG);
	/* Check that key = 2 doesn't exist. */
	assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
	/* Iterate over two elements. */
	assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
	       next_key == 0);
	assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
	       next_key == 0);
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
	       next_key == 1);
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
	       errno == ENOENT);
	/* Delete shouldn't succeed. */
	key = 1;
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
	close(fd);
}
/* BPF_MAP_TYPE_PERCPU_ARRAY coverage: same array semantics as
 * test_arraymap() (pre-existing slots, E2BIG beyond max_entries, no
 * delete) but with per-cpu values accessed via the bpf_percpu helper.
 */
static void test_arraymap_percpu(unsigned int task, void *data)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	BPF_DECLARE_PERCPU(long, values);
	int key, next_key, fd, i;
	fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key),
			    sizeof(bpf_percpu(values, 0)), 2, NULL);
	if (fd < 0) {
		printf("Failed to create arraymap '%s'!\n", strerror(errno));
		exit(1);
	}
	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(values, i) = i + 100;
	key = 1;
	/* Insert key=1 element. */
	assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
	bpf_percpu(values, 0) = 0;
	/* array slots always "exist", so BPF_NOEXIST can never succeed */
	assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) < 0 &&
	       errno == EEXIST);
	/* Check that key=1 can be found. */
	assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
	       bpf_percpu(values, 0) == 100);
	key = 0;
	/* Check that key=0 is also found and zero initialized. */
	assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
	       bpf_percpu(values, 0) == 0 &&
	       bpf_percpu(values, nr_cpus - 1) == 0);
	/* Check that key=2 cannot be inserted due to max_entries limit. */
	key = 2;
	assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) < 0 &&
	       errno == E2BIG);
	/* Check that key = 2 doesn't exist. */
	assert(bpf_map_lookup_elem(fd, &key, values) < 0 && errno == ENOENT);
	/* Iterate over two elements. */
	assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 &&
	       next_key == 0);
	assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
	       next_key == 0);
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
	       next_key == 1);
	assert(bpf_map_get_next_key(fd, &next_key, &next_key) < 0 &&
	       errno == ENOENT);
	/* Delete shouldn't succeed. */
	key = 1;
	assert(bpf_map_delete_elem(fd, &key) < 0 && errno == EINVAL);
	close(fd);
}
/* Stress the per-cpu array with 2000 keys: write the same per-cpu value
 * pattern (cpu index + 10) to every key, then read every key back and
 * verify all cpus' values round-trip intact.
 */
static void test_arraymap_percpu_many_keys(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	BPF_DECLARE_PERCPU(long, values);
	/* nr_keys is not too large otherwise the test stresses percpu
	 * allocator more than anything else
	 */
	unsigned int nr_keys = 2000;
	int key, fd, i;
	fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key),
			    sizeof(bpf_percpu(values, 0)), nr_keys, NULL);
	if (fd < 0) {
		printf("Failed to create per-cpu arraymap '%s'!\n",
		       strerror(errno));
		exit(1);
	}
	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(values, i) = i + 10;
	for (key = 0; key < nr_keys; key++)
		assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
	for (key = 0; key < nr_keys; key++) {
		/* clear the buffer so the lookup must really fill it */
		for (i = 0; i < nr_cpus; i++)
			bpf_percpu(values, i) = 0;
		assert(bpf_map_lookup_elem(fd, &key, values) == 0);
		for (i = 0; i < nr_cpus; i++)
			assert(bpf_percpu(values, i) == i + 10);
	}
	close(fd);
}
/* Smoke test: creating a BPF_MAP_TYPE_DEVMAP must succeed; nothing more
 * can be exercised here without real net devices.
 */
static void test_devmap(unsigned int task, void *data)
{
	__u32 key, value;
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, NULL, sizeof(key), sizeof(value), 2, NULL);
	if (map_fd < 0) {
		printf("Failed to create devmap '%s'!\n", strerror(errno));
		exit(1);
	}
	close(map_fd);
}
/* Smoke test: creating a BPF_MAP_TYPE_DEVMAP_HASH must succeed; nothing
 * more can be exercised here without real net devices.
 */
static void test_devmap_hash(unsigned int task, void *data)
{
	__u32 key, value;
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, NULL, sizeof(key), sizeof(value), 2, NULL);
	if (map_fd < 0) {
		printf("Failed to create devmap_hash '%s'!\n", strerror(errno));
		exit(1);
	}
	close(map_fd);
}
/* BPF_MAP_TYPE_QUEUE coverage (FIFO): key size must be 0, fill to
 * capacity (E2BIG past it), peek via lookup, BPF_EXIST overwrites the
 * oldest entries when full, pop via lookup_and_delete in insertion
 * order, and delete/get_next_key are unsupported (EINVAL).
 */
static void test_queuemap(unsigned int task, void *data)
{
	const int MAP_SIZE = 32;
	__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
	int fd, i;
	/* Fill test values to be used */
	for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
		vals[i] = rand();
	/* Invalid key size */
	fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 4, sizeof(val), MAP_SIZE, &map_opts);
	assert(fd < 0 && errno == EINVAL);
	fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, sizeof(val), MAP_SIZE, &map_opts);
	/* Queue map does not support BPF_F_NO_PREALLOC */
	if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
		assert(fd < 0 && errno == EINVAL);
		return;
	}
	if (fd < 0) {
		printf("Failed to create queuemap '%s'!\n", strerror(errno));
		exit(1);
	}
	/* Push MAP_SIZE elements */
	for (i = 0; i < MAP_SIZE; i++)
		assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
	/* Check that element cannot be pushed due to max_entries limit */
	assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
	       errno == E2BIG);
	/* Peek element */
	/* FIFO: peek sees the oldest element, vals[0] */
	assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[0]);
	/* Replace half elements */
	/* BPF_EXIST on a full queue evicts the oldest entry per push */
	for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
		assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
	/* Pop all elements */
	for (i = MAP_SIZE/2; i < MAP_SIZE + MAP_SIZE/2; i++)
		assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
		       val == vals[i]);
	/* Check that there are not elements left */
	assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
	       errno == ENOENT);
	/* Check that non supported functions set errno to EINVAL */
	assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
	assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
	close(fd);
}
/* BPF_MAP_TYPE_STACK coverage (LIFO): key size must be 0, fill to
 * capacity (E2BIG past it), peek via lookup sees the newest element,
 * BPF_EXIST overwrites when full, pop via lookup_and_delete in reverse
 * insertion order, and delete/get_next_key are unsupported (EINVAL).
 */
static void test_stackmap(unsigned int task, void *data)
{
	const int MAP_SIZE = 32;
	__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
	int fd, i;
	/* Fill test values to be used */
	for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
		vals[i] = rand();
	/* Invalid key size */
	fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 4, sizeof(val), MAP_SIZE, &map_opts);
	assert(fd < 0 && errno == EINVAL);
	fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 0, sizeof(val), MAP_SIZE, &map_opts);
	/* Stack map does not support BPF_F_NO_PREALLOC */
	if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
		assert(fd < 0 && errno == EINVAL);
		return;
	}
	if (fd < 0) {
		printf("Failed to create stackmap '%s'!\n", strerror(errno));
		exit(1);
	}
	/* Push MAP_SIZE elements */
	for (i = 0; i < MAP_SIZE; i++)
		assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
	/* Check that element cannot be pushed due to max_entries limit */
	assert(bpf_map_update_elem(fd, NULL, &val, 0) < 0 &&
	       errno == E2BIG);
	/* Peek element */
	/* LIFO: peek sees the most recent push, vals[i - 1] */
	assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[i - 1]);
	/* Replace half elements */
	for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
		assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
	/* Pop all elements */
	for (i = MAP_SIZE + MAP_SIZE/2 - 1; i >= MAP_SIZE/2; i--)
		assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
		       val == vals[i]);
	/* Check that there are not elements left */
	assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) < 0 &&
	       errno == ENOENT);
	/* Check that non supported functions set errno to EINVAL */
	assert(bpf_map_delete_elem(fd, NULL) < 0 && errno == EINVAL);
	assert(bpf_map_get_next_key(fd, NULL, NULL) < 0 && errno == EINVAL);
	close(fd);
}
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <sys/select.h>
#include <linux/err.h>
#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.bpf.o"
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.bpf.o"
#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.bpf.o"
/* End-to-end sockmap coverage.  Builds two loopback TCP connections
 * (listeners sfd[0..1], clients sfd[2..3], accepted peers sfd[4..5]),
 * then exercises BPF_MAP_TYPE_SOCKMAP: updates without programs,
 * attach/detach error paths, SK_SKB parser/verdict and SK_MSG program
 * attachment, traffic redirected through the map, duplicate-slot update
 * flags, racing updates from 'tasks' forked children, and teardown.
 */
static void test_sockmap(unsigned int tasks, void *data)
{
	struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
	int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
	struct bpf_object *parse_obj, *verdict_obj, *msg_obj;
	int ports[] = {50200, 50201, 50202, 50204};
	int err, i, fd, udp, sfd[6] = {0xdeadbeef};
	u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
	int parse_prog, verdict_prog, msg_prog;
	struct sockaddr_in addr;
	int one = 1, s, sc, rc;
	struct timeval to;
	__u32 key, value;
	pid_t pid[tasks];
	fd_set w;

	/* Create some sockets to use with sockmap: two non-blocking
	 * loopback listeners on ports[0] and ports[1].
	 */
	for (i = 0; i < 2; i++) {
		sfd[i] = socket(AF_INET, SOCK_STREAM, 0);
		if (sfd[i] < 0)
			goto out;
		err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,
				 (char *)&one, sizeof(one));
		if (err) {
			printf("failed to setsockopt\n");
			goto out;
		}
		err = ioctl(sfd[i], FIONBIO, (char *)&one);
		if (err < 0) {
			printf("failed to ioctl\n");
			goto out;
		}
		memset(&addr, 0, sizeof(struct sockaddr_in));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = inet_addr("127.0.0.1");
		addr.sin_port = htons(ports[i]);
		err = bind(sfd[i], (struct sockaddr *)&addr, sizeof(addr));
		if (err < 0) {
			printf("failed to bind: err %i: %i:%i\n",
			       err, i, sfd[i]);
			goto out;
		}
		err = listen(sfd[i], 32);
		if (err < 0) {
			printf("failed to listen\n");
			goto out;
		}
	}

	/* Client sockets sfd[2..3] connect to the two listeners. */
	for (i = 2; i < 4; i++) {
		sfd[i] = socket(AF_INET, SOCK_STREAM, 0);
		if (sfd[i] < 0)
			goto out;
		err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,
				 (char *)&one, sizeof(one));
		if (err) {
			printf("set sock opt\n");
			goto out;
		}
		memset(&addr, 0, sizeof(struct sockaddr_in));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = inet_addr("127.0.0.1");
		addr.sin_port = htons(ports[i - 2]);
		err = connect(sfd[i], (struct sockaddr *)&addr, sizeof(addr));
		if (err) {
			printf("failed to connect\n");
			goto out;
		}
	}

	/* Accepted server-side peers land in sfd[4..5]. */
	for (i = 4; i < 6; i++) {
		sfd[i] = accept(sfd[i - 4], NULL, NULL);
		if (sfd[i] < 0) {
			printf("accept failed\n");
			goto out;
		}
	}

	/* Test sockmap with connected sockets */
	fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
			    sizeof(key), sizeof(value),
			    6, NULL);
	if (fd < 0) {
		/* Skip (not fail) the whole test on kernels without sockmap. */
		if (!libbpf_probe_bpf_map_type(BPF_MAP_TYPE_SOCKMAP, NULL)) {
			printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n",
			       __func__);
			skips++;
			for (i = 0; i < 6; i++)
				close(sfd[i]);
			return;
		}
		printf("Failed to create sockmap %i\n", fd);
		goto out_sockmap;
	}

	/* Test update with unsupported UDP socket */
	udp = socket(AF_INET, SOCK_DGRAM, 0);
	i = 0;
	err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
	if (err) {
		printf("Failed socket update SOCK_DGRAM '%i:%i'\n",
		       i, udp);
		goto out_sockmap;
	}
	close(udp);

	/* Test update without programs */
	for (i = 0; i < 6; i++) {
		err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
		if (err) {
			printf("Failed noprog update sockmap '%i:%i'\n",
			       i, sfd[i]);
			goto out_sockmap;
		}
	}

	/* Test attaching/detaching bad fds: every call below must fail. */
	err = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_PARSER, 0);
	if (!err) {
		printf("Failed invalid parser prog attach\n");
		goto out_sockmap;
	}
	err = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!err) {
		printf("Failed invalid verdict prog attach\n");
		goto out_sockmap;
	}
	err = bpf_prog_attach(-1, fd, BPF_SK_MSG_VERDICT, 0);
	if (!err) {
		printf("Failed invalid msg verdict prog attach\n");
		goto out_sockmap;
	}
	err = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0);
	if (!err) {
		printf("Failed unknown prog attach\n");
		goto out_sockmap;
	}
	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
	if (!err) {
		printf("Failed empty parser prog detach\n");
		goto out_sockmap;
	}
	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
	if (!err) {
		printf("Failed empty verdict prog detach\n");
		goto out_sockmap;
	}
	err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
	if (!err) {
		printf("Failed empty msg verdict prog detach\n");
		goto out_sockmap;
	}
	err = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE);
	if (!err) {
		printf("Detach invalid prog successful\n");
		goto out_sockmap;
	}

	/* Load SK_SKB program and Attach */
	err = bpf_prog_test_load(SOCKMAP_PARSE_PROG,
				 BPF_PROG_TYPE_SK_SKB, &parse_obj, &parse_prog);
	if (err) {
		printf("Failed to load SK_SKB parse prog\n");
		goto out_sockmap;
	}

	err = bpf_prog_test_load(SOCKMAP_TCP_MSG_PROG,
				 BPF_PROG_TYPE_SK_MSG, &msg_obj, &msg_prog);
	if (err) {
		printf("Failed to load SK_SKB msg prog\n");
		goto out_sockmap;
	}

	err = bpf_prog_test_load(SOCKMAP_VERDICT_PROG,
				 BPF_PROG_TYPE_SK_SKB, &verdict_obj, &verdict_prog);
	if (err) {
		printf("Failed to load SK_SKB verdict prog\n");
		goto out_sockmap;
	}

	/* Resolve the maps declared inside the verdict object. */
	bpf_map_rx = bpf_object__find_map_by_name(verdict_obj, "sock_map_rx");
	if (!bpf_map_rx) {
		printf("Failed to load map rx from verdict prog\n");
		goto out_sockmap;
	}

	map_fd_rx = bpf_map__fd(bpf_map_rx);
	if (map_fd_rx < 0) {
		printf("Failed to get map rx fd\n");
		goto out_sockmap;
	}

	bpf_map_tx = bpf_object__find_map_by_name(verdict_obj, "sock_map_tx");
	if (!bpf_map_tx) {
		printf("Failed to load map tx from verdict prog\n");
		goto out_sockmap;
	}

	map_fd_tx = bpf_map__fd(bpf_map_tx);
	if (map_fd_tx < 0) {
		printf("Failed to get map tx fd\n");
		goto out_sockmap;
	}

	bpf_map_msg = bpf_object__find_map_by_name(verdict_obj, "sock_map_msg");
	if (!bpf_map_msg) {
		printf("Failed to load map msg from msg_verdict prog\n");
		goto out_sockmap;
	}

	map_fd_msg = bpf_map__fd(bpf_map_msg);
	if (map_fd_msg < 0) {
		printf("Failed to get map msg fd\n");
		goto out_sockmap;
	}

	/* sock_map_break is the wrong map type for SK_SKB attachment and
	 * is used below to verify the attach is rejected.
	 */
	bpf_map_break = bpf_object__find_map_by_name(verdict_obj, "sock_map_break");
	if (!bpf_map_break) {
		printf("Failed to load map tx from verdict prog\n");
		goto out_sockmap;
	}

	map_fd_break = bpf_map__fd(bpf_map_break);
	if (map_fd_break < 0) {
		printf("Failed to get map tx fd\n");
		goto out_sockmap;
	}

	err = bpf_prog_attach(parse_prog, map_fd_break,
			      BPF_SK_SKB_STREAM_PARSER, 0);
	if (!err) {
		printf("Allowed attaching SK_SKB program to invalid map\n");
		goto out_sockmap;
	}

	err = bpf_prog_attach(parse_prog, map_fd_rx,
			      BPF_SK_SKB_STREAM_PARSER, 0);
	if (err) {
		printf("Failed stream parser bpf prog attach\n");
		goto out_sockmap;
	}

	err = bpf_prog_attach(verdict_prog, map_fd_rx,
			      BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err) {
		printf("Failed stream verdict bpf prog attach\n");
		goto out_sockmap;
	}

	err = bpf_prog_attach(msg_prog, map_fd_msg, BPF_SK_MSG_VERDICT, 0);
	if (err) {
		printf("Failed msg verdict bpf prog attach\n");
		goto out_sockmap;
	}

	err = bpf_prog_attach(verdict_prog, map_fd_rx,
			      __MAX_BPF_ATTACH_TYPE, 0);
	if (!err) {
		printf("Attached unknown bpf prog\n");
		goto out_sockmap;
	}

	/* Test map update elem afterwards fd lives in fd and map_fd */
	for (i = 2; i < 6; i++) {
		err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
		if (err) {
			printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
		err = bpf_map_update_elem(map_fd_tx, &i, &sfd[i], BPF_ANY);
		if (err) {
			printf("Failed map_fd_tx update sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
	}

	/* Test map delete elem and remove send/recv sockets */
	for (i = 2; i < 4; i++) {
		err = bpf_map_delete_elem(map_fd_rx, &i);
		if (err) {
			printf("Failed delete sockmap rx %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
		err = bpf_map_delete_elem(map_fd_tx, &i);
		if (err) {
			printf("Failed delete sockmap tx %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
	}

	/* Put sfd[2] (sending fd below) into msg map to test sendmsg bpf */
	i = 0;
	err = bpf_map_update_elem(map_fd_msg, &i, &sfd[2], BPF_ANY);
	if (err) {
		printf("Failed map_fd_msg update sockmap %i\n", err);
		goto out_sockmap;
	}

	/* Test map send/recv: data sent on sfd[2] should arrive on sfd[3]
	 * after the BPF programs redirect it through the map.
	 */
	for (i = 0; i < 2; i++) {
		buf[0] = i;
		buf[1] = 0x5;
		sc = send(sfd[2], buf, 20, 0);
		if (sc < 0) {
			printf("Failed sockmap send\n");
			goto out_sockmap;
		}

		FD_ZERO(&w);
		FD_SET(sfd[3], &w);
		to.tv_sec = 30;
		to.tv_usec = 0;
		s = select(sfd[3] + 1, &w, NULL, NULL, &to);
		if (s == -1) {
			perror("Failed sockmap select()");
			goto out_sockmap;
		} else if (!s) {
			printf("Failed sockmap unexpected timeout\n");
			goto out_sockmap;
		}

		if (!FD_ISSET(sfd[3], &w)) {
			printf("Failed sockmap select/recv\n");
			goto out_sockmap;
		}

		rc = recv(sfd[3], buf, sizeof(buf), 0);
		if (rc < 0) {
			printf("Failed sockmap recv\n");
			goto out_sockmap;
		}
	}

	/* Negative null entry lookup from datapath should be dropped */
	buf[0] = 1;
	buf[1] = 12;
	sc = send(sfd[2], buf, 20, 0);
	if (sc < 0) {
		printf("Failed sockmap send\n");
		goto out_sockmap;
	}

	/* Push fd into same slot */
	i = 2;
	err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);
	if (!err) {
		printf("Failed allowed sockmap dup slot BPF_NOEXIST\n");
		goto out_sockmap;
	}

	err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
	if (err) {
		printf("Failed sockmap update new slot BPF_ANY\n");
		goto out_sockmap;
	}

	err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);
	if (err) {
		printf("Failed sockmap update new slot BPF_EXIST\n");
		goto out_sockmap;
	}

	/* Delete the elems without programs */
	for (i = 2; i < 6; i++) {
		err = bpf_map_delete_elem(fd, &i);
		if (err) {
			printf("Failed delete sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
		}
	}

	/* Test having multiple maps open and set with programs on same fds;
	 * once a socket is owned by one prog-enabled map, inserting it into
	 * a second prog-enabled map must fail for every update flag.
	 */
	err = bpf_prog_attach(parse_prog, fd,
			      BPF_SK_SKB_STREAM_PARSER, 0);
	if (err) {
		printf("Failed fd bpf parse prog attach\n");
		goto out_sockmap;
	}
	err = bpf_prog_attach(verdict_prog, fd,
			      BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err) {
		printf("Failed fd bpf verdict prog attach\n");
		goto out_sockmap;
	}

	for (i = 4; i < 6; i++) {
		err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
		if (!err) {
			printf("Failed allowed duplicate programs in update ANY sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
		err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);
		if (!err) {
			printf("Failed allowed duplicate program in update NOEXIST sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
		err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);
		if (!err) {
			printf("Failed allowed duplicate program in update EXIST sockmap %i '%i:%i'\n",
			       err, i, sfd[i]);
			goto out_sockmap;
		}
	}

	/* Test tasks number of forked operations.  Note: the child reuses
	 * 'i' for its own loop, which is safe only because it exit(0)s
	 * before the parent's loop resumes in that process.
	 */
	for (i = 0; i < tasks; i++) {
		pid[i] = fork();
		if (pid[i] == 0) {
			for (i = 0; i < 6; i++) {
				bpf_map_delete_elem(map_fd_tx, &i);
				bpf_map_delete_elem(map_fd_rx, &i);
				bpf_map_update_elem(map_fd_tx, &i,
						    &sfd[i], BPF_ANY);
				bpf_map_update_elem(map_fd_rx, &i,
						    &sfd[i], BPF_ANY);
			}
			exit(0);
		} else if (pid[i] == -1) {
			printf("Couldn't spawn #%d process!\n", i);
			exit(1);
		}
	}

	for (i = 0; i < tasks; i++) {
		int status;

		assert(waitpid(pid[i], &status, 0) == pid[i]);
		assert(status == 0);
	}

	err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
	if (!err) {
		printf("Detached an invalid prog type.\n");
		goto out_sockmap;
	}

	err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
	if (err) {
		printf("Failed parser prog detach\n");
		goto out_sockmap;
	}

	err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
	if (err) {
		printf("Failed parser prog detach\n");
		goto out_sockmap;
	}

	/* Test map close sockets and empty maps */
	for (i = 0; i < 6; i++) {
		bpf_map_delete_elem(map_fd_tx, &i);
		bpf_map_delete_elem(map_fd_rx, &i);
		close(sfd[i]);
	}
	close(fd);
	close(map_fd_rx);
	bpf_object__close(parse_obj);
	bpf_object__close(msg_obj);
	bpf_object__close(verdict_obj);
	return;
out:
	/* Socket-setup failure: close whatever was opened and abort. */
	for (i = 0; i < 6; i++)
		close(sfd[i]);
	printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
	exit(1);
out_sockmap:
	/* Map-stage failure: empty the maps (if created), close sockets. */
	for (i = 0; i < 6; i++) {
		if (map_fd_tx)
			bpf_map_delete_elem(map_fd_tx, &i);
		if (map_fd_rx)
			bpf_map_delete_elem(map_fd_rx, &i);
		close(sfd[i]);
	}
	close(fd);
	exit(1);
}
#define MAPINMAP_PROG "./test_map_in_map.bpf.o"
#define MAPINMAP_INVALID_PROG "./test_map_in_map_invalid.bpf.o"
/* Exercise map-in-map support: set an inner-map prototype fd on both an
 * array-of-maps and a hash-of-maps before load, insert the inner map at
 * runtime, then verify that a failed inner-map validation during load of
 * an invalid program destroys the transient inner map (no "mim.inner"
 * map id left behind).
 */
static void test_map_in_map(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	int mim_fd, fd = -1, err;
	int pos = 0;
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	__u32 id = 0;
	libbpf_print_fn_t old_print_fn;

	obj = bpf_object__open(MAPINMAP_PROG);
	/* Bug fix: the open result was previously used unchecked; under
	 * libbpf 1.0 strict mode a failed open returns NULL and the
	 * find_map calls below would crash.
	 */
	if (!obj) {
		printf("Failed to open %s program!\n", MAPINMAP_PROG);
		exit(1);
	}

	/* The prototype inner map: hash<int, int> with two entries. */
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), sizeof(int), 2, NULL);
	if (fd < 0) {
		printf("Failed to create hashmap '%s'!\n", strerror(errno));
		exit(1);
	}

	map = bpf_object__find_map_by_name(obj, "mim_array");
	if (!map) {
		printf("Failed to load array of maps from test prog\n");
		goto out_map_in_map;
	}
	err = bpf_map__set_inner_map_fd(map, fd);
	if (err) {
		printf("Failed to set inner_map_fd for array of maps\n");
		goto out_map_in_map;
	}

	map = bpf_object__find_map_by_name(obj, "mim_hash");
	if (!map) {
		printf("Failed to load hash of maps from test prog\n");
		goto out_map_in_map;
	}
	err = bpf_map__set_inner_map_fd(map, fd);
	if (err) {
		printf("Failed to set inner_map_fd for hash of maps\n");
		goto out_map_in_map;
	}

	/* Bug fix: the load return value was ignored; every step below
	 * depends on a successfully loaded object.
	 */
	err = bpf_object__load(obj);
	if (err) {
		printf("Failed to load %s program!\n", MAPINMAP_PROG);
		goto out_map_in_map;
	}

	/* Insert the inner map into the array-of-maps at slot 0. */
	map = bpf_object__find_map_by_name(obj, "mim_array");
	if (!map) {
		printf("Failed to load array of maps from test prog\n");
		goto out_map_in_map;
	}
	mim_fd = bpf_map__fd(map);
	if (mim_fd < 0) {
		printf("Failed to get descriptor for array of maps\n");
		goto out_map_in_map;
	}

	err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
	if (err) {
		printf("Failed to update array of maps\n");
		goto out_map_in_map;
	}

	/* And into the hash-of-maps under key 0. */
	map = bpf_object__find_map_by_name(obj, "mim_hash");
	if (!map) {
		printf("Failed to load hash of maps from test prog\n");
		goto out_map_in_map;
	}
	mim_fd = bpf_map__fd(map);
	if (mim_fd < 0) {
		printf("Failed to get descriptor for hash of maps\n");
		goto out_map_in_map;
	}

	err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
	if (err) {
		printf("Failed to update hash of maps\n");
		goto out_map_in_map;
	}

	close(fd);
	fd = -1;
	bpf_object__close(obj);

	/* Test that failing bpf_object__create_map() destroys the inner map */
	obj = bpf_object__open(MAPINMAP_INVALID_PROG);
	err = libbpf_get_error(obj);
	if (err) {
		printf("Failed to load %s program: %d %d",
		       MAPINMAP_INVALID_PROG, err, errno);
		goto out_map_in_map;
	}

	map = bpf_object__find_map_by_name(obj, "mim");
	if (!map) {
		printf("Failed to load array of maps from test prog\n");
		goto out_map_in_map;
	}

	/* Silence the expected load-failure messages from libbpf. */
	old_print_fn = libbpf_set_print(NULL);

	err = bpf_object__load(obj);
	if (!err) {
		printf("Loading obj supposed to fail\n");
		goto out_map_in_map;
	}

	libbpf_set_print(old_print_fn);

	/* Iterate over all maps to check whether the internal map
	 * ("mim.internal") has been destroyed.
	 */
	while (true) {
		err = bpf_map_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT)
				break;
			printf("Failed to get next map: %d", errno);
			goto out_map_in_map;
		}

		fd = bpf_map_get_fd_by_id(id);
		if (fd < 0) {
			/* The map may have disappeared between the two calls. */
			if (errno == ENOENT)
				continue;
			printf("Failed to get map by id %u: %d", id, errno);
			goto out_map_in_map;
		}

		err = bpf_map_get_info_by_fd(fd, &info, &len);
		if (err) {
			printf("Failed to get map info by fd %d: %d", fd,
			       errno);
			goto out_map_in_map;
		}

		if (!strcmp(info.name, "mim.inner")) {
			printf("Inner map mim.inner was not destroyed\n");
			goto out_map_in_map;
		}

		close(fd);
	}

	bpf_object__close(obj);
	return;

out_map_in_map:
	if (fd >= 0)
		close(fd);
	exit(1);
}
#define MAP_SIZE (32 * 1024)
/* Stress a hash map whose key is larger than a page: fill it to
 * MAP_SIZE, verify E2BIG on overflow, iterate every key, and confirm
 * lookups honor the full (multi-kilobyte) key.
 */
static void test_map_large(void)
{
	struct bigkey {
		int a;
		char b[4096];
		long long c;
	} key;
	int fd, n, value;

	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
			    MAP_SIZE, &map_opts);
	if (fd < 0) {
		printf("Failed to create large map '%s'!\n", strerror(errno));
		exit(1);
	}

	/* Populate exactly MAP_SIZE distinct keys (distinguished by .c). */
	for (n = 0; n < MAP_SIZE; n++) {
		key = (struct bigkey) { .c = n };
		value = n;
		assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
	}

	/* Map is full: one more insert must report E2BIG. */
	key.c = -1;
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       errno == E2BIG);

	/* Iterate through all elements. */
	assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
	key.c = -1;
	for (n = 0; n < MAP_SIZE; n++)
		assert(bpf_map_get_next_key(fd, &key, &key) == 0);
	assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);

	/* Lookup must hash the whole key: flipping .a turns it into a miss. */
	key.c = 0;
	assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
	key.a = 1;
	assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);

	close(fd);
}
#define run_parallel(N, FN, DATA) \
printf("Fork %u tasks to '" #FN "'\n", N); \
__run_parallel(N, FN, DATA)
/* Fork 'tasks' children, run fn(task_index, data) in each, and require
 * every child to terminate with a clean exit status of 0.
 */
static void __run_parallel(unsigned int tasks,
			   void (*fn)(unsigned int task, void *data),
			   void *data)
{
	pid_t workers[tasks];
	int n;

	/* Flush buffered output so children do not re-emit it after fork. */
	fflush(stdout);

	for (n = 0; n < tasks; n++) {
		workers[n] = fork();
		if (workers[n] == 0) {
			fn(n, data);
			exit(0);
		} else if (workers[n] == -1) {
			printf("Couldn't spawn #%d process!\n", n);
			exit(1);
		}
	}

	/* Reap every child; a status of 0 means exit(0) with no signal. */
	for (n = 0; n < tasks; n++) {
		int status;

		assert(waitpid(workers[n], &status, 0) == workers[n]);
		assert(status == 0);
	}
}
/* Hammer each basic map type with 100 concurrently forked workers to
 * shake out races in the update/lookup/delete paths.
 */
static void test_map_stress(void)
{
	run_parallel(100, test_hashmap_walk, NULL);
	run_parallel(100, test_hashmap, NULL);
	run_parallel(100, test_hashmap_percpu, NULL);
	run_parallel(100, test_hashmap_sizes, NULL);
	run_parallel(100, test_arraymap, NULL);
	run_parallel(100, test_arraymap_percpu, NULL);
}
#define TASKS 100
#define DO_UPDATE 1
#define DO_DELETE 0
#define MAP_RETRIES 20
#define MAX_DELAY_US 50000
#define MIN_DELAY_RANGE_US 5000
/* Update a map element, retrying up to 'attempts' times with (roughly)
 * exponential backoff while the kernel reports transient contention
 * (EAGAIN/EBUSY).  Returns 0 on success or -errno on a hard failure or
 * retry exhaustion.
 */
static int map_update_retriable(int map_fd, const void *key, const void *value,
				int flags, int attempts)
{
	int delay = rand() % MIN_DELAY_RANGE_US;

	for (;;) {
		if (!bpf_map_update_elem(map_fd, key, value, flags))
			return 0;
		if (!attempts || (errno != EAGAIN && errno != EBUSY))
			return -errno;
		/* Double the delay until it would exceed MAX_DELAY_US. */
		if (delay <= MAX_DELAY_US / 2)
			delay *= 2;
		usleep(delay);
		attempts--;
	}
}
/* Delete a map element, retrying up to 'attempts' times with (roughly)
 * exponential backoff while the kernel reports transient contention
 * (EAGAIN/EBUSY).  Returns 0 on success or -errno otherwise.
 */
static int map_delete_retriable(int map_fd, const void *key, int attempts)
{
	int delay = rand() % MIN_DELAY_RANGE_US;

	for (;;) {
		if (!bpf_map_delete_elem(map_fd, key))
			return 0;
		if (!attempts || (errno != EAGAIN && errno != EBUSY))
			return -errno;
		/* Double the delay until it would exceed MAX_DELAY_US. */
		if (delay <= MAX_DELAY_US / 2)
			delay *= 2;
		usleep(delay);
		attempts--;
	}
}
/* Worker body for test_map_parallel(): data is {map_fd, DO_UPDATE or
 * DO_DELETE}.  Worker 'fn' touches the keys congruent to fn mod TASKS,
 * either inserting then overwriting them, or deleting them.
 */
static void test_update_delete(unsigned int fn, void *data)
{
	int *args = data;
	int fd = args[0];
	int do_update = args[1];
	int i, key, value, err;

	/* Odd-numbered workers also walk a scratch hashmap for extra noise. */
	if (fn & 1)
		test_hashmap_walk(fn, NULL);

	for (i = fn; i < MAP_SIZE; i += TASKS) {
		key = value = i;
		if (do_update) {
			err = map_update_retriable(fd, &key, &value,
						   BPF_NOEXIST, MAP_RETRIES);
			if (err)
				printf("error %d %d\n", err, errno);
			assert(err == 0);
			err = map_update_retriable(fd, &key, &value,
						   BPF_EXIST, MAP_RETRIES);
			if (err)
				printf("error %d %d\n", err, errno);
			assert(err == 0);
		} else {
			err = map_delete_retriable(fd, &key, MAP_RETRIES);
			if (err)
				printf("error %d %d\n", err, errno);
			assert(err == 0);
		}
	}
}
/* Populate one shared hash map from TASKS forked workers in parallel,
 * verify full occupancy, drain it in parallel, verify emptiness, and
 * repeat the whole cycle several times to stress reuse of the map.
 */
static void test_map_parallel(void)
{
	int i, fd, key = 0, value = 0, j = 0;
	int data[2];

	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
			    MAP_SIZE, &map_opts);
	if (fd < 0) {
		printf("Failed to create map for parallel test '%s'!\n",
		       strerror(errno));
		exit(1);
	}

again:
	/* Use the same fd in children to add elements to this map:
	 * child_0 adds key=0, key=1024, key=2048, ...
	 * child_1 adds key=1, key=1025, key=2049, ...
	 * child_1023 adds key=1023, ...
	 */
	data[0] = fd;
	data[1] = DO_UPDATE;
	run_parallel(TASKS, test_update_delete, data);

	/* Check that key=0 is already there. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) < 0 &&
	       errno == EEXIST);

	/* Check that all elements were inserted: the iterator must yield
	 * exactly MAP_SIZE keys before reporting ENOENT.
	 */
	assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
	key = -1;
	for (i = 0; i < MAP_SIZE; i++)
		assert(bpf_map_get_next_key(fd, &key, &key) == 0);
	assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);

	/* Another check for all elements: direct lookups in reverse order. */
	for (i = 0; i < MAP_SIZE; i++) {
		key = MAP_SIZE - i - 1;
		assert(bpf_map_lookup_elem(fd, &key, &value) == 0 &&
		       value == key);
	}

	/* Now let's delete all elements in parallel. */
	data[1] = DO_DELETE;
	run_parallel(TASKS, test_update_delete, data);

	/* Nothing should be left. */
	key = -1;
	assert(bpf_map_get_next_key(fd, NULL, &key) < 0 && errno == ENOENT);
	assert(bpf_map_get_next_key(fd, &key, &key) < 0 && errno == ENOENT);

	/* Deleting from the now-empty map must be harmless. */
	key = 0;
	bpf_map_delete_elem(fd, &key);

	/* Re-run the whole fill/drain cycle a few more times. */
	if (j++ < 5)
		goto again;

	close(fd);
}
/* Verify BPF_F_RDONLY: a read-only hash map must refuse writes with
 * EPERM while lookups and iteration remain permitted (and find the map
 * empty, since nothing could be inserted).
 */
static void test_map_rdonly(void)
{
	int fd, key = 0, value = 0;
	__u32 saved_flags = map_opts.map_flags;

	/* Create with the read-only flag, then restore the global opts. */
	map_opts.map_flags |= BPF_F_RDONLY;
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
			    MAP_SIZE, &map_opts);
	map_opts.map_flags = saved_flags;
	if (fd < 0) {
		printf("Failed to create map for read only test '%s'!\n",
		       strerror(errno));
		exit(1);
	}

	key = 1;
	value = 1234;
	/* Try to insert key=1 element: must be denied with EPERM. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) < 0 &&
	       errno == EPERM);

	/* Check that key=1 is not found. */
	assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == ENOENT);
	assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == ENOENT);

	close(fd);
}
/* Verify BPF_F_WRONLY on a hash map: inserts succeed, but any read
 * back (lookup or key iteration) is denied with EPERM.
 */
static void test_map_wronly_hash(void)
{
	int fd, key = 0, value = 0;
	__u32 saved_flags = map_opts.map_flags;

	/* Create with the write-only flag, then restore the global opts. */
	map_opts.map_flags |= BPF_F_WRONLY;
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value),
			    MAP_SIZE, &map_opts);
	map_opts.map_flags = saved_flags;
	if (fd < 0) {
		printf("Failed to create map for write only test '%s'!\n",
		       strerror(errno));
		exit(1);
	}

	key = 1;
	value = 1234;
	/* Insert key=1 element. */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);

	/* Reading elements or iterating keys must fail with EPERM. */
	assert(bpf_map_lookup_elem(fd, &key, &value) < 0 && errno == EPERM);
	assert(bpf_map_get_next_key(fd, &key, &value) < 0 && errno == EPERM);

	close(fd);
}
/* Verify BPF_F_WRONLY on a stack or queue map: pushes succeed, while
 * peek and pop (both reads) are denied with EPERM.
 */
static void test_map_wronly_stack_or_queue(enum bpf_map_type map_type)
{
	int fd, value = 0;
	__u32 saved_flags = map_opts.map_flags;

	assert(map_type == BPF_MAP_TYPE_QUEUE ||
	       map_type == BPF_MAP_TYPE_STACK);

	/* Create with the write-only flag, then restore the global opts. */
	map_opts.map_flags |= BPF_F_WRONLY;
	fd = bpf_map_create(map_type, NULL, 0, sizeof(value), MAP_SIZE,
			    &map_opts);
	map_opts.map_flags = saved_flags;

	/* Stack/Queue maps do not support BPF_F_NO_PREALLOC */
	if (map_opts.map_flags & BPF_F_NO_PREALLOC) {
		assert(fd < 0 && errno == EINVAL);
		return;
	}
	if (fd < 0) {
		printf("Failed to create map '%s'!\n", strerror(errno));
		exit(1);
	}

	value = 1234;
	/* Pushing onto a write-only map is allowed... */
	assert(bpf_map_update_elem(fd, NULL, &value, BPF_ANY) == 0);

	/* ...but peek must fail... */
	assert(bpf_map_lookup_elem(fd, NULL, &value) < 0 && errno == EPERM);

	/* ...and so must pop. */
	assert(bpf_map_lookup_and_delete_elem(fd, NULL, &value) < 0 &&
	       errno == EPERM);

	close(fd);
}
/* Verify BPF_F_WRONLY semantics across map families: plain hash plus
 * the stack and queue variants.
 */
static void test_map_wronly(void)
{
	test_map_wronly_hash();
	test_map_wronly_stack_or_queue(BPF_MAP_TYPE_STACK);
	test_map_wronly_stack_or_queue(BPF_MAP_TYPE_QUEUE);
}
/* Create @n SO_REUSEPORT sockets of @type, all bound to the same
 * in6addr_any port (chosen by the kernel for the first socket), while
 * verifying that a reuseport_array rejects unbound sockets and, for
 * TCP, non-listening sockets.  On return @fds64 holds the socket fds
 * and @sk_cookies their SO_COOKIE values.  @map_elem_size selects
 * whether the update value is passed as a 64-bit or 32-bit fd.
 */
static void prepare_reuseport_grp(int type, int map_fd, size_t map_elem_size,
				  __s64 *fds64, __u64 *sk_cookies,
				  unsigned int n)
{
	socklen_t optlen, addrlen;
	struct sockaddr_in6 s6;
	const __u32 index0 = 0;
	const int optval = 1;
	unsigned int i;
	u64 sk_cookie;
	void *value;
	__s32 fd32;
	__s64 fd64;
	int err;

	s6.sin6_family = AF_INET6;
	s6.sin6_addr = in6addr_any;
	s6.sin6_port = 0;
	addrlen = sizeof(s6);
	optlen = sizeof(sk_cookie);

	for (i = 0; i < n; i++) {
		fd64 = socket(AF_INET6, type, 0);
		CHECK(fd64 == -1, "socket()",
		      "sock_type:%d fd64:%lld errno:%d\n",
		      type, fd64, errno);

		err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT,
				 &optval, sizeof(optval));
		CHECK(err == -1, "setsockopt(SO_REUSEPORT)",
		      "err:%d errno:%d\n", err, errno);

		/* reuseport_array does not allow unbound sk */
		if (map_elem_size == sizeof(__u64))
			value = &fd64;
		else {
			assert(map_elem_size == sizeof(__u32));
			fd32 = (__s32)fd64;
			value = &fd32;
		}

		err = bpf_map_update_elem(map_fd, &index0, value, BPF_ANY);
		CHECK(err >= 0 || errno != EINVAL,
		      "reuseport array update unbound sk",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		err = bind(fd64, (struct sockaddr *)&s6, sizeof(s6));
		CHECK(err == -1, "bind()",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		/* Learn the kernel-assigned port from the first socket so
		 * all later sockets bind to the same one.
		 */
		if (i == 0) {
			err = getsockname(fd64, (struct sockaddr *)&s6,
					  &addrlen);
			CHECK(err == -1, "getsockname()",
			      "sock_type:%d err:%d errno:%d\n",
			      type, err, errno);
		}

		err = getsockopt(fd64, SOL_SOCKET, SO_COOKIE, &sk_cookie,
				 &optlen);
		CHECK(err == -1, "getsockopt(SO_COOKIE)",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		if (type == SOCK_STREAM) {
			/*
			 * reuseport_array does not allow
			 * non-listening tcp sk.
			 */
			err = bpf_map_update_elem(map_fd, &index0, value,
						  BPF_ANY);
			CHECK(err >= 0 || errno != EINVAL,
			      "reuseport array update non-listening sk",
			      "sock_type:%d err:%d errno:%d\n",
			      type, err, errno);
			err = listen(fd64, 0);
			CHECK(err == -1, "listen()",
			      "sock_type:%d, err:%d errno:%d\n",
			      type, err, errno);
		}

		fds64[i] = fd64;
		sk_cookies[i] = sk_cookie;
	}
}
/* Exercise BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: out-of-range and missing
 * keys, every BPF_* update flag over TCP and UDP reuseport groups,
 * duplicate-socket rejection, cookie lookups, element removal on
 * close(), the SOCK_RAW rejection path, and the 32-bit-fd value map.
 */
static void test_reuseport_array(void)
{
/* Alternate between the two fds of a group: advance only on success. */
#define REUSEPORT_FD_IDX(err, last) ({ (err) ? last : !last; })

	const __u32 array_size = 4, index0 = 0, index3 = 3;
	int types[2] = { SOCK_STREAM, SOCK_DGRAM }, type;
	__u64 grpa_cookies[2], sk_cookie, map_cookie;
	__s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1;
	const __u32 bad_index = array_size;
	int map_fd, err, t, f;
	__u32 fds_idx = 0;
	int fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,
				sizeof(__u32), sizeof(__u64), array_size, NULL);
	CHECK(map_fd < 0, "reuseport array create",
	      "map_fd:%d, errno:%d\n", map_fd, errno);

	/* Test lookup/update/delete with invalid index */
	err = bpf_map_delete_elem(map_fd, &bad_index);
	CHECK(err >= 0 || errno != E2BIG, "reuseport array del >=max_entries",
	      "err:%d errno:%d\n", err, errno);

	err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY);
	CHECK(err >= 0 || errno != E2BIG,
	      "reuseport array update >=max_entries",
	      "err:%d errno:%d\n", err, errno);

	err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array update >=max_entries",
	      "err:%d errno:%d\n", err, errno);

	/* Test lookup/delete non existence elem */
	err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array lookup not-exist elem",
	      "err:%d errno:%d\n", err, errno);
	err = bpf_map_delete_elem(map_fd, &index3);
	CHECK(err >= 0 || errno != ENOENT,
	      "reuseport array del not-exist elem",
	      "err:%d errno:%d\n", err, errno);

	for (t = 0; t < ARRAY_SIZE(types); t++) {
		type = types[t];

		prepare_reuseport_grp(type, map_fd, sizeof(__u64), grpa_fds64,
				      grpa_cookies, ARRAY_SIZE(grpa_fds64));

		/* Test BPF_* update flags */
		/* BPF_EXIST failure case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_EXIST);
		CHECK(err >= 0 || errno != ENOENT,
		      "reuseport array update empty elem BPF_EXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_NOEXIST success case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_NOEXIST);
		CHECK(err < 0,
		      "reuseport array update empty elem BPF_NOEXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_EXIST success case. */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_EXIST);
		CHECK(err < 0,
		      "reuseport array update same elem BPF_EXIST",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_NOEXIST failure case */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_NOEXIST);
		CHECK(err >= 0 || errno != EEXIST,
		      "reuseport array update non-empty elem BPF_NOEXIST",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
		fds_idx = REUSEPORT_FD_IDX(err, fds_idx);

		/* BPF_ANY case (always succeed) */
		err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
					  BPF_ANY);
		CHECK(err < 0,
		      "reuseport array update same sk with BPF_ANY",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		fd64 = grpa_fds64[fds_idx];
		sk_cookie = grpa_cookies[fds_idx];

		/* The same sk cannot be added to reuseport_array twice */
		err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY);
		CHECK(err >= 0 || errno != EBUSY,
		      "reuseport array update same sk with same index",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY);
		CHECK(err >= 0 || errno != EBUSY,
		      "reuseport array update same sk with different index",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		/* Test delete elem */
		err = bpf_map_delete_elem(map_fd, &index3);
		CHECK(err < 0, "reuseport array delete sk",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);

		/* Add it back with BPF_NOEXIST */
		err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
		CHECK(err < 0,
		      "reuseport array re-add with BPF_NOEXIST after del",
		      "sock_type:%d err:%d errno:%d\n", type, err, errno);

		/* Test cookie: the map stores the socket's SO_COOKIE. */
		err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
		CHECK(err < 0 || sk_cookie != map_cookie,
		      "reuseport array lookup re-added sk",
		      "sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llx\n",
		      type, err, errno, sk_cookie, map_cookie);

		/* Test elem removed by close() */
		for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++)
			close(grpa_fds64[f]);
		err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
		CHECK(err >= 0 || errno != ENOENT,
		      "reuseport array lookup after close()",
		      "sock_type:%d err:%d errno:%d\n",
		      type, err, errno);
	}

	/* Test SOCK_RAW */
	fd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
	/* Bug fix: this CHECK used to print the stale 'err' left over from
	 * the previous bpf_map_lookup_elem() instead of the value that
	 * actually failed; report fd64 (with a matching %lld) instead.
	 */
	CHECK(fd64 == -1, "socket(SOCK_RAW)", "fd64:%lld errno:%d\n",
	      fd64, errno);
	err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
	CHECK(err >= 0 || errno != ENOTSUPP, "reuseport array update SOCK_RAW",
	      "err:%d errno:%d\n", err, errno);
	close(fd64);

	/* Close the 64 bit value map */
	close(map_fd);

	/* Test 32 bit fd */
	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,
				sizeof(__u32), sizeof(__u32), array_size, NULL);
	CHECK(map_fd < 0, "reuseport array create",
	      "map_fd:%d, errno:%d\n", map_fd, errno);
	prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64,
			      &sk_cookie, 1);
	fd = fd64;
	err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);
	CHECK(err < 0, "reuseport array update 32 bit fd",
	      "err:%d errno:%d\n", err, errno);
	/* A 32-bit value map cannot return the 64-bit cookie. */
	err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
	CHECK(err >= 0 || errno != ENOSPC,
	      "reuseport array lookup 32 bit fd",
	      "err:%d errno:%d\n", err, errno);
	close(fd);
	close(map_fd);
}
/* Run every map test once with the current global map_opts; the caller
 * (main) toggles BPF_F_NO_PREALLOC between invocations.
 */
static void run_all_tests(void)
{
	test_hashmap(0, NULL);
	test_hashmap_percpu(0, NULL);
	test_hashmap_walk(0, NULL);
	test_hashmap_zero_seed();

	test_arraymap(0, NULL);
	test_arraymap_percpu(0, NULL);

	test_arraymap_percpu_many_keys();

	test_devmap(0, NULL);
	test_devmap_hash(0, NULL);
	test_sockmap(0, NULL);

	test_map_large();
	test_map_parallel();
	test_map_stress();

	test_map_rdonly();
	test_map_wronly();

	test_reuseport_array();

	test_queuemap(0, NULL);
	test_stackmap(0, NULL);

	test_map_in_map();
}
#define DEFINE_TEST(name) extern void test_##name(void);
#include <map_tests/tests.h>
#undef DEFINE_TEST
/* Entry point: run the whole suite twice — once with pre-allocated maps
 * and once with BPF_F_NO_PREALLOC — then run the standalone tests
 * generated from map_tests/tests.h via the DEFINE_TEST x-macro.
 */
int main(void)
{
	srand(time(NULL));

	/* libbpf 1.0 strict API behavior for all calls below. */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	map_opts.map_flags = 0;
	run_all_tests();

	map_opts.map_flags = BPF_F_NO_PREALLOC;
	run_all_tests();

#define DEFINE_TEST(name) test_##name();
#include <map_tests/tests.h>
#undef DEFINE_TEST

	printf("test_maps: OK, %d SKIPPED\n", skips);
	return 0;
}
| linux-master | tools/testing/selftests/bpf/test_maps.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define DEV_CGROUP_PROG "./dev_cgroup.bpf.o"
#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
/* Load dev_cgroup.bpf.o, attach it to a freshly created cgroup and verify
 * via shell commands that device access is policed as the program dictates.
 * Needs root. Returns 0 on success, EXIT_FAILURE otherwise.
 */
int main(int argc, char **argv)
{
	struct bpf_object *obj;
	int error = EXIT_FAILURE;
	int prog_fd, cgroup_fd;
	__u32 prog_cnt;
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
			       &obj, &prog_fd)) {
		printf("Failed to load DEV_CGROUP program\n");
		goto out;
	}
	cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
	if (cgroup_fd < 0) {
		printf("Failed to create test cgroup\n");
		goto out;
	}
	/* Attach bpf program */
	if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, 0)) {
		printf("Failed to attach DEV_CGROUP program");
		goto err;
	}
	if (bpf_prog_query(cgroup_fd, BPF_CGROUP_DEVICE, 0, NULL, NULL,
			   &prog_cnt)) {
		printf("Failed to query attached programs");
		goto err;
	}
	/* All operations with /dev/zero and /dev/urandom are allowed,
	 * everything else is forbidden.
	 */
	/* /dev/null (c 1 3) is forbidden, so mknod must FAIL (non-zero) */
	assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
	assert(system("mknod /tmp/test_dev_cgroup_null c 1 3"));
	assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
	/* /dev/zero is whitelisted */
	assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
	assert(system("mknod /tmp/test_dev_cgroup_zero c 1 5") == 0);
	assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
	assert(system("dd if=/dev/urandom of=/dev/zero count=64") == 0);
	/* src is allowed, target is forbidden */
	assert(system("dd if=/dev/urandom of=/dev/full count=64"));
	/* src is forbidden, target is allowed */
	assert(system("dd if=/dev/random of=/dev/zero count=64"));
	error = 0;
	printf("test_dev_cgroup:PASS\n");
err:
	cleanup_cgroup_environment();
out:
	return error;
}
| linux-master | tools/testing/selftests/bpf/test_dev_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include "cap_helpers.h"
/* Avoid including <sys/capability.h> from the libcap-devel package,
* so directly declare them here and use them from glibc.
*/
int capget(cap_user_header_t header, cap_user_data_t data);
int capset(cap_user_header_t header, const cap_user_data_t data);
/* Raise the given capability bits in the current thread's effective set.
 * @caps: bitmask of capabilities to enable (low 64 bits, CAP_* numbered)
 * @old_caps: if non-NULL, receives the previous effective set
 * Returns 0 on success, the capget()/capset() error otherwise.
 */
int cap_enable_effective(__u64 caps, __u64 *old_caps)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	__u32 lo = (__u32)caps;
	__u32 hi = (__u32)(caps >> 32);
	int ret;

	ret = capget(&hdr, data);
	if (ret)
		return ret;

	if (old_caps)
		*old_caps = ((__u64)data[1].effective << 32) | data[0].effective;

	/* nothing to do if every requested bit is already set */
	if ((data[0].effective & lo) == lo &&
	    (data[1].effective & hi) == hi)
		return 0;

	data[0].effective |= lo;
	data[1].effective |= hi;

	return capset(&hdr, data);
}
/* Clear the given capability bits in the current thread's effective set.
 * @caps: bitmask of capabilities to disable
 * @old_caps: if non-NULL, receives the previous effective set
 * Returns 0 on success, the capget()/capset() error otherwise.
 */
int cap_disable_effective(__u64 caps, __u64 *old_caps)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	__u32 lo = (__u32)caps;
	__u32 hi = (__u32)(caps >> 32);
	int ret;

	ret = capget(&hdr, data);
	if (ret)
		return ret;

	if (old_caps)
		*old_caps = ((__u64)data[1].effective << 32) | data[0].effective;

	/* skip capset() when all requested bits are already clear */
	if (!(data[0].effective & lo) && !(data[1].effective & hi))
		return 0;

	data[0].effective &= ~lo;
	data[1].effective &= ~hi;

	return capset(&hdr, data);
}
| linux-master | tools/testing/selftests/bpf/cap_helpers.c |
// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include <uapi/linux/netdev.h>
#include <linux/if_link.h>
#include <signal.h>
#include <argp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <pthread.h>
#include <network_helpers.h>
#include "xdp_features.skel.h"
#include "xdp_features.h"
#define RED(str) "\033[0;31m" str "\033[0m"
#define GREEN(str) "\033[0;32m" str "\033[0m"
#define YELLOW(str) "\033[0;33m" str "\033[0m"
static struct env {
bool verbosity;
char ifname[IF_NAMESIZE];
int ifindex;
bool is_tester;
struct {
enum netdev_xdp_act drv_feature;
enum xdp_action action;
} feature;
struct sockaddr_storage dut_ctrl_addr;
struct sockaddr_storage dut_addr;
struct sockaddr_storage tester_addr;
} env;
#define BUFSIZE 128
void test__fail(void) { /* for network_helpers.c */ }
/* libbpf logging callback: forward everything to stderr, but suppress
 * debug-level chatter unless -v (env.verbosity) was given.
 */
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (!env.verbosity && level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}
static volatile bool exiting;
/* SIGINT/SIGTERM handler: request the control/echo loops to wind down. */
static void sig_handler(int sig)
{
	exiting = true;
}
const char *argp_program_version = "xdp-features 0.0";
const char argp_program_doc[] =
"XDP features detection application.\n"
"\n"
"XDP features application checks the XDP advertised features match detected ones.\n"
"\n"
"USAGE: ./xdp-features [-vt] [-f <xdp-feature>] [-D <dut-data-ip>] [-T <tester-data-ip>] [-C <dut-ctrl-ip>] <iface-name>\n"
"\n"
"dut-data-ip, tester-data-ip, dut-ctrl-ip: IPv6 or IPv4-mapped-IPv6 addresses;\n"
"\n"
"XDP features\n:"
"- XDP_PASS\n"
"- XDP_DROP\n"
"- XDP_ABORTED\n"
"- XDP_REDIRECT\n"
"- XDP_NDO_XMIT\n"
"- XDP_TX\n";
static const struct argp_option opts[] = {
{ "verbose", 'v', NULL, 0, "Verbose debug output" },
{ "tester", 't', NULL, 0, "Tester mode" },
{ "feature", 'f', "XDP-FEATURE", 0, "XDP feature to test" },
{ "dut_data_ip", 'D', "DUT-DATA-IP", 0, "DUT IP data channel" },
{ "dut_ctrl_ip", 'C', "DUT-CTRL-IP", 0, "DUT IP control channel" },
{ "tester_data_ip", 'T', "TESTER-DATA-IP", 0, "Tester IP data channel" },
{},
};
static int get_xdp_feature(const char *arg)
{
if (!strcmp(arg, "XDP_PASS")) {
env.feature.action = XDP_PASS;
env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
} else if (!strcmp(arg, "XDP_DROP")) {
env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
env.feature.action = XDP_DROP;
} else if (!strcmp(arg, "XDP_ABORTED")) {
env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
env.feature.action = XDP_ABORTED;
} else if (!strcmp(arg, "XDP_TX")) {
env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
env.feature.action = XDP_TX;
} else if (!strcmp(arg, "XDP_REDIRECT")) {
env.feature.drv_feature = NETDEV_XDP_ACT_REDIRECT;
env.feature.action = XDP_REDIRECT;
} else if (!strcmp(arg, "XDP_NDO_XMIT")) {
env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
} else {
return -EINVAL;
}
return 0;
}
/* Return a colorized, human-readable name for the feature currently
 * selected in env.feature, or "" when nothing matches.
 */
static char *get_xdp_feature_str(void)
{
	if (env.feature.action == XDP_PASS)
		return YELLOW("XDP_PASS");
	if (env.feature.action == XDP_DROP)
		return YELLOW("XDP_DROP");
	if (env.feature.action == XDP_ABORTED)
		return YELLOW("XDP_ABORTED");
	if (env.feature.action == XDP_TX)
		return YELLOW("XDP_TX");
	if (env.feature.action == XDP_REDIRECT)
		return YELLOW("XDP_REDIRECT");

	/* no action set: NDO_XMIT is keyed off the driver feature alone */
	if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
		return YELLOW("XDP_NDO_XMIT");

	return "";
}
/* argp option callback: fill the global env from command-line switches.
 * Returns 0 on success or ARGP_ERR_UNKNOWN on bad input (after printing
 * a diagnostic and/or usage).
 */
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case 'v':
		env.verbosity = true;
		break;
	case 't':
		env.is_tester = true;
		break;
	case 'f':
		if (get_xdp_feature(arg) < 0) {
			fprintf(stderr, "Invalid xdp feature: %s\n", arg);
			argp_usage(state);
			return ARGP_ERR_UNKNOWN;
		}
		break;
	case 'D':
		/* DUT data-channel address (IPv6 or IPv4-mapped IPv6) */
		if (make_sockaddr(AF_INET6, arg, DUT_ECHO_PORT,
				  &env.dut_addr, NULL)) {
			fprintf(stderr,
				"Invalid address assigned to the Device Under Test: %s\n",
				arg);
			return ARGP_ERR_UNKNOWN;
		}
		break;
	case 'C':
		/* DUT control-channel address */
		if (make_sockaddr(AF_INET6, arg, DUT_CTRL_PORT,
				  &env.dut_ctrl_addr, NULL)) {
			fprintf(stderr,
				"Invalid address assigned to the Device Under Test: %s\n",
				arg);
			return ARGP_ERR_UNKNOWN;
		}
		break;
	case 'T':
		/* tester data-channel address (port 0: not a listener here) */
		if (make_sockaddr(AF_INET6, arg, 0, &env.tester_addr, NULL)) {
			fprintf(stderr,
				"Invalid address assigned to the Tester device: %s\n",
				arg);
			return ARGP_ERR_UNKNOWN;
		}
		break;
	case ARGP_KEY_ARG:
		/* positional argument: interface name or numeric ifindex */
		errno = 0;
		if (strlen(arg) >= IF_NAMESIZE) {
			fprintf(stderr, "Invalid device name: %s\n", arg);
			argp_usage(state);
			return ARGP_ERR_UNKNOWN;
		}
		env.ifindex = if_nametoindex(arg);
		if (!env.ifindex)
			env.ifindex = strtoul(arg, NULL, 0);
		if (!env.ifindex || !if_indextoname(env.ifindex, env.ifname)) {
			fprintf(stderr,
				"Bad interface index or name (%d): %s\n",
				errno, strerror(errno));
			argp_usage(state);
			return ARGP_ERR_UNKNOWN;
		}
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}
static const struct argp argp = {
.options = opts,
.parser = parse_arg,
.doc = argp_program_doc,
};
/* Initialize env with loopback defaults so the tool works without any
 * address options; feature.action is -EINVAL until -f selects one.
 */
static void set_env_default(void)
{
	env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
	env.feature.action = -EINVAL;
	env.ifindex = -ENODEV;
	strcpy(env.ifname, "unknown");
	/* IPv4-mapped IPv6 loopback for both channels */
	make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT,
		      &env.dut_ctrl_addr, NULL);
	make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,
		      &env.dut_addr, NULL);
	make_sockaddr(AF_INET6, "::ffff:127.0.0.1", 0, &env.tester_addr, NULL);
}
/* DUT echo worker: reflect every well-formed CMD_ECHO TLV back to its
 * sender until the global 'exiting' flag is raised.
 * @arg: pointer to the UDP socket fd to serve.
 */
static void *dut_echo_thread(void *arg)
{
	unsigned char buf[sizeof(struct tlv_hdr)];
	int sockfd = *(int *)arg;

	while (!exiting) {
		struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
		struct sockaddr_storage addr;
		socklen_t addrlen;
		ssize_t n;

		/* recvfrom() treats addrlen as in/out; it must be primed
		 * with the buffer size (it was previously uninitialized).
		 */
		addrlen = sizeof(addr);
		n = recvfrom(sockfd, buf, sizeof(buf), MSG_WAITALL,
			     (struct sockaddr *)&addr, &addrlen);
		/* Use a signed length so recvfrom() errors (-1) are caught,
		 * and require a complete TLV header before reading any of
		 * its fields.
		 */
		if (n < (ssize_t)sizeof(*tlv))
			continue;
		if (n != (ssize_t)ntohs(tlv->len))
			continue;
		if (ntohs(tlv->type) != CMD_ECHO)
			continue;

		sendto(sockfd, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
		       (struct sockaddr *)&addr, addrlen);
	}

	/* close() used to sit after pthread_exit() and never ran, leaking
	 * the socket; returning NULL is equivalent to pthread_exit(0) for
	 * a thread start routine.
	 */
	close(sockfd);
	return NULL;
}
/* Create the DUT echo UDP server and start dut_echo_thread() on it.
 * @t: receives the thread handle.
 * @sockfd: receives the echo socket fd (previously never written because
 *          the parameter was shadow-overwritten with the fd-array pointer).
 * Returns 0 on success, a negative error otherwise.
 */
static int dut_run_echo_thread(pthread_t *t, int *sockfd)
{
	int *fds;
	int err;

	fds = start_reuseport_server(AF_INET6, SOCK_DGRAM, NULL,
				     DUT_ECHO_PORT, 0, 1);
	if (!fds) {
		fprintf(stderr,
			"Failed creating data UDP socket on device %s\n",
			env.ifname);
		return -errno;
	}
	/* report the fd to the caller */
	*sockfd = *fds;

	/* start echo channel; the thread dereferences its argument as int*,
	 * so passing the fd array head is equivalent to passing &fds[0].
	 */
	err = pthread_create(t, NULL, dut_echo_thread, fds);
	if (err) {
		/* pthread_create() returns a positive errno value, so it
		 * must not be negated before strerror() (was strerror(-err)).
		 */
		fprintf(stderr,
			"Failed creating data UDP thread on device %s: %s\n",
			env.ifname, strerror(err));
		free_fds(fds, 1);
		return -EINVAL;
	}

	return 0;
}
/* Attach the XDP program matching env.feature to env.ifindex on the DUT.
 * For NDO_XMIT the devmap/cpumap plumbing is set up so the packet is
 * redirected back out through the same interface.
 * Returns 0 on success, a negative error otherwise.
 */
static int dut_attach_xdp_prog(struct xdp_features *skel, int flags)
{
	enum xdp_action action = env.feature.action;
	struct bpf_program *prog;
	unsigned int key = 0;
	int err, fd = 0;
	if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT) {
		/* slot 0 of dev_map points back at the DUT interface */
		struct bpf_devmap_val entry = {
			.ifindex = env.ifindex,
		};
		err = bpf_map__update_elem(skel->maps.dev_map,
					   &key, sizeof(key),
					   &entry, sizeof(entry), 0);
		if (err < 0)
			return err;
		fd = bpf_program__fd(skel->progs.xdp_do_redirect_cpumap);
		/* NDO_XMIT is exercised through a cpumap redirect */
		action = XDP_REDIRECT;
	}
	switch (action) {
	case XDP_TX:
		prog = skel->progs.xdp_do_tx;
		break;
	case XDP_DROP:
		prog = skel->progs.xdp_do_drop;
		break;
	case XDP_ABORTED:
		prog = skel->progs.xdp_do_aborted;
		break;
	case XDP_PASS:
		prog = skel->progs.xdp_do_pass;
		break;
	case XDP_REDIRECT: {
		/* fd is 0 for plain XDP_REDIRECT, or the cpumap program
		 * installed above for the NDO_XMIT case
		 */
		struct bpf_cpumap_val entry = {
			.qsize = 2048,
			.bpf_prog.fd = fd,
		};
		err = bpf_map__update_elem(skel->maps.cpu_map,
					   &key, sizeof(key),
					   &entry, sizeof(entry), 0);
		if (err < 0)
			return err;
		prog = skel->progs.xdp_do_redirect;
		break;
	}
	default:
		return -EINVAL;
	}
	err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
	if (err)
		fprintf(stderr, "Failed attaching XDP program to device %s\n",
			env.ifname);
	return err;
}
/* Receive one TLV message from sockfd into buf and, if val is non-NULL,
 * copy the payload (up to val_size bytes) out.
 * Returns 0 on success, -EINVAL on error/malformed message, -ENOMEM when
 * the payload does not fit into val.
 */
static int recv_msg(int sockfd, void *buf, size_t bufsize, void *val,
		    size_t val_size)
{
	struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
	ssize_t len;

	len = recv(sockfd, buf, bufsize, 0);
	/* A signed length catches recv() failures (-1, which a size_t would
	 * wrap to SIZE_MAX), and the short-read check must come BEFORE the
	 * header is dereferenced, not after.
	 */
	if (len < (ssize_t)sizeof(*tlv))
		return -EINVAL;
	if (len != (ssize_t)ntohs(tlv->len))
		return -EINVAL;

	if (val) {
		len -= sizeof(*tlv);
		if ((size_t)len > val_size)
			return -ENOMEM;

		memcpy(val, tlv->data, len);
	}

	return 0;
}
/* DUT main loop: accept one tester connection on the control port, then
 * serve CMD_START/CMD_STOP/CMD_GET_XDP_CAP/CMD_GET_STATS requests until
 * CMD_STOP (or an error) ends the session.
 * Returns the last command handler's status.
 */
static int dut_run(struct xdp_features *skel)
{
	int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
	/* NOTE(review): 'state' is read before first assignment in the
	 * CMD_START/CMD_STOP cases below and 'addrlen' is passed to accept()
	 * uninitialized (it is an in/out parameter) — both should be
	 * initialized; confirm against upstream before relying on them.
	 */
	int state, err, *sockfd, ctrl_sockfd, echo_sockfd;
	struct sockaddr_storage ctrl_addr;
	pthread_t dut_thread;
	socklen_t addrlen;
	sockfd = start_reuseport_server(AF_INET6, SOCK_STREAM, NULL,
					DUT_CTRL_PORT, 0, 1);
	if (!sockfd) {
		fprintf(stderr,
			"Failed creating control socket on device %s\n", env.ifname);
		return -errno;
	}
	ctrl_sockfd = accept(*sockfd, (struct sockaddr *)&ctrl_addr, &addrlen);
	if (ctrl_sockfd < 0) {
		fprintf(stderr,
			"Failed accepting connections on device %s control socket\n",
			env.ifname);
		free_fds(sockfd, 1);
		return -errno;
	}
	/* CTRL loop */
	while (!exiting) {
		unsigned char buf[BUFSIZE] = {};
		struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
		err = recv_msg(ctrl_sockfd, buf, BUFSIZE, NULL, 0);
		if (err)
			continue;
		switch (ntohs(tlv->type)) {
		case CMD_START: {
			/* ignore duplicate start requests */
			if (state == CMD_START)
				continue;
			state = CMD_START;
			/* Load the XDP program on the DUT */
			err = dut_attach_xdp_prog(skel, flags);
			if (err)
				goto out;
			err = dut_run_echo_thread(&dut_thread, &echo_sockfd);
			if (err < 0)
				goto out;
			/* reply with a bare ACK TLV */
			tlv->type = htons(CMD_ACK);
			tlv->len = htons(sizeof(*tlv));
			err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
			if (err < 0)
				goto end_thread;
			break;
		}
		case CMD_STOP:
			if (state != CMD_START)
				break;
			state = CMD_STOP;
			exiting = true;
			bpf_xdp_detach(env.ifindex, flags, NULL);
			tlv->type = htons(CMD_ACK);
			tlv->len = htons(sizeof(*tlv));
			err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
			goto end_thread;
		case CMD_GET_XDP_CAP: {
			LIBBPF_OPTS(bpf_xdp_query_opts, opts);
			unsigned long long val;
			size_t n;
			err = bpf_xdp_query(env.ifindex, XDP_FLAGS_DRV_MODE,
					    &opts);
			if (err) {
				fprintf(stderr,
					"Failed querying XDP cap for device %s\n",
					env.ifname);
				goto end_thread;
			}
			/* ACK carrying the 64-bit feature mask, big-endian */
			tlv->type = htons(CMD_ACK);
			n = sizeof(*tlv) + sizeof(opts.feature_flags);
			tlv->len = htons(n);
			val = htobe64(opts.feature_flags);
			memcpy(tlv->data, &val, sizeof(val));
			err = send(ctrl_sockfd, buf, n, 0);
			if (err < 0)
				goto end_thread;
			break;
		}
		case CMD_GET_STATS: {
			unsigned int key = 0, val;
			size_t n;
			err = bpf_map__lookup_elem(skel->maps.dut_stats,
						   &key, sizeof(key),
						   &val, sizeof(val), 0);
			if (err) {
				fprintf(stderr,
					"bpf_map_lookup_elem failed (%d)\n", err);
				goto end_thread;
			}
			/* ACK carrying the 32-bit counter, network order */
			tlv->type = htons(CMD_ACK);
			n = sizeof(*tlv) + sizeof(val);
			tlv->len = htons(n);
			val = htonl(val);
			memcpy(tlv->data, &val, sizeof(val));
			err = send(ctrl_sockfd, buf, n, 0);
			if (err < 0)
				goto end_thread;
			break;
		}
		default:
			break;
		}
	}
end_thread:
	pthread_join(dut_thread, NULL);
out:
	bpf_xdp_detach(env.ifindex, flags, NULL);
	close(ctrl_sockfd);
	free_fds(sockfd, 1);
	return err;
}
/* Decide whether the feature under test was actually observed, combining
 * the DUT-side counter (dut_stats) with the tester-side counter (stats).
 * For PASS/TX/REDIRECT packets must reach the tester; for DROP/ABORTED
 * they must not.
 */
static bool tester_collect_detected_cap(struct xdp_features *skel,
					unsigned int dut_stats)
{
	unsigned int err, key = 0, val;
	/* DUT saw no traffic at all: nothing can be concluded */
	if (!dut_stats)
		return false;
	err = bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key),
				   &val, sizeof(val), 0);
	if (err) {
		fprintf(stderr, "bpf_map_lookup_elem failed (%d)\n", err);
		return false;
	}
	switch (env.feature.action) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
		return val > 0;
	case XDP_DROP:
	case XDP_ABORTED:
		return val == 0;
	default:
		break;
	}
	if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
		return val > 0;
	return false;
}
/* Send a bare command TLV on the control socket and wait for the reply,
 * copying any payload into val (up to val_size bytes).
 * Returns 0 when the DUT answered with CMD_ACK, a negative error otherwise.
 */
static int send_and_recv_msg(int sockfd, enum test_commands cmd, void *val,
			     size_t val_size)
{
	unsigned char buf[BUFSIZE] = {};
	struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
	int err;
	tlv->type = htons(cmd);
	tlv->len = htons(sizeof(*tlv));
	err = send(sockfd, buf, sizeof(*tlv), 0);
	if (err < 0)
		return err;
	/* recv_msg() overwrites buf, so tlv->type below is the reply's */
	err = recv_msg(sockfd, buf, BUFSIZE, val, val_size);
	if (err < 0)
		return err;
	return ntohs(tlv->type) == CMD_ACK ? 0 : -EINVAL;
}
/* Fire one CMD_ECHO TLV at the DUT data channel from a throwaway UDP
 * socket. Returns 0 when the full header was sent, -EINVAL/-errno otherwise.
 */
static int send_echo_msg(void)
{
	unsigned char buf[sizeof(struct tlv_hdr)];
	struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
	int sockfd, n;
	sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (sockfd < 0) {
		fprintf(stderr,
			"Failed creating data UDP socket on device %s\n",
			env.ifname);
		return -errno;
	}
	tlv->type = htons(CMD_ECHO);
	tlv->len = htons(sizeof(*tlv));
	n = sendto(sockfd, buf, sizeof(*tlv), MSG_NOSIGNAL | MSG_CONFIRM,
		   (struct sockaddr *)&env.dut_addr, sizeof(env.dut_addr));
	close(sockfd);
	return n == ntohs(tlv->len) ? 0 : -EINVAL;
}
/* Tester side: attach the local checking XDP program, command the DUT to
 * start, generate echo traffic, then fetch the DUT stats and print whether
 * the feature was detected and/or advertised.
 * Returns 0 on success, a negative error otherwise.
 */
static int tester_run(struct xdp_features *skel)
{
	int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
	unsigned long long advertised_feature;
	struct bpf_program *prog;
	unsigned int stats;
	int i, err, sockfd;
	bool detected_cap;

	sockfd = socket(AF_INET6, SOCK_STREAM, 0);
	if (sockfd < 0) {
		fprintf(stderr,
			"Failed creating tester service control socket\n");
		return -errno;
	}

	/* fix: the socket used to leak on this early-return path */
	if (settimeo(sockfd, 1000) < 0) {
		close(sockfd);
		return -EINVAL;
	}

	err = connect(sockfd, (struct sockaddr *)&env.dut_ctrl_addr,
		      sizeof(env.dut_ctrl_addr));
	if (err) {
		/* capture errno before fprintf() can clobber it, and do not
		 * leak the socket (both were bugs on this path)
		 */
		err = -errno;
		fprintf(stderr,
			"Failed connecting to the Device Under Test control socket\n");
		close(sockfd);
		return err;
	}

	err = send_and_recv_msg(sockfd, CMD_GET_XDP_CAP, &advertised_feature,
				sizeof(advertised_feature));
	if (err < 0) {
		close(sockfd);
		return err;
	}

	advertised_feature = be64toh(advertised_feature);

	/* TX-observing program for TX/NDO_XMIT, RX-observing otherwise */
	if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT ||
	    env.feature.action == XDP_TX)
		prog = skel->progs.xdp_tester_check_tx;
	else
		prog = skel->progs.xdp_tester_check_rx;

	err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
	if (err) {
		fprintf(stderr, "Failed attaching XDP program to device %s\n",
			env.ifname);
		goto out;
	}

	err = send_and_recv_msg(sockfd, CMD_START, NULL, 0);
	if (err)
		goto out;

	/* up to 10 echo probes, one per second */
	for (i = 0; i < 10 && !exiting; i++) {
		err = send_echo_msg();
		if (err < 0)
			goto out;

		sleep(1);
	}

	err = send_and_recv_msg(sockfd, CMD_GET_STATS, &stats, sizeof(stats));
	if (err)
		goto out;

	/* stop the test */
	err = send_and_recv_msg(sockfd, CMD_STOP, NULL, 0);
	/* send a new echo message to wake echo thread of the dut */
	send_echo_msg();

	detected_cap = tester_collect_detected_cap(skel, ntohl(stats));

	fprintf(stdout, "Feature %s: [%s][%s]\n", get_xdp_feature_str(),
		detected_cap ? GREEN("DETECTED") : RED("NOT DETECTED"),
		env.feature.drv_feature & advertised_feature ? GREEN("ADVERTISED")
							     : RED("NOT ADVERTISED"));
out:
	bpf_xdp_detach(env.ifindex, flags, NULL);
	close(sockfd);
	return err < 0 ? err : 0;
}
/* Entry point: parse options, open/load/attach the xdp_features skeleton,
 * then run either the tester or the DUT role depending on -t.
 * Returns 0 on success, a positive error code otherwise.
 */
int main(int argc, char **argv)
{
	struct xdp_features *skel;
	int err;
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	set_env_default();
	/* Parse command line arguments */
	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
	if (err)
		return err;
	if (env.ifindex < 0) {
		fprintf(stderr, "Invalid device name %s\n", env.ifname);
		return -ENODEV;
	}
	/* Load and verify BPF application */
	skel = xdp_features__open();
	if (!skel) {
		fprintf(stderr, "Failed to open and load BPF skeleton\n");
		return -EINVAL;
	}
	/* bake the data-channel endpoints into the program's rodata */
	skel->rodata->tester_addr =
		((struct sockaddr_in6 *)&env.tester_addr)->sin6_addr;
	skel->rodata->dut_addr =
		((struct sockaddr_in6 *)&env.dut_addr)->sin6_addr;
	/* Load & verify BPF programs */
	err = xdp_features__load(skel);
	if (err) {
		fprintf(stderr, "Failed to load and verify BPF skeleton\n");
		goto cleanup;
	}
	err = xdp_features__attach(skel);
	if (err) {
		fprintf(stderr, "Failed to attach BPF skeleton\n");
		goto cleanup;
	}
	if (env.is_tester) {
		/* Tester */
		fprintf(stdout, "Starting tester service on device %s\n",
			env.ifname);
		err = tester_run(skel);
	} else {
		/* DUT */
		fprintf(stdout, "Starting test on device %s\n", env.ifname);
		err = dut_run(skel);
	}
cleanup:
	xdp_features__destroy(skel);
	return err < 0 ? -err : 0;
}
| linux-master | tools/testing/selftests/bpf/xdp_features.c |
../../../../kernel/bpf/disasm.c | linux-master | tools/testing/selftests/bpf/disasm.c |
// SPDX-License-Identifier: GPL-2.0
/* Reference program for verifying XDP metadata on real HW. Functional test
* only, doesn't test the performance.
*
* RX:
* - UDP 9091 packets are diverted into AF_XDP
* - Metadata verified:
* - rx_timestamp
* - rx_hash
*
* TX:
* - TBD
*/
#include <test_progs.h>
#include <network_helpers.h>
#include "xdp_hw_metadata.skel.h"
#include "xsk.h"
#include <error.h>
#include <linux/errqueue.h>
#include <linux/if_link.h>
#include <linux/net_tstamp.h>
#include <linux/udp.h>
#include <linux/sockios.h>
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
#include <time.h>
#include "xdp_metadata.h"
#define UMEM_NUM 16
#define UMEM_FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE
#define UMEM_SIZE (UMEM_FRAME_SIZE * UMEM_NUM)
#define XDP_FLAGS (XDP_FLAGS_DRV_MODE | XDP_FLAGS_REPLACE)
struct xsk {
void *umem_area;
struct xsk_umem *umem;
struct xsk_ring_prod fill;
struct xsk_ring_cons comp;
struct xsk_ring_prod tx;
struct xsk_ring_cons rx;
struct xsk_socket *socket;
};
struct xdp_hw_metadata *bpf_obj;
struct xsk *rx_xsk;
const char *ifname;
int ifindex;
int rxq;
void test__fail(void) { /* for network_helpers.c */ }
/* Create an AF_XDP socket (copy mode) plus its UMEM on one RX queue.
 * The first half of the UMEM frames is reserved for TX, the second half is
 * pre-posted to the fill ring for RX.
 * Returns 0 on success, a negative error otherwise.
 */
static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
{
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	const struct xsk_socket_config socket_config = {
		.rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_COPY,
	};
	const struct xsk_umem_config umem_config = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,
	};
	__u32 idx;
	u64 addr;
	int ret;
	int i;
	xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (xsk->umem_area == MAP_FAILED)
		return -ENOMEM;
	ret = xsk_umem__create(&xsk->umem,
			       xsk->umem_area, UMEM_SIZE,
			       &xsk->fill,
			       &xsk->comp,
			       &umem_config);
	if (ret)
		return ret;
	ret = xsk_socket__create(&xsk->socket, ifindex, queue_id,
				 xsk->umem,
				 &xsk->rx,
				 &xsk->tx,
				 &socket_config);
	if (ret)
		return ret;
	/* First half of umem is for TX. This way address matches 1-to-1
	 * to the completion queue index.
	 */
	for (i = 0; i < UMEM_NUM / 2; i++) {
		addr = i * UMEM_FRAME_SIZE;
		printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr);
	}
	/* Second half of umem is for RX. */
	ret = xsk_ring_prod__reserve(&xsk->fill, UMEM_NUM / 2, &idx);
	for (i = 0; i < UMEM_NUM / 2; i++) {
		addr = (UMEM_NUM / 2 + i) * UMEM_FRAME_SIZE;
		printf("%p: rx_desc[%d] -> %lx\n", xsk, i, addr);
		/* NOTE(review): fill slots are indexed with i rather than
		 * idx + i — correct only because idx starts at 0 on a fresh
		 * ring; confirm before reusing this helper elsewhere.
		 */
		*xsk_ring_prod__fill_addr(&xsk->fill, i) = addr;
	}
	xsk_ring_prod__submit(&xsk->fill, ret);
	return 0;
}
/* Tear down one AF_XDP socket, its UMEM and the backing mapping.
 * NOTE(review): the umem is deleted before the socket that references it;
 * libxsk calls usually go socket-then-umem — confirm this ordering is
 * intentional.
 */
static void close_xsk(struct xsk *xsk)
{
	if (xsk->umem)
		xsk_umem__delete(xsk->umem);
	if (xsk->socket)
		xsk_socket__delete(xsk->socket);
	munmap(xsk->umem_area, UMEM_SIZE);
}
/* Hand a consumed frame address back to the fill ring so the kernel can
 * reuse it for future RX. Silently drops the frame if no slot is free.
 */
static void refill_rx(struct xsk *xsk, __u64 addr)
{
	__u32 slot;

	if (xsk_ring_prod__reserve(&xsk->fill, 1, &slot) != 1)
		return;

	printf("%p: complete idx=%u addr=%llx\n", xsk, slot, addr);
	*xsk_ring_prod__fill_addr(&xsk->fill, slot) = addr;
	xsk_ring_prod__submit(&xsk->fill, 1);
}
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
/* Read the given clock and return its value as nanoseconds since the
 * clock's epoch; exits via error() on failure.
 * See clock_gettime(2) for the available clock ids.
 */
static __u64 gettime(clockid_t clock_id)
{
	struct timespec ts;

	if (clock_gettime(clock_id, &ts) < 0)
		error(-1, errno, "Error with clock_gettime()");

	return (__u64)ts.tv_sec * NANOSEC_PER_SEC + ts.tv_nsec;
}
/* Print the XDP metadata the BPF program prepended to the packet: RX hash
 * (or its error) and RX/XDP/userspace timestamps with their deltas.
 * @data: start of packet payload; the struct xdp_meta sits immediately
 *        before it.
 */
static void verify_xdp_metadata(void *data, clockid_t clock_id)
{
	struct xdp_meta *meta;
	/* metadata area precedes the packet data */
	meta = data - sizeof(*meta);
	if (meta->rx_hash_err < 0)
		printf("No rx_hash err=%d\n", meta->rx_hash_err);
	else
		printf("rx_hash: 0x%X with RSS type:0x%X\n",
		       meta->rx_hash, meta->rx_hash_type);
	printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp,
	       (double)meta->rx_timestamp / NANOSEC_PER_SEC);
	if (meta->rx_timestamp) {
		__u64 usr_clock = gettime(clock_id);
		__u64 xdp_clock = meta->xdp_timestamp;
		/* delta_X: HW RX -> XDP; delta_X2U: XDP -> userspace */
		__s64 delta_X = xdp_clock - meta->rx_timestamp;
		__s64 delta_X2U = usr_clock - xdp_clock;
		printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
		       xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC,
		       (double)delta_X / NANOSEC_PER_SEC,
		       (double)delta_X / 1000);
		printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
		       usr_clock, (double)usr_clock / NANOSEC_PER_SEC,
		       (double)delta_X2U / NANOSEC_PER_SEC,
		       (double)delta_X2U / 1000);
	}
}
/* Drain one datagram from the skb path socket and report whether a hardware
 * timestamp (SCM_TIMESTAMPING slot ts[2]) was attached to it.
 */
static void verify_skb_metadata(int fd)
{
	char cmsg_buf[1024];
	char packet_buf[128];
	struct scm_timestamping *ts;
	struct iovec packet_iov;
	struct cmsghdr *cmsg;
	struct msghdr hdr;
	memset(&hdr, 0, sizeof(hdr));
	hdr.msg_iov = &packet_iov;
	hdr.msg_iovlen = 1;
	packet_iov.iov_base = packet_buf;
	packet_iov.iov_len = sizeof(packet_buf);
	hdr.msg_control = cmsg_buf;
	hdr.msg_controllen = sizeof(cmsg_buf);
	if (recvmsg(fd, &hdr, 0) < 0)
		error(1, errno, "recvmsg");
	/* walk the ancillary data looking for the timestamping cmsg */
	for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != NULL;
	     cmsg = CMSG_NXTHDR(&hdr, cmsg)) {
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		switch (cmsg->cmsg_type) {
		case SCM_TIMESTAMPING:
			ts = (struct scm_timestamping *)CMSG_DATA(cmsg);
			/* ts[2] carries the raw hardware timestamp */
			if (ts->ts[2].tv_sec || ts->ts[2].tv_nsec) {
				printf("found skb hwtstamp = %lu.%lu\n",
				       ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
				return;
			}
			break;
		default:
			break;
		}
	}
	printf("skb hwtstamp is not found!\n");
}
/* Main polling loop: wait on every XSK RX ring plus the skb-path server
 * socket, and verify metadata on each packet that arrives. Runs until
 * poll() fails (e.g. interrupted by a signal). Returns 0.
 */
static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id)
{
	const struct xdp_desc *rx_desc;
	struct pollfd fds[rxq + 1];
	__u64 comp_addr;
	__u64 addr;
	__u32 idx;
	int ret;
	int i;
	/* one pollfd per RX queue's XSK ... */
	for (i = 0; i < rxq; i++) {
		fds[i].fd = xsk_socket__fd(rx_xsk[i].socket);
		fds[i].events = POLLIN;
		fds[i].revents = 0;
	}
	/* ... plus the UDP server socket for the skb path */
	fds[rxq].fd = server_fd;
	fds[rxq].events = POLLIN;
	fds[rxq].revents = 0;
	while (true) {
		errno = 0;
		ret = poll(fds, rxq + 1, 1000);
		printf("poll: %d (%d) skip=%llu fail=%llu redir=%llu\n",
		       ret, errno, bpf_obj->bss->pkts_skip,
		       bpf_obj->bss->pkts_fail, bpf_obj->bss->pkts_redir);
		if (ret < 0)
			break;
		if (ret == 0)
			continue;
		if (fds[rxq].revents)
			verify_skb_metadata(server_fd);
		for (i = 0; i < rxq; i++) {
			if (fds[i].revents == 0)
				continue;
			struct xsk *xsk = &rx_xsk[i];
			/* consume at most one descriptor per wakeup */
			ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
			printf("xsk_ring_cons__peek: %d\n", ret);
			if (ret != 1)
				continue;
			rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
			/* unaligned-chunk mode: split the cooked address into
			 * the frame base (for refill) and the data offset
			 */
			comp_addr = xsk_umem__extract_addr(rx_desc->addr);
			addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
			printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
			       xsk, idx, rx_desc->addr, addr, comp_addr);
			verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
					    clock_id);
			xsk_ring_cons__release(&xsk->rx, 1);
			refill_rx(xsk, comp_addr);
		}
	}
	return 0;
}
struct ethtool_channels {
__u32 cmd;
__u32 max_rx;
__u32 max_tx;
__u32 max_other;
__u32 max_combined;
__u32 rx_count;
__u32 tx_count;
__u32 other_count;
__u32 combined_count;
};
#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
/* Query the number of RX queues for ifname via the ETHTOOL_GCHANNELS
 * ioctl; exits via error() on any failure.
 * Returns rx_count + combined_count (drivers report one or the other).
 */
static int rxq_num(const char *ifname)
{
	struct ethtool_channels ch = {
		.cmd = ETHTOOL_GCHANNELS,
	};
	/* designated initializer zeroes the rest of ifr, so ifr_name below
	 * stays NUL-terminated after the bounded strncpy
	 */
	struct ifreq ifr = {
		.ifr_data = (void *)&ch,
	};
	strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
	int fd, ret;
	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0)
		error(1, errno, "socket");
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret < 0)
		error(1, errno, "ioctl(SIOCETHTOOL)");
	close(fd);
	return ch.rx_count + ch.combined_count;
}
/* Issue a SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctl for ifname with cfg as the
 * in/out payload; exits via error() on failure.
 */
static void hwtstamp_ioctl(int op, const char *ifname, struct hwtstamp_config *cfg)
{
	struct ifreq ifr = {
		.ifr_data = (void *)cfg,
	};
	strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
	int fd, ret;
	/* any socket works as an ioctl handle here */
	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0)
		error(1, errno, "socket");
	ret = ioctl(fd, op, &ifr);
	if (ret < 0)
		error(1, errno, "ioctl(%d)", op);
	close(fd);
}
static struct hwtstamp_config saved_hwtstamp_cfg;
static const char *saved_hwtstamp_ifname;
/* atexit() hook: restore the hardware timestamping configuration that was
 * saved by hwtstamp_enable().
 */
static void hwtstamp_restore(void)
{
	hwtstamp_ioctl(SIOCSHWTSTAMP, saved_hwtstamp_ifname, &saved_hwtstamp_cfg);
}
/* Enable hardware RX timestamping for all packets on ifname, saving the
 * previous configuration and registering hwtstamp_restore() to undo it at
 * process exit.
 */
static void hwtstamp_enable(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	/* remember the current settings for the atexit() restore */
	hwtstamp_ioctl(SIOCGHWTSTAMP, ifname, &saved_hwtstamp_cfg);
	saved_hwtstamp_ifname = strdup(ifname);
	atexit(hwtstamp_restore);
	hwtstamp_ioctl(SIOCSHWTSTAMP, ifname, &cfg);
}
/* Detach the XDP program, close every XSK and destroy the skeleton.
 * Safe to call even when setup only partially succeeded (bpf_obj checks).
 */
static void cleanup(void)
{
	LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
	int ret;
	int i;
	if (bpf_obj) {
		/* only detach our own program (XDP_FLAGS_REPLACE semantics) */
		opts.old_prog_fd = bpf_program__fd(bpf_obj->progs.rx);
		if (opts.old_prog_fd >= 0) {
			printf("detaching bpf program....\n");
			ret = bpf_xdp_detach(ifindex, XDP_FLAGS, &opts);
			if (ret)
				printf("failed to detach XDP program: %d\n", ret);
		}
	}
	for (i = 0; i < rxq; i++)
		close_xsk(&rx_xsk[i]);
	if (bpf_obj)
		xdp_hw_metadata__destroy(bpf_obj);
}
/* SIGINT handler: intentionally empty — delivering the signal is enough
 * to make the blocking poll() in verify_metadata() return with EINTR.
 */
static void handle_signal(int sig)
{
	/* interrupting poll() is all we need */
}
/* Request SO_TIMESTAMPING reports (flags in val) on the given socket;
 * exits via error() on failure.
 */
static void timestamping_enable(int fd, int val)
{
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)) < 0)
		error(1, errno, "setsockopt(SO_TIMESTAMPING)");
}
/* Entry point: open an XSK per RX queue on the given device, load and
 * attach the xdp_hw_metadata program, start the skb-path UDP server, and
 * loop verifying metadata until interrupted.
 */
int main(int argc, char *argv[])
{
	clockid_t clock_id = CLOCK_TAI;
	int server_fd = -1;
	int ret;
	int i;
	struct bpf_program *prog;
	if (argc != 2) {
		fprintf(stderr, "pass device name\n");
		return -1;
	}
	ifname = argv[1];
	ifindex = if_nametoindex(ifname);
	rxq = rxq_num(ifname);
	printf("rxq: %d\n", rxq);
	/* enable HW RX timestamps; restored automatically via atexit() */
	hwtstamp_enable(ifname);
	rx_xsk = malloc(sizeof(struct xsk) * rxq);
	if (!rx_xsk)
		error(1, ENOMEM, "malloc");
	/* one AF_XDP socket per RX queue */
	for (i = 0; i < rxq; i++) {
		printf("open_xsk(%s, %p, %d)\n", ifname, &rx_xsk[i], i);
		ret = open_xsk(ifindex, &rx_xsk[i], i);
		if (ret)
			error(1, -ret, "open_xsk");
		printf("xsk_socket__fd() -> %d\n", xsk_socket__fd(rx_xsk[i].socket));
	}
	printf("open bpf program...\n");
	bpf_obj = xdp_hw_metadata__open();
	if (libbpf_get_error(bpf_obj))
		error(1, libbpf_get_error(bpf_obj), "xdp_hw_metadata__open");
	/* bind the program to the device so dev-bound kfuncs resolve */
	prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
	bpf_program__set_ifindex(prog, ifindex);
	bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
	printf("load bpf program...\n");
	ret = xdp_hw_metadata__load(bpf_obj);
	if (ret)
		error(1, -ret, "xdp_hw_metadata__load");
	printf("prepare skb endpoint...\n");
	server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 9092, 1000);
	if (server_fd < 0)
		error(1, errno, "start_server");
	timestamping_enable(server_fd,
			    SOF_TIMESTAMPING_SOFTWARE |
			    SOF_TIMESTAMPING_RAW_HARDWARE);
	/* map each queue id to its XSK fd so the program can redirect */
	printf("prepare xsk map...\n");
	for (i = 0; i < rxq; i++) {
		int sock_fd = xsk_socket__fd(rx_xsk[i].socket);
		__u32 queue_id = i;
		printf("map[%d] = %d\n", queue_id, sock_fd);
		ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
		if (ret)
			error(1, -ret, "bpf_map_update_elem");
	}
	printf("attach bpf program...\n");
	ret = bpf_xdp_attach(ifindex,
			     bpf_program__fd(bpf_obj->progs.rx),
			     XDP_FLAGS, NULL);
	if (ret)
		error(1, -ret, "bpf_xdp_attach");
	signal(SIGINT, handle_signal);
	ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id);
	close(server_fd);
	cleanup();
	if (ret)
		error(1, -ret, "verify_metadata");
}
| linux-master | tools/testing/selftests/bpf/xdp_hw_metadata.c |
../../../bpf/bpftool/json_writer.c | linux-master | tools/testing/selftests/bpf/json_writer.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <linux/limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <ftw.h>
#include "cgroup_helpers.h"
#include "bpf_util.h"
/*
* To avoid relying on the system setup, when setup_cgroup_env is called
* we create a new mount namespace, and cgroup namespace. The cgroupv2
* root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
* have cgroupv2 enabled at this point in time. It's easier to create our
* own mount namespace and manage it ourselves. We assume /mnt exists.
*
* Related cgroupv1 helpers are named *classid*(), since we only use the
* net_cls controller for tagging net_cls.classid. We assume the default
* mount under /sys/fs/cgroup/net_cls, which should be the case for the
* vast majority of users.
*/
#define WALK_FD_LIMIT 16
#define CGROUP_MOUNT_PATH "/mnt"
#define CGROUP_MOUNT_DFLT "/sys/fs/cgroup"
#define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls"
#define CGROUP_WORK_DIR "/cgroup-test-work-dir"
#define format_cgroup_path_pid(buf, path, pid) \
snprintf(buf, sizeof(buf), "%s%s%d%s", CGROUP_MOUNT_PATH, \
CGROUP_WORK_DIR, pid, path)
#define format_cgroup_path(buf, path) \
format_cgroup_path_pid(buf, path, getpid())
#define format_parent_cgroup_path(buf, path) \
format_cgroup_path_pid(buf, path, getppid())
#define format_classid_path(buf) \
snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
CGROUP_WORK_DIR)
/* Enable the given space-separated controllers in cgroup_path's
 * cgroup.subtree_control; with controllers == NULL, enable everything
 * listed in cgroup.controllers. Returns 0 on success, 1 on error.
 */
static int __enable_controllers(const char *cgroup_path, const char *controllers)
{
	char path[PATH_MAX + 1];
	char enable[PATH_MAX + 1];
	char *c, *c2;
	int fd, cfd;
	ssize_t len;
	/* If no controllers are passed, enable all available controllers */
	if (!controllers) {
		snprintf(path, sizeof(path), "%s/cgroup.controllers",
			 cgroup_path);
		fd = open(path, O_RDONLY);
		if (fd < 0) {
			log_err("Opening cgroup.controllers: %s", path);
			return 1;
		}
		len = read(fd, enable, sizeof(enable) - 1);
		if (len < 0) {
			close(fd);
			log_err("Reading cgroup.controllers: %s", path);
			return 1;
		} else if (len == 0) { /* No controllers to enable */
			close(fd);
			return 0;
		}
		enable[len] = 0;
		close(fd);
	} else {
		bpf_strlcpy(enable, controllers, sizeof(enable));
	}
	snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
	cfd = open(path, O_RDWR);
	if (cfd < 0) {
		log_err("Opening cgroup.subtree_control: %s", path);
		return 1;
	}
	/* write one "+<controller>" line per token */
	for (c = strtok_r(enable, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) {
		if (dprintf(cfd, "+%s\n", c) <= 0) {
			log_err("Enabling controller %s: %s", c, path);
			close(cfd);
			return 1;
		}
	}
	close(cfd);
	return 0;
}
/**
* enable_controllers() - Enable cgroup v2 controllers
* @relative_path: The cgroup path, relative to the workdir
* @controllers: List of controllers to enable in cgroup.controllers format
*
*
* Enable given cgroup v2 controllers, if @controllers is NULL, enable all
* available controllers.
*
* If successful, 0 is returned.
*/
int enable_controllers(const char *relative_path, const char *controllers)
{
char cgroup_path[PATH_MAX + 1];
format_cgroup_path(cgroup_path, relative_path);
return __enable_controllers(cgroup_path, controllers);
}
/* Write @buf verbatim to <cgroup_path>/<file>.
 * Returns 0 on success, 1 on failure (error already logged).
 */
static int __write_cgroup_file(const char *cgroup_path, const char *file,
			       const char *buf)
{
	char file_path[PATH_MAX + 1];
	int fd;

	snprintf(file_path, sizeof(file_path), "%s/%s", cgroup_path, file);
	fd = open(file_path, O_RDWR);
	if (fd < 0) {
		log_err("Opening %s", file_path);
		return 1;
	}

	if (dprintf(fd, "%s", buf) <= 0) {
		log_err("Writing to %s", file_path);
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
/**
* write_cgroup_file() - Write to a cgroup file
* @relative_path: The cgroup path, relative to the workdir
* @file: The name of the file in cgroupfs to write to
* @buf: Buffer to write to the file
*
* Write to a file in the given cgroup's directory.
*
* If successful, 0 is returned.
*/
int write_cgroup_file(const char *relative_path, const char *file,
const char *buf)
{
char cgroup_path[PATH_MAX - 24];
format_cgroup_path(cgroup_path, relative_path);
return __write_cgroup_file(cgroup_path, file, buf);
}
/**
* write_cgroup_file_parent() - Write to a cgroup file in the parent process
* workdir
* @relative_path: The cgroup path, relative to the parent process workdir
* @file: The name of the file in cgroupfs to write to
* @buf: Buffer to write to the file
*
* Write to a file in the given cgroup's directory under the parent process
* workdir.
*
* If successful, 0 is returned.
*/
int write_cgroup_file_parent(const char *relative_path, const char *file,
const char *buf)
{
char cgroup_path[PATH_MAX - 24];
format_parent_cgroup_path(cgroup_path, relative_path);
return __write_cgroup_file(cgroup_path, file, buf);
}
/**
* setup_cgroup_environment() - Setup the cgroup environment
*
* After calling this function, cleanup_cgroup_environment should be called
* once testing is complete.
*
* This function will print an error to stderr and return 1 if it is unable
* to setup the cgroup environment. If setup is successful, 0 is returned.
*/
int setup_cgroup_environment(void)
{
	/* NOTE(review): sized below PATH_MAX, presumably to avoid snprintf
	 * truncation warnings from format_cgroup_path() appending the workdir
	 * suffix — TODO confirm
	 */
	char cgroup_workdir[PATH_MAX - 24];

	format_cgroup_path(cgroup_workdir, "");

	/* private mount namespace so our cgroup2 mount can't leak out */
	if (unshare(CLONE_NEWNS)) {
		log_err("unshare");
		return 1;
	}

	if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
		log_err("mount fakeroot");
		return 1;
	}

	/* EBUSY means cgroup2 is already mounted there, which is fine */
	if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL) && errno != EBUSY) {
		log_err("mount cgroup2");
		return 1;
	}

	/* Cleanup existing failed runs, now that the environment is setup */
	cleanup_cgroup_environment();

	if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
		log_err("mkdir cgroup work dir");
		return 1;
	}

	/* Enable all available controllers to increase test coverage */
	if (__enable_controllers(CGROUP_MOUNT_PATH, NULL) ||
	    __enable_controllers(cgroup_workdir, NULL))
		return 1;

	return 0;
}
static int nftwfunc(const char *filename, const struct stat *statptr,
int fileflags, struct FTW *pfwt)
{
if ((fileflags & FTW_D) && rmdir(filename))
log_err("Removing cgroup: %s", filename);
return 0;
}
/* Move the calling process into the cgroup rooted at @cgroup_path by
 * writing its pid into <cgroup_path>/cgroup.procs.
 * Returns 0 on success, 1 on failure (error already logged).
 */
static int join_cgroup_from_top(const char *cgroup_path)
{
	char cgroup_procs_path[PATH_MAX + 1];
	pid_t pid = getpid();
	int fd, rc = 0;

	snprintf(cgroup_procs_path, sizeof(cgroup_procs_path),
		 "%s/cgroup.procs", cgroup_path);

	fd = open(cgroup_procs_path, O_WRONLY);
	if (fd < 0) {
		log_err("Opening Cgroup Procs: %s", cgroup_procs_path);
		return 1;
	}

	if (dprintf(fd, "%d\n", pid) < 0) {
		log_err("Joining Cgroup");
		rc = 1;
	}

	close(fd);
	return rc;
}
/**
* join_cgroup() - Join a cgroup
* @relative_path: The cgroup path, relative to the workdir, to join
*
* This function expects a cgroup to already be created, relative to the cgroup
* work dir, and it joins it. For example, passing "/my-cgroup" as the path
* would actually put the calling process into the cgroup
* "/cgroup-test-work-dir/my-cgroup"
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
int join_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
format_cgroup_path(cgroup_path, relative_path);
return join_cgroup_from_top(cgroup_path);
}
/**
* join_root_cgroup() - Join the root cgroup
*
* This function joins the root cgroup.
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
int join_root_cgroup(void)
{
return join_cgroup_from_top(CGROUP_MOUNT_PATH);
}
/**
* join_parent_cgroup() - Join a cgroup in the parent process workdir
* @relative_path: The cgroup path, relative to parent process workdir, to join
*
* See join_cgroup().
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
int join_parent_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
format_parent_cgroup_path(cgroup_path, relative_path);
return join_cgroup_from_top(cgroup_path);
}
/**
* cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment
*
* This is an idempotent function to delete all temporary cgroups that
* have been created during the test, including the cgroup testing work
* directory.
*
* At call time, it moves the calling process to the root cgroup, and then
* runs the deletion process. It is idempotent, and should not fail, unless
* a process is lingering.
*
* On failure, it will print an error to stderr, and try to continue.
*/
void cleanup_cgroup_environment(void)
{
char cgroup_workdir[PATH_MAX + 1];
format_cgroup_path(cgroup_workdir, "");
join_cgroup_from_top(CGROUP_MOUNT_PATH);
nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
}
/**
* get_root_cgroup() - Get the FD of the root cgroup
*
* On success, it returns the file descriptor. On failure, it returns -1.
* If there is a failure, it prints the error to stderr.
*/
int get_root_cgroup(void)
{
	/* Open the cgroup2 mount point itself; callers use the fd with
	 * BPF_F-style cgroup APIs.
	 */
	int root_fd = open(CGROUP_MOUNT_PATH, O_RDONLY);

	if (root_fd >= 0)
		return root_fd;

	log_err("Opening root cgroup");
	return -1;
}
/*
* remove_cgroup() - Remove a cgroup
* @relative_path: The cgroup path, relative to the workdir, to remove
*
* This function expects a cgroup to already be created, relative to the cgroup
* work dir. It also expects the cgroup doesn't have any children or live
* processes and it removes the cgroup.
*
* On failure, it will print an error to stderr.
*/
void remove_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
format_cgroup_path(cgroup_path, relative_path);
if (rmdir(cgroup_path))
log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
}
/**
* create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD
* @relative_path: The cgroup path, relative to the workdir, to join
*
* This function creates a cgroup under the top level workdir and returns the
* file descriptor. It is idempotent.
*
* On success, it returns the file descriptor. On failure it returns -1.
* If there is a failure, it prints the error to stderr.
*/
int create_and_get_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
int fd;
format_cgroup_path(cgroup_path, relative_path);
if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
log_err("mkdiring cgroup %s .. %s", relative_path, cgroup_path);
return -1;
}
fd = open(cgroup_path, O_RDONLY);
if (fd < 0) {
log_err("Opening Cgroup");
return -1;
}
return fd;
}
/**
* get_cgroup_id() - Get cgroup id for a particular cgroup path
* @relative_path: The cgroup path, relative to the workdir, to join
*
* On success, it returns the cgroup id. On failure it returns 0,
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
unsigned long long get_cgroup_id(const char *relative_path)
{
	int dirfd, err, flags, mount_id, fhsize;
	/* cgroupfs file handles are 8 bytes wide and contain the cgroup id */
	union {
		unsigned long long cgid;
		unsigned char raw_bytes[8];
	} id;
	char cgroup_workdir[PATH_MAX + 1];
	struct file_handle *fhp, *fhp2;
	unsigned long long ret = 0;

	format_cgroup_path(cgroup_workdir, relative_path);

	dirfd = AT_FDCWD;
	flags = 0;
	fhsize = sizeof(*fhp);
	/* calloc zeroes handle_bytes, making the first call a size probe */
	fhp = calloc(1, fhsize);
	if (!fhp) {
		log_err("calloc");
		return 0;
	}
	/* Probe call: with handle_bytes == 0 this is EXPECTED to fail
	 * while the kernel fills in the required handle size — hence
	 * err >= 0 (unexpected success) or a size other than 8 is the
	 * error case here.
	 */
	err = name_to_handle_at(dirfd, cgroup_workdir, fhp, &mount_id, flags);
	if (err >= 0 || fhp->handle_bytes != 8) {
		log_err("name_to_handle_at");
		goto free_mem;
	}

	/* Retry with a buffer large enough for the reported handle */
	fhsize = sizeof(struct file_handle) + fhp->handle_bytes;
	fhp2 = realloc(fhp, fhsize);
	if (!fhp2) {
		log_err("realloc");
		goto free_mem;
	}
	err = name_to_handle_at(dirfd, cgroup_workdir, fhp2, &mount_id, flags);
	fhp = fhp2;
	if (err < 0) {
		log_err("name_to_handle_at");
		goto free_mem;
	}

	/* Reinterpret the raw 8-byte handle as the cgroup id */
	memcpy(id.raw_bytes, fhp->f_handle, 8);
	ret = id.cgid;

free_mem:
	free(fhp);
	return ret;
}
/* Set up the cgroup environment, create @path under the workdir, and join
 * it. Returns the cgroup fd on success, a negative value on failure
 * (the environment is torn down again on failure after setup succeeded).
 */
int cgroup_setup_and_join(const char *path)
{
	int fd;

	if (setup_cgroup_environment()) {
		fprintf(stderr, "Failed to setup cgroup environment\n");
		return -EINVAL;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr, "Failed to create test cgroup\n");
		goto err_cleanup;
	}

	if (join_cgroup(path)) {
		fprintf(stderr, "Failed to join cgroup\n");
		fd = -EINVAL;
		goto err_cleanup;
	}

	return fd;

err_cleanup:
	cleanup_cgroup_environment();
	return fd;
}
/**
* setup_classid_environment() - Setup the cgroupv1 net_cls environment
*
* After calling this function, cleanup_classid_environment should be called
* once testing is complete.
*
* This function will print an error to stderr and return 1 if it is unable
* to setup the cgroup environment. If setup is successful, 0 is returned.
*/
int setup_classid_environment(void)
{
	char cgroup_workdir[PATH_MAX + 1];

	format_classid_path(cgroup_workdir);

	/* tmpfs over the default cgroup mount point; EBUSY means something
	 * is mounted there already, which we tolerate
	 */
	if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
	    errno != EBUSY) {
		log_err("mount cgroup base");
		return 1;
	}

	if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
		log_err("mkdir cgroup net_cls");
		return 1;
	}

	/* mount a cgroupv1 hierarchy with only the net_cls controller */
	if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
	    errno != EBUSY) {
		log_err("mount cgroup net_cls");
		return 1;
	}

	/* remove leftovers from previous (possibly failed) runs */
	cleanup_classid_environment();

	if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
		log_err("mkdir cgroup work dir");
		return 1;
	}

	return 0;
}
/**
* set_classid() - Set a cgroupv1 net_cls classid
* @id: the numeric classid
*
* Writes the passed classid into the cgroup work dir's net_cls.classid
* file in order to later on trigger socket tagging.
*
* On success, it returns 0, otherwise on failure it returns 1. If there
* is a failure, it prints the error to stderr.
*/
int set_classid(unsigned int id)
{
char cgroup_workdir[PATH_MAX - 42];
char cgroup_classid_path[PATH_MAX + 1];
int fd, rc = 0;
format_classid_path(cgroup_workdir);
snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
"%s/net_cls.classid", cgroup_workdir);
fd = open(cgroup_classid_path, O_WRONLY);
if (fd < 0) {
log_err("Opening cgroup classid: %s", cgroup_classid_path);
return 1;
}
if (dprintf(fd, "%u\n", id) < 0) {
log_err("Setting cgroup classid");
rc = 1;
}
close(fd);
return rc;
}
/**
* join_classid() - Join a cgroupv1 net_cls classid
*
* This function expects the cgroup work dir to be already created, as we
* join it here. This causes the process sockets to be tagged with the given
* net_cls classid.
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
int join_classid(void)
{
char cgroup_workdir[PATH_MAX + 1];
format_classid_path(cgroup_workdir);
return join_cgroup_from_top(cgroup_workdir);
}
/**
* cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
*
* At call time, it moves the calling process to the root cgroup, and then
* runs the deletion process.
*
* On failure, it will print an error to stderr, and try to continue.
*/
void cleanup_classid_environment(void)
{
char cgroup_workdir[PATH_MAX + 1];
format_classid_path(cgroup_workdir);
join_cgroup_from_top(NETCLS_MOUNT_PATH);
nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
}
| linux-master | tools/testing/selftests/bpf/cgroup_helpers.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <asm/types.h>
#include <sys/syscall.h>
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <sys/ioctl.h>
#include <linux/rtnetlink.h>
#include <signal.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "test_tcpnotify.h"
#include "trace_helpers.h"
#include "testing_helpers.h"
#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
pthread_t tid;
int rx_callbacks;
static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
{
struct tcp_notifier *t = data;
if (t->type != 0xde || t->subtype != 0xad ||
t->source != 0xbe || t->hash != 0xef)
return;
rx_callbacks++;
}
void tcp_notifier_poller(struct perf_buffer *pb)
{
int err;
while (1) {
err = perf_buffer__poll(pb, 100);
if (err < 0 && err != -EINTR) {
printf("failed perf_buffer__poll: %d\n", err);
return;
}
}
}
static void *poller_thread(void *arg)
{
struct perf_buffer *pb = arg;
tcp_notifier_poller(pb);
return arg;
}
int verify_result(const struct tcpnotify_globals *result)
{
return (result->ncalls > 0 && result->ncalls == rx_callbacks ? 0 : 1);
}
/* Attach a SOCK_OPS program to a test cgroup, provoke TCP retransmissions
 * by firewalling TESTPORT during an nc connection attempt, and verify the
 * number of perf events the program emitted matches its own counter.
 *
 * Fixes: the bpf_prog_attach failure path printed 'error' (always
 * EXIT_FAILURE) instead of the attach result 'rv'; the global_map failure
 * path returned directly, skipping detach/close/cgroup cleanup.
 */
int main(int argc, char **argv)
{
	const char *file = "test_tcpnotify_kern.bpf.o";
	struct bpf_map *perf_map, *global_map;
	struct tcpnotify_globals g = {0};
	struct perf_buffer *pb = NULL;
	const char *cg_path = "/foo";
	int prog_fd, rv, cg_fd = -1;
	int error = EXIT_FAILURE;
	struct bpf_object *obj;
	char test_script[80];
	cpu_set_t cpuset;
	__u32 key = 0;

	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* Pin to CPU 0 so the perf events land on the ring we poll */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	cg_fd = cgroup_setup_and_join(cg_path);
	if (cg_fd < 0)
		goto err;

	if (bpf_prog_test_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
		printf("FAILED: load_bpf_file failed for: %s\n", file);
		goto err;
	}

	rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
	if (rv) {
		/* report the attach result, not the unrelated exit status */
		printf("FAILED: bpf_prog_attach: %d (%s)\n",
		       rv, strerror(errno));
		goto err;
	}

	perf_map = bpf_object__find_map_by_name(obj, "perf_event_map");
	if (!perf_map) {
		printf("FAIL:map '%s' not found\n", "perf_event_map");
		goto err;
	}

	global_map = bpf_object__find_map_by_name(obj, "global_map");
	if (!global_map) {
		printf("FAIL:map '%s' not found\n", "global_map");
		goto err; /* was 'return -1', which skipped cleanup */
	}

	pb = perf_buffer__new(bpf_map__fd(perf_map), 8, dummyfn, NULL, NULL, NULL);
	if (!pb)
		goto err;

	pthread_create(&tid, NULL, poller_thread, pb);

	/* Drop inbound traffic to TESTPORT so the nc connection attempt
	 * times out, generating the TCP events the BPF program reports.
	 */
	sprintf(test_script,
		"iptables -A INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	if (system(test_script)) {
		printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
		goto err;
	}

	sprintf(test_script,
		"nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
		TESTPORT);
	if (system(test_script))
		printf("execute command: %s, err %d\n", test_script, -errno);

	sprintf(test_script,
		"iptables -D INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	if (system(test_script)) {
		printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
		goto err;
	}

	rv = bpf_map_lookup_elem(bpf_map__fd(global_map), &key, &g);
	if (rv != 0) {
		printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
		goto err;
	}

	/* give the poller thread time to drain all pending perf events */
	sleep(10);

	if (verify_result(&g)) {
		printf("FAILED: Wrong stats Expected %d calls, got %d\n",
		       g.ncalls, rx_callbacks);
		goto err;
	}

	printf("PASSED!\n");
	error = 0;
err:
	bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
	close(cg_fd);
	cleanup_cgroup_environment();
	perf_buffer__free(pb);
	return error;
}
| linux-master | tools/testing/selftests/bpf/test_tcpnotify_user.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
/* Copyright (C) 2020 Facebook, Inc. */
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_progs.h"
#include "testing_helpers.h"
#include <linux/membarrier.h>
/* Parse a comma-separated list of non-negative numbers and ranges
 * ("1,5-8,42") into a freshly-allocated bool membership array.
 * On success stores the array in *num_set and its length (max+1) in
 * *num_set_len and returns 0; the caller frees *num_set.
 * Returns -EINVAL on malformed input, -ENOMEM on allocation failure.
 *
 * Fixes vs. previous version: negative numbers used to be accepted and
 * caused out-of-bounds writes (set[-1]); tokens with no digits were
 * silently treated as 0; all error paths now free the partial array.
 */
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
{
	int i, set_len = 0, new_len, num, start = 0, end = -1;
	bool *set = NULL, *tmp, parsing_end = false;
	char *next;

	while (s[0]) {
		errno = 0;
		num = strtol(s, &next, 10);
		if (errno) {
			free(set);
			return -errno;
		}
		/* reject empty tokens and negative values: a negative index
		 * would write before the start of the array below
		 */
		if (next == s || num < 0) {
			free(set);
			return -EINVAL;
		}

		if (parsing_end)
			end = num;
		else
			start = num;

		if (!parsing_end && *next == '-') {
			s = next + 1;
			parsing_end = true;
			continue;
		} else if (*next == ',') {
			parsing_end = false;
			s = next + 1;
			end = num;
		} else if (*next == '\0') {
			parsing_end = false;
			s = next;
			end = num;
		} else {
			free(set);
			return -EINVAL;
		}

		if (start > end) {
			free(set);
			return -EINVAL;
		}

		if (end + 1 > set_len) {
			new_len = end + 1;
			tmp = realloc(set, new_len);
			if (!tmp) {
				free(set);
				return -ENOMEM;
			}
			/* zero the gap between old contents and new range */
			for (i = set_len; i < start; i++)
				tmp[i] = false;
			set = tmp;
			set_len = new_len;
		}
		for (i = start; i <= end; i++)
			set[i] = true;
	}

	/* empty input or a dangling "N-" is malformed */
	if (!set || parsing_end) {
		free(set);
		return -EINVAL;
	}

	*num_set = set;
	*num_set_len = set_len;
	return 0;
}
/* Insert @test_str (and optionally @subtest_str) into @set, de-duplicating
 * entries. Ownership of both heap-allocated strings transfers to @set;
 * duplicates are freed here. Returns 0 on success, -ENOMEM on failure.
 */
static int do_insert_test(struct test_filter_set *set,
			  char *test_str,
			  char *subtest_str)
{
	struct test_filter *tmp, *test;
	char **ctmp;
	int i;

	/* reuse an existing entry with the same test name */
	for (i = 0; i < set->cnt; i++) {
		test = &set->tests[i];

		if (strcmp(test_str, test->name) == 0) {
			free(test_str);
			goto subtest;
		}
	}

	tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1));
	if (!tmp)
		return -ENOMEM;

	set->tests = tmp;
	test = &set->tests[set->cnt];

	test->name = test_str;
	test->subtests = NULL;
	test->subtest_cnt = 0;

	set->cnt++;

subtest:
	if (!subtest_str)
		return 0;

	/* drop duplicate subtest names for this test */
	for (i = 0; i < test->subtest_cnt; i++) {
		if (strcmp(subtest_str, test->subtests[i]) == 0) {
			free(subtest_str);
			return 0;
		}
	}

	ctmp = realloc(test->subtests,
		       sizeof(*test->subtests) * (test->subtest_cnt + 1));
	if (!ctmp)
		return -ENOMEM;

	test->subtests = ctmp;
	test->subtests[test->subtest_cnt] = subtest_str;

	test->subtest_cnt++;

	return 0;
}
/* Split @test_spec at '/' into test and subtest parts, wrap each according
 * to @is_glob_pattern (verbatim glob, or substring match by surrounding
 * with '*'), and add the heap-allocated results to @set.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int insert_test(struct test_filter_set *set,
		       char *test_spec,
		       bool is_glob_pattern)
{
	char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL;
	int glob_chars = 0;

	if (is_glob_pattern) {
		pattern = "%s";
	} else {
		pattern = "*%s*";
		glob_chars = 2; /* room for the two added '*' */
	}

	subtest_str = strchr(test_spec, '/');
	if (subtest_str) {
		*subtest_str = '\0'; /* terminate the test-name part in place */
		subtest_str += 1;
	}

	ext_test_str = malloc(strlen(test_spec) + glob_chars + 1);
	if (!ext_test_str)
		goto err;

	sprintf(ext_test_str, pattern, test_spec);

	if (subtest_str) {
		ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1);
		if (!ext_subtest_str)
			goto err;

		sprintf(ext_subtest_str, pattern, subtest_str);
	}

	/* do_insert_test takes ownership of both strings */
	return do_insert_test(set, ext_test_str, ext_subtest_str);

err:
	free(ext_test_str);
	free(ext_subtest_str);

	return -ENOMEM;
}
/* Read test specs from @path, one per line; leading/trailing whitespace is
 * stripped and '#' starts a comment. Each remaining spec is inserted into
 * @set via insert_test(). Returns 0 on success or a negative error.
 *
 * Fixes vs. previous version: the getline() buffer was never freed (leak),
 * and single-character specs were skipped because an empty line and a
 * one-character capture were indistinguishable.
 */
int parse_test_list_file(const char *path,
			 struct test_filter_set *set,
			 bool is_glob_pattern)
{
	char *buf = NULL, *capture_start, *capture_end, *scan_end;
	size_t buflen = 0;
	bool found;
	int err = 0;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		err = -errno;
		fprintf(stderr, "Failed to open '%s': %d\n", path, err);
		return err;
	}

	while (getline(&buf, &buflen, f) != -1) {
		capture_start = buf;

		/* skip leading whitespace */
		while (isspace((unsigned char)*capture_start))
			++capture_start;

		/* find the last non-space character before '#' or EOL */
		found = false;
		capture_end = capture_start;
		scan_end = capture_start;
		while (*scan_end && *scan_end != '#') {
			if (!isspace((unsigned char)*scan_end)) {
				capture_end = scan_end;
				found = true;
			}
			++scan_end;
		}

		if (!found) /* blank or comment-only line */
			continue;

		*(++capture_end) = '\0';

		err = insert_test(set, capture_start, is_glob_pattern);
		if (err)
			break;
	}

	free(buf); /* getline() allocates; caller of getline must free */
	fclose(f);
	return err;
}
int parse_test_list(const char *s,
struct test_filter_set *set,
bool is_glob_pattern)
{
char *input, *state = NULL, *test_spec;
int err = 0;
input = strdup(s);
if (!input)
return -ENOMEM;
while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
err = insert_test(set, test_spec, is_glob_pattern);
if (err)
break;
}
free(input);
return err;
}
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
{
__u32 info_len = sizeof(*info);
int err;
memset(info, 0, sizeof(*info));
err = bpf_link_get_info_by_fd(bpf_link__fd(link), info, &info_len);
if (err) {
printf("failed to get link info: %d\n", -errno);
return 0;
}
return info->prog_id;
}
int extra_prog_load_log_flags = 0;
int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
LIBBPF_OPTS(bpf_object_open_opts, opts,
.kernel_log_level = extra_prog_load_log_flags,
);
struct bpf_object *obj;
struct bpf_program *prog;
__u32 flags;
int err;
obj = bpf_object__open_file(file, &opts);
if (!obj)
return -errno;
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
err = -ENOENT;
goto err_out;
}
if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type)
bpf_program__set_type(prog, type);
flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32;
bpf_program__set_flags(prog, flags);
err = bpf_object__load(obj);
if (err)
goto err_out;
*pobj = obj;
*prog_fd = bpf_program__fd(prog);
return 0;
err_out:
bpf_object__close(obj);
return err;
}
int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
size_t log_buf_sz)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.kern_version = kern_version,
.prog_flags = BPF_F_TEST_RND_HI32,
.log_level = extra_prog_load_log_flags,
.log_buf = log_buf,
.log_size = log_buf_sz,
);
return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts);
}
__u64 read_perf_max_sample_freq(void)
{
__u64 sample_freq = 5000; /* fallback to 5000 on error */
FILE *f;
f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
if (f == NULL) {
printf("Failed to open /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
"return default value: 5000\n", -errno);
return sample_freq;
}
if (fscanf(f, "%llu", &sample_freq) != 1) {
printf("Failed to parse /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
"return default value: 5000\n", -errno);
}
fclose(f);
return sample_freq;
}
static int finit_module(int fd, const char *param_values, int flags)
{
return syscall(__NR_finit_module, fd, param_values, flags);
}
static int delete_module(const char *name, int flags)
{
return syscall(__NR_delete_module, name, flags);
}
int unload_bpf_testmod(bool verbose)
{
if (kern_sync_rcu())
fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n");
if (delete_module("bpf_testmod", 0)) {
if (errno == ENOENT) {
if (verbose)
fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
return -1;
}
fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
return -1;
}
if (verbose)
fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
return 0;
}
int load_bpf_testmod(bool verbose)
{
int fd;
if (verbose)
fprintf(stdout, "Loading bpf_testmod.ko...\n");
fd = open("bpf_testmod.ko", O_RDONLY);
if (fd < 0) {
fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
return -ENOENT;
}
if (finit_module(fd, "", 0)) {
fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
close(fd);
return -EINVAL;
}
close(fd);
if (verbose)
fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
return 0;
}
/*
* Trigger synchronize_rcu() in kernel.
*/
int kern_sync_rcu(void)
{
return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}
| linux-master | tools/testing/selftests/bpf/testing_helpers.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#define CGROUP_PATH "/skb_cgroup_test"
#define NUM_CGROUP_LEVELS 4
/* RFC 4291, Section 2.7.1 */
#define LINKLOCAL_MULTICAST "ff02::1"
static int mk_dst_addr(const char *ip, const char *iface,
struct sockaddr_in6 *dst)
{
memset(dst, 0, sizeof(*dst));
dst->sin6_family = AF_INET6;
dst->sin6_port = htons(1025);
if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) {
log_err("Invalid IPv6: %s", ip);
return -1;
}
dst->sin6_scope_id = if_nametoindex(iface);
if (!dst->sin6_scope_id) {
log_err("Failed to get index of iface: %s", iface);
return -1;
}
return 0;
}
static int send_packet(const char *iface)
{
struct sockaddr_in6 dst;
char msg[] = "msg";
int err = 0;
int fd = -1;
if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst))
goto err;
fd = socket(AF_INET6, SOCK_DGRAM, 0);
if (fd == -1) {
log_err("Failed to create UDP socket");
goto err;
}
if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst,
sizeof(dst)) == -1) {
log_err("Failed to send datagram");
goto err;
}
goto out;
err:
err = -1;
out:
if (fd >= 0)
close(fd);
return err;
}
int get_map_fd_by_prog_id(int prog_id)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 map_ids[1];
int prog_fd = -1;
int map_fd = -1;
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0) {
log_err("Failed to get fd by prog id %d", prog_id);
goto err;
}
info.nr_map_ids = 1;
info.map_ids = (__u64) (unsigned long) map_ids;
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
log_err("Failed to get info by prog fd %d", prog_fd);
goto err;
}
if (!info.nr_map_ids) {
log_err("No maps found for prog fd %d", prog_fd);
goto err;
}
map_fd = bpf_map_get_fd_by_id(map_ids[0]);
if (map_fd < 0)
log_err("Failed to get fd by map id %d", map_ids[0]);
err:
if (prog_fd >= 0)
close(prog_fd);
return map_fd;
}
int check_ancestor_cgroup_ids(int prog_id)
{
__u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS];
__u32 level;
int err = 0;
int map_fd;
expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
expected_ids[1] = get_cgroup_id("");
expected_ids[2] = get_cgroup_id(CGROUP_PATH);
expected_ids[3] = 0; /* non-existent cgroup */
map_fd = get_map_fd_by_prog_id(prog_id);
if (map_fd < 0)
goto err;
for (level = 0; level < NUM_CGROUP_LEVELS; ++level) {
if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) {
log_err("Failed to lookup key %d", level);
goto err;
}
if (actual_ids[level] != expected_ids[level]) {
log_err("%llx (actual) != %llx (expected), level: %u\n",
actual_ids[level], expected_ids[level], level);
goto err;
}
}
goto out;
err:
err = -1;
out:
if (map_fd >= 0)
close(map_fd);
return err;
}
int main(int argc, char **argv)
{
int cgfd = -1;
int err = 0;
if (argc < 3) {
fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]);
exit(EXIT_FAILURE);
}
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
if (send_packet(argv[1]))
goto err;
if (check_ancestor_cgroup_ids(atoi(argv[2])))
goto err;
goto out;
err:
err = -1;
out:
close(cgfd);
cleanup_cgroup_environment();
printf("[%s]\n", err ? "FAIL" : "PASS");
return err;
}
| linux-master | tools/testing/selftests/bpf/test_skb_cgroup_id_user.c |
// SPDX-License-Identifier: GPL-2.0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
#define MAX_SYMS 400000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
/* qsort comparator ordering ksyms by address.
 * Compare explicitly instead of returning the subtraction: 'addr - addr'
 * is a long truncated to int, which can flip sign for 64-bit kernel
 * addresses and yield an inconsistent ordering.
 */
static int ksym_cmp(const void *p1, const void *p2)
{
	long a1 = ((struct ksym *)p1)->addr;
	long a2 = ((struct ksym *)p2)->addr;

	return (a1 > a2) - (a1 < a2);
}
/* (Re)load /proc/kallsyms into the global syms[] table, sorted by address.
 * Returns 0 on success; -ENOENT if kallsyms can't be opened; -EFBIG when
 * the table overflows MAX_SYMS; -ENOMEM on strdup failure.
 *
 * Fixes vs. previous version: error returns from inside the read loop
 * leaked the open FILE*, and the strdup() result was unchecked.
 */
int load_kallsyms_refresh(void)
{
	FILE *f;
	char func[256], buf[256];
	char symbol;
	void *addr;
	int i = 0;
	int err = 0;

	sym_cnt = 0;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -ENOENT;

	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;
		if (i >= MAX_SYMS) {
			err = -EFBIG;
			goto out;
		}
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
		if (!syms[i].name) {
			err = -ENOMEM;
			goto out;
		}
		i++;
	}
	sym_cnt = i;
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
out:
	fclose(f); /* always close, including on the error paths */
	return err;
}
int load_kallsyms(void)
{
/*
* This is called/used from multiplace places,
* load symbols just once.
*/
if (sym_cnt)
return 0;
return load_kallsyms_refresh();
}
/* Binary-search the sorted syms[] table for the symbol containing @key.
 * Returns the exact match, else the nearest symbol below @key, else the
 * first entry (_stext) as a catch-all; NULL if kallsyms isn't loaded.
 *
 * Fixes vs. previous version: 'key - syms[mid].addr' was truncated from
 * long to int, which can flip sign for 64-bit addresses and misdirect the
 * search; syms[start] could also be read one slot past the loaded table.
 */
struct ksym *ksym_search(long key)
{
	int start = 0, end = sym_cnt;

	/* kallsyms not loaded. return NULL */
	if (sym_cnt <= 0)
		return NULL;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (key < syms[mid].addr)
			end = mid;
		else if (key > syms[mid].addr)
			start = mid + 1;
		else
			return &syms[mid];
	}

	/* key lies between two symbols: attribute it to the lower one,
	 * guarding against indexing past the loaded table
	 */
	if (start > 0 && start < sym_cnt &&
	    syms[start - 1].addr < key && key < syms[start].addr)
		return &syms[start - 1];

	/* out of range. return _stext */
	return &syms[0];
}
long ksym_get_addr(const char *name)
{
int i;
for (i = 0; i < sym_cnt; i++) {
if (strcmp(syms[i].name, name) == 0)
return syms[i].addr;
}
return 0;
}
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
* this is faster than load + find.
*/
int kallsyms_find(const char *sym, unsigned long long *addr)
{
	char type, name[500];
	unsigned long long value;
	int err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -EINVAL;

	/* each line reads "<addr> <type> <name>[ <module>]" */
	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
		if (strcmp(name, sym) == 0) {
			*addr = value;
			goto out;
		}
	}
	err = -ENOENT;

out:
	fclose(f);
	return err;
}
void read_trace_pipe(void)
{
int trace_fd;
if (access(TRACEFS_PIPE, F_OK) == 0)
trace_fd = open(TRACEFS_PIPE, O_RDONLY, 0);
else
trace_fd = open(DEBUGFS_PIPE, O_RDONLY, 0);
if (trace_fd < 0)
return;
while (1) {
static char buf[4096];
ssize_t sz;
sz = read(trace_fd, buf, sizeof(buf) - 1);
if (sz > 0) {
buf[sz] = 0;
puts(buf);
}
}
}
/* Translate a virtual address inside our own executable mapping into the
 * file offset that uprobe attachment expects (offset within the mapping
 * plus the mapping's file offset from /proc/self/maps).
 * Returns the offset, or a negative errno-style value on failure.
 */
ssize_t get_uprobe_offset(const void *addr)
{
	size_t start, end, base;
	char buf[256];
	bool found = false;
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	/* find the executable ('x' in the perms column) mapping holding addr */
	while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
		if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
			found = true;
			break;
		}
	}

	fclose(f);

	if (!found)
		return -ESRCH;

#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2

#define OP_RT_RA_MASK   0xffff0000UL
#define LIS_R2          0x3c400000UL
#define ADDIS_R2_R12    0x3c4c0000UL
#define ADDI_R2_R2      0x38420000UL

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	{
		const u32 *insn = (const u32 *)(uintptr_t)addr;

		if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
		     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
		    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
			return (uintptr_t)(insn + 2) - start + base;
	}
#endif
	return (uintptr_t)addr - start + base;
}
ssize_t get_rel_offset(uintptr_t addr)
{
size_t start, end, offset;
char buf[256];
FILE *f;
f = fopen("/proc/self/maps", "r");
if (!f)
return -errno;
while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
if (addr >= start && addr < end) {
fclose(f);
return (size_t)addr - start + offset;
}
}
fclose(f);
return -EINVAL;
}
/* Scan an ELF note buffer for a GNU build-id note (n_type 3 ==
 * NT_GNU_BUILD_ID, owner "GNU") and copy its descriptor into
 * @build_id, zero-padding the remainder up to BPF_BUILD_ID_SIZE.
 *
 * Returns the build-id length on success, -ENOENT if no suitable
 * note is present in the buffer.
 */
static int
parse_build_id_buf(const void *note_start, Elf32_Word note_size, char *build_id)
{
	Elf32_Word note_offs = 0;
	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
		if (nhdr->n_type == 3 && nhdr->n_namesz == sizeof("GNU") &&
		    !strcmp((char *)(nhdr + 1), "GNU") && nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			/* descriptor follows the header and the 4-byte
			 * aligned "GNU" owner name
			 */
			memcpy(build_id, note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0, BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return (int) nhdr->n_descsz;
		}
		/* name and descriptor are each padded to 4-byte alignment */
		note_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
	}
	return -ENOENT;
}
/* Reads the GNU build ID from the ELF binary at *path* and stores it in
 * the *build_id* buffer of *size* bytes, which must be at least
 * BPF_BUILD_ID_SIZE. Returns the size of the build ID on success. On
 * error a negative error value is returned.
 */
int read_build_id(const char *path, char *build_id, size_t size)
{
	int fd, err = -EINVAL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;
	size_t max, i;
	if (size < BPF_BUILD_ID_SIZE)
		return -EINVAL;
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -errno;
	/* libelf requires the version handshake before any other call */
	(void)elf_version(EV_CURRENT);
	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
	if (!elf)
		goto out;
	if (elf_kind(elf) != ELF_K_ELF)
		goto out;
	if (!gelf_getehdr(elf, &ehdr))
		goto out;
	/* walk the program headers, parsing every PT_NOTE segment until
	 * one yields a build id
	 */
	for (i = 0; i < ehdr.e_phnum; i++) {
		GElf_Phdr mem, *phdr;
		char *data;
		phdr = gelf_getphdr(elf, i, &mem);
		if (!phdr)
			goto out;
		if (phdr->p_type != PT_NOTE)
			continue;
		data = elf_rawfile(elf, &max);
		if (!data)
			goto out;
		/* reject segments that extend past the raw file */
		if (phdr->p_offset + phdr->p_memsz > max)
			goto out;
		err = parse_build_id_buf(data + phdr->p_offset, phdr->p_memsz, build_id);
		if (err > 0)
			break;
	}
out:
	if (elf)
		elf_end(elf);
	close(fd);
	return err;
}
| linux-master | tools/testing/selftests/bpf/trace_helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Facebook
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>
#include <time.h>
#include <sys/wait.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET (128)
#define PERCPU_FREE_TARGET (4)
static int nr_cpus;
/* Create a BPF map with u64 keys and u64 values.
 *
 * @map_type:  BPF_MAP_TYPE_* of the map to create
 * @map_flags: BPF_F_* creation flags (e.g. BPF_F_NO_COMMON_LRU)
 * @size:      max_entries for the map
 *
 * Returns the map fd, or a negative value on failure (error printed).
 */
static int create_map(int map_type, int map_flags, unsigned int size)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
	int map_fd;

	map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
				sizeof(unsigned long long), size, &opts);
	/* In libbpf 1.0 strict mode (set in main()) failures are reported
	 * as -errno, not necessarily -1, so test for any negative value.
	 */
	if (map_fd < 0)
		perror("bpf_map_create");
	return map_fd;
}
/* Look up @key in LRU map @fd from *inside* a BPF program so that the
 * element's LRU ref bit gets marked (a plain syscall-side lookup does
 * not set it).  The looked-up value is written by the program into a
 * scratch one-entry array map and read back afterwards.
 *
 * Returns 0 on success with the value copied into @value, -1 on failure.
 */
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
					    void *value)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, fd),
		BPF_LD_IMM64(BPF_REG_3, key),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	__u8 data[64] = {};
	int mfd, pfd, ret, zero = 0;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);
	/* scratch array map that receives the looked-up value */
	mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL);
	if (mfd < 0)
		return -1;
	/* patch the scratch map's fd into the first (map value) insn */
	insns[0].imm = mfd;
	pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL);
	if (pfd < 0) {
		close(mfd);
		return -1;
	}
	/* retval 42 means the in-program lookup found the key */
	ret = bpf_prog_test_run_opts(pfd, &topts);
	if (ret < 0 || topts.retval != 42) {
		ret = -1;
	} else {
		assert(!bpf_map_lookup_elem(mfd, &zero, value));
		ret = 0;
	}
	close(pfd);
	close(mfd);
	return ret;
}
/* Return 1 if every key in map1 exists in map0 with the same
 * (first-CPU) value, 0 otherwise.
 */
static int map_subset(int map0, int map1)
{
	unsigned long long cursor = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];

	while (!bpf_map_get_next_key(map1, &cursor, &cursor)) {
		assert(!bpf_map_lookup_elem(map1, &cursor, value1));
		if (bpf_map_lookup_elem(map0, &cursor, value0)) {
			printf("key:%llu not found from map. %s(%d)\n",
			       cursor, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       cursor, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}
/* Two maps are equal iff each is a subset of the other. */
static int map_equal(int lru_map, int expected)
{
	if (!map_subset(lru_map, expected))
		return 0;
	return map_subset(expected, lru_map);
}
/* Pin @pid (0 == calling thread) to the first usable CPU at index
 * >= *next_to_try, advancing *next_to_try past every CPU attempted.
 * Returns 0 on success, -1 once no remaining CPU can be used.
 */
static int sched_next_online(int pid, int *next_to_try)
{
	int cpu = *next_to_try;
	int err = -1;
	cpu_set_t mask;

	while (cpu < nr_cpus) {
		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		cpu++;
		if (!sched_setaffinity(pid, sizeof(mask), &mask)) {
			err = 0;
			break;
		}
	}
	*next_to_try = cpu;
	return err;
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only found key=1 and key=3
 * Also exercises invalid update flags and lookup_and_delete.
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	/* per-cpu LRU needs 2 entries for every possible CPU */
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1)
;
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);
	/* insert key=2 element */
	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	/* insert key=3 element */
	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);
	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* lookup elem key=1 and delete it, then check it doesn't exist */
	key = 1;
	assert(!bpf_map_lookup_and_delete_elem(lru_map_fd, &key, &value));
	assert(value[0] == 1234);
	/* remove the same element from the expected map */
	assert(!bpf_map_delete_elem(expected_map_fd, &key));
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	/* tgt_free must be even so the halves partition it exactly */
	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);
	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		/* ref-bit lookup keeps these keys alive across the shrink */
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	/* Insert 1+tgt_free to 2*tgt_free
	 * => 1+tgt_free/2 to LOCALFREE_TARGET will be
	 * removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Size of the LRU map 1.5 * tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Key 1+tgt_free to tgt_free*3/2
 *      will be removed from LRU because it has never
 *      been lookup and ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);
	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	/* Any bpf_map_update_elem will require to acquire a new node
	 * from LRU first.
	 *
	 * The local list is running out of free nodes.
	 * It gets from the global LRU list which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest key 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		/* per-cpu variant: trigger the shrink via insert+delete */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		/* common variant: an in-place update triggers the shrink */
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}
	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immeidately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		/* the shrink above must have evicted this key */
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	value[0] = 1234;
	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Size of the LRU map is 2*tgt_free
 * It is to test the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *  => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);
	map_size = tgt_free * 2;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		/* mark the ref bit so these survive the rotation */
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Test deletion: evicted/deleted slots must be reusable and deleting a
 * missing key must fail.
 */
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	key = 1;
	/* key=1 is still present, so NOEXIST insert must fail */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	for (; key <= 2 * tgt_free; key++) {
		/* first delete succeeds, repeated delete must fail */
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}
	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Body of test_lru_sanity5, run in a child pinned to one CPU: verify the
 * key inserted by the previous CPU is visible, then insert one more key
 * into the one-element map, which must evict the previous one.
 */
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];
	/* Ensure the last key inserted by previous CPU can be found */
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
	value[0] = 1234;
	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}
/* Test map with only one element: fork one child per usable CPU and let
 * each verify eviction behaviour (see do_test_lru_sanity5).
 */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);
	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	/* walk the CPUs; each child inherits the parent's affinity */
	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;
		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;
			/* child must exit cleanly (all its asserts passed) */
			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}
	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);
	printf("Pass\n");
}
/* Test list rotation for BPF_F_NO_COMMON_LRU map */
static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned int map_size = tgt_free * 2;
	int next_cpu = 0;
	if (!(map_flags & BPF_F_NO_COMMON_LRU))
		return;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);
	/* per-cpu LRU: each CPU owns map_size entries */
	lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus);
	assert(lru_map_fd != -1);
	value[0] = 1234;
	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	for (; key <= tgt_free * 2; key++) {
		unsigned long long stable_key;
		/* Make ref bit sticky for key: [1, tgt_free] */
		for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
			/* Mark the ref bit */
			assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
								 stable_key, value));
		}
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	/* these inserts evict the unreferenced [1+tgt_free, 2*tgt_free] */
	for (; key <= tgt_free * 3; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1 (datapath)
 * Lookup Key=2 (syscall)
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only found key=1 and key=3
 */
static void test_lru_sanity7(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */
	/* insert key=2 element */
	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	/* insert key=3 element */
	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);
	/* check that key=2 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 2;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);
	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1 (syscall)
 * Lookup Key=2 (datapath)
 * Add Key=3
 *   => Key=1 will be removed by LRU
 * Iterate map.  Only found key=2 and key=3
 */
static void test_lru_sanity8(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;
	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);
	assert(sched_next_online(0, &next_cpu) != -1);
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);
	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);
	value[0] = 1234;
	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
	/* key=1 already exists */
	/* insert key=2 element */
	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
	/* key=2 is not there */
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* insert key=3 element */
	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	/* check that key=1 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);
	/* check that key=2 can be found and mark the ref bit to
	 * stop LRU from removing key=2
	 */
	key = 2;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);
	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));
	/* key=1 has been removed from the LRU */
	key = 1;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
	assert(map_equal(lru_map_fd, expected_map_fd));
	close(expected_map_fd);
	close(lru_map_fd);
	printf("Pass\n");
}
/* Run every LRU sanity test for each (map type, map flags) combination. */
int main(int argc, char **argv)
{
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int fi, ti;

	setbuf(stdout, NULL);
	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	for (fi = 0; fi < ARRAY_SIZE(map_flags); fi++) {
		unsigned int tgt_free;

		/* per-cpu LRU lists use a much smaller free target */
		if (map_flags[fi] & BPF_F_NO_COMMON_LRU)
			tgt_free = PERCPU_FREE_TARGET;
		else
			tgt_free = LOCAL_FREE_TARGET;
		for (ti = 0; ti < ARRAY_SIZE(map_types); ti++) {
			test_lru_sanity0(map_types[ti], map_flags[fi]);
			test_lru_sanity1(map_types[ti], map_flags[fi], tgt_free);
			test_lru_sanity2(map_types[ti], map_flags[fi], tgt_free);
			test_lru_sanity3(map_types[ti], map_flags[fi], tgt_free);
			test_lru_sanity4(map_types[ti], map_flags[fi], tgt_free);
			test_lru_sanity5(map_types[ti], map_flags[fi]);
			test_lru_sanity6(map_types[ti], map_flags[fi], tgt_free);
			test_lru_sanity7(map_types[ti], map_flags[fi]);
			test_lru_sanity8(map_types[ti], map_flags[fi]);
			printf("\n");
		}
	}
	return 0;
}
| linux-master | tools/testing/selftests/bpf/test_lru_map.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
# define ENOTSUPP 524
#endif
#define CG_PATH "/foo"
#define CONNECT4_PROG_PATH "./connect4_prog.bpf.o"
#define CONNECT6_PROG_PATH "./connect6_prog.bpf.o"
#define SENDMSG4_PROG_PATH "./sendmsg4_prog.bpf.o"
#define SENDMSG6_PROG_PATH "./sendmsg6_prog.bpf.o"
#define RECVMSG4_PROG_PATH "./recvmsg4_prog.bpf.o"
#define RECVMSG6_PROG_PATH "./recvmsg6_prog.bpf.o"
#define BIND4_PROG_PATH "./bind4_prog.bpf.o"
#define BIND6_PROG_PATH "./bind6_prog.bpf.o"
#define SERV4_IP "192.168.1.254"
#define SERV4_REWRITE_IP "127.0.0.1"
#define SRC4_IP "172.16.0.1"
#define SRC4_REWRITE_IP "127.0.0.4"
#define SERV4_PORT 4040
#define SERV4_REWRITE_PORT 4444
#define SERV6_IP "face:b00c:1234:5678::abcd"
#define SERV6_REWRITE_IP "::1"
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
#define SRC6_IP "::1"
#define SRC6_REWRITE_IP "::6"
#define WILDCARD6_IP "::"
#define SERV6_PORT 6060
#define SERV6_REWRITE_PORT 6666
#define INET_NTOP_BUF 40
struct sock_addr_test;
typedef int (*load_fn)(const struct sock_addr_test *test);
typedef int (*info_fn)(int, struct sockaddr *, socklen_t *);
char bpf_log_buf[BPF_LOG_BUF_SIZE];
/* Declarative description of one sock_addr test case. */
struct sock_addr_test {
	const char *descr;	/* test name */
	/* BPF prog properties */
	load_fn loadfn;		/* loads the BPF prog for this test */
	enum bpf_attach_type expected_attach_type;	/* set at prog load time */
	enum bpf_attach_type attach_type;	/* used when attaching to cgroup */
	/* Socket properties */
	int domain;		/* AF_INET or AF_INET6 */
	int type;		/* SOCK_STREAM or SOCK_DGRAM */
	/* IP:port pairs for BPF prog to override */
	const char *requested_ip;	/* address passed to the syscall */
	unsigned short requested_port;
	const char *expected_ip;	/* address expected after BPF rewrite */
	unsigned short expected_port;
	const char *expected_src_ip;	/* expected (rewritten) source address */
	/* Expected test result */
	enum {
		LOAD_REJECT,	/* prog load is expected to fail */
		ATTACH_REJECT,	/* attach is expected to fail */
		ATTACH_OKAY,
		SYSCALL_EPERM,	/* syscall expected to fail with EPERM */
		SYSCALL_ENOTSUPP,	/* syscall expected to fail with ENOTSUPP */
		SUCCESS,
	} expected_result;
};
static int bind4_prog_load(const struct sock_addr_test *test);
static int bind6_prog_load(const struct sock_addr_test *test);
static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int recvmsg_allow_prog_load(const struct sock_addr_test *test);
static int recvmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int recvmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
static struct sock_addr_test tests[] = {
/* bind */
{
"bind4: load prog with wrong expected attach type",
bind4_prog_load,
BPF_CGROUP_INET6_BIND,
BPF_CGROUP_INET4_BIND,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"bind4: attach prog with wrong attach type",
bind4_prog_load,
BPF_CGROUP_INET4_BIND,
BPF_CGROUP_INET6_BIND,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"bind4: rewrite IP & TCP port in",
bind4_prog_load,
BPF_CGROUP_INET4_BIND,
BPF_CGROUP_INET4_BIND,
AF_INET,
SOCK_STREAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
NULL,
SUCCESS,
},
{
"bind4: rewrite IP & UDP port in",
bind4_prog_load,
BPF_CGROUP_INET4_BIND,
BPF_CGROUP_INET4_BIND,
AF_INET,
SOCK_DGRAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
NULL,
SUCCESS,
},
{
"bind6: load prog with wrong expected attach type",
bind6_prog_load,
BPF_CGROUP_INET4_BIND,
BPF_CGROUP_INET6_BIND,
AF_INET6,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"bind6: attach prog with wrong attach type",
bind6_prog_load,
BPF_CGROUP_INET6_BIND,
BPF_CGROUP_INET4_BIND,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"bind6: rewrite IP & TCP port in",
bind6_prog_load,
BPF_CGROUP_INET6_BIND,
BPF_CGROUP_INET6_BIND,
AF_INET6,
SOCK_STREAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
NULL,
SUCCESS,
},
{
"bind6: rewrite IP & UDP port in",
bind6_prog_load,
BPF_CGROUP_INET6_BIND,
BPF_CGROUP_INET6_BIND,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
NULL,
SUCCESS,
},
/* connect */
{
"connect4: load prog with wrong expected attach type",
connect4_prog_load,
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET4_CONNECT,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"connect4: attach prog with wrong attach type",
connect4_prog_load,
BPF_CGROUP_INET4_CONNECT,
BPF_CGROUP_INET6_CONNECT,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"connect4: rewrite IP & TCP port",
connect4_prog_load,
BPF_CGROUP_INET4_CONNECT,
BPF_CGROUP_INET4_CONNECT,
AF_INET,
SOCK_STREAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SRC4_REWRITE_IP,
SUCCESS,
},
{
"connect4: rewrite IP & UDP port",
connect4_prog_load,
BPF_CGROUP_INET4_CONNECT,
BPF_CGROUP_INET4_CONNECT,
AF_INET,
SOCK_DGRAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SRC4_REWRITE_IP,
SUCCESS,
},
{
"connect6: load prog with wrong expected attach type",
connect6_prog_load,
BPF_CGROUP_INET4_CONNECT,
BPF_CGROUP_INET6_CONNECT,
AF_INET6,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"connect6: attach prog with wrong attach type",
connect6_prog_load,
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET4_CONNECT,
AF_INET,
SOCK_STREAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"connect6: rewrite IP & TCP port",
connect6_prog_load,
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET6_CONNECT,
AF_INET6,
SOCK_STREAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
{
"connect6: rewrite IP & UDP port",
connect6_prog_load,
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET6_CONNECT,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
/* sendmsg */
{
"sendmsg4: load prog with wrong expected attach type",
sendmsg4_rw_asm_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP4_SENDMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"sendmsg4: attach prog with wrong attach type",
sendmsg4_rw_asm_prog_load,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"sendmsg4: rewrite IP & port (asm)",
sendmsg4_rw_asm_prog_load,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP4_SENDMSG,
AF_INET,
SOCK_DGRAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SRC4_REWRITE_IP,
SUCCESS,
},
{
"sendmsg4: rewrite IP & port (C)",
sendmsg4_rw_c_prog_load,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP4_SENDMSG,
AF_INET,
SOCK_DGRAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SRC4_REWRITE_IP,
SUCCESS,
},
{
"sendmsg4: deny call",
sendmsg_deny_prog_load,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP4_SENDMSG,
AF_INET,
SOCK_DGRAM,
SERV4_IP,
SERV4_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SRC4_REWRITE_IP,
SYSCALL_EPERM,
},
{
"sendmsg6: load prog with wrong expected attach type",
sendmsg6_rw_asm_prog_load,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"sendmsg6: attach prog with wrong attach type",
sendmsg6_rw_asm_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP4_SENDMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_REJECT,
},
{
"sendmsg6: rewrite IP & port (asm)",
sendmsg6_rw_asm_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
{
"sendmsg6: rewrite IP & port (C)",
sendmsg6_rw_c_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
{
"sendmsg6: IPv4-mapped IPv6",
sendmsg6_rw_v4mapped_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SYSCALL_ENOTSUPP,
},
{
"sendmsg6: set dst IP = [::] (BSD'ism)",
sendmsg6_rw_wildcard_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SUCCESS,
},
{
"sendmsg6: preserve dst IP = [::] (BSD'ism)",
sendmsg_allow_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
WILDCARD6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_PORT,
SRC6_IP,
SUCCESS,
},
{
"sendmsg6: deny call",
sendmsg_deny_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_IP,
SERV6_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SRC6_REWRITE_IP,
SYSCALL_EPERM,
},
/* recvmsg */
{
"recvmsg4: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg4: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg6: return code ok",
recvmsg_allow_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
ATTACH_OKAY,
},
{
"recvmsg6: return code !ok",
recvmsg_deny_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
NULL,
0,
NULL,
0,
NULL,
LOAD_REJECT,
},
{
"recvmsg4: rewrite IP & port (C)",
recvmsg4_rw_c_prog_load,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP4_RECVMSG,
AF_INET,
SOCK_DGRAM,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_REWRITE_IP,
SERV4_REWRITE_PORT,
SERV4_IP,
SUCCESS,
},
{
"recvmsg6: rewrite IP & port (C)",
recvmsg6_rw_c_prog_load,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
AF_INET6,
SOCK_DGRAM,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_REWRITE_IP,
SERV6_REWRITE_PORT,
SERV6_IP,
SUCCESS,
},
};
/* Build a sockaddr_in or sockaddr_in6 from a textual IP and a host-order
 * port. addr is zeroed (addr_len bytes) before filling. Returns 0 on
 * success; -1 on unsupported family, short addr_len, or unparsable IP.
 */
static int mk_sockaddr(int domain, const char *ip, unsigned short port,
		       struct sockaddr *addr, socklen_t addr_len)
{
	if (domain != AF_INET && domain != AF_INET6) {
		log_err("Unsupported address family");
		return -1;
	}

	memset(addr, 0, addr_len);

	if (domain == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;

		if (addr_len < sizeof(*sin))
			return -1;
		sin->sin_family = domain;
		sin->sin_port = htons(port);
		if (inet_pton(domain, ip, (void *)&sin->sin_addr) != 1) {
			log_err("Invalid IPv4: %s", ip);
			return -1;
		}
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

		if (addr_len < sizeof(*sin6))
			return -1;
		sin6->sin6_family = domain;
		sin6->sin6_port = htons(port);
		if (inet_pton(domain, ip, (void *)&sin6->sin6_addr) != 1) {
			log_err("Invalid IPv6: %s", ip);
			return -1;
		}
	}

	return 0;
}
/* Load raw BPF instructions as a BPF_PROG_TYPE_CGROUP_SOCK_ADDR program
 * with the test's expected attach type. On load failure, dumps the
 * verifier log unless the test expected rejection (LOAD_REJECT).
 * Returns the program fd (>= 0) or a negative error from bpf_prog_load().
 */
static int load_insns(const struct sock_addr_test *test,
		      const struct bpf_insn *insns, size_t insns_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int ret;
	opts.expected_attach_type = test->expected_attach_type;
	/* Capture verifier output so we can print it on unexpected failure */
	opts.log_buf = bpf_log_buf;
	opts.log_size = BPF_LOG_BUF_SIZE;
	ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, NULL, "GPL", insns, insns_cnt, &opts);
	if (ret < 0 && test->expected_result != LOAD_REJECT) {
		log_err(">>> Loading program error.\n"
			">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
	}
	return ret;
}
/* Open a pre-compiled BPF object file, take its first program, force its
 * type/attach type to match the test, and load it. Returns the program fd
 * on success or -1 on failure (object is closed on error paths).
 */
static int load_path(const struct sock_addr_test *test, const char *path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;
	obj = bpf_object__open_file(path, NULL);
	err = libbpf_get_error(obj);
	if (err) {
		log_err(">>> Opening BPF object (%s) error.\n", path);
		return -1;
	}
	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		goto err_out;
	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
	bpf_program__set_expected_attach_type(prog, test->expected_attach_type);
	/* Randomize high 32 bits of registers to catch 32-bit subreg bugs */
	bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
	err = bpf_object__load(obj);
	if (err) {
		if (test->expected_result != LOAD_REJECT)
			log_err(">>> Loading program (%s) error.\n", path);
		goto err_out;
	}
	return bpf_program__fd(prog);
err_out:
	bpf_object__close(obj);
	return -1;
}
/* Thin wrappers: load the pre-built C-based BPF object for each hook. */
static int bind4_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, BIND4_PROG_PATH);
}
static int bind6_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, BIND6_PROG_PATH);
}
static int connect4_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, CONNECT4_PROG_PATH);
}
static int connect6_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, CONNECT6_PROG_PATH);
}
/* Load a minimal program that immediately returns rc (1 = allow the
 * syscall, 0 = deny it) without touching the context. Used for the
 * plain allow/deny sendmsg/recvmsg test cases.
 */
static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
				   int32_t rc)
{
	struct bpf_insn insns[] = {
		/* return rc */
		BPF_MOV64_IMM(BPF_REG_0, rc),
		BPF_EXIT_INSN(),
	};
	return load_insns(test, insns, ARRAY_SIZE(insns));
}
/* Allow/deny wrappers around xmsg_ret_only_prog_load: rc 1 permits the
 * sendmsg/recvmsg call, rc 0 makes it fail (EPERM for sendmsg, or causes
 * LOAD_REJECT for recvmsg hooks which must return 1).
 */
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
{
	return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
{
	return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int recvmsg_allow_prog_load(const struct sock_addr_test *test)
{
	return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int recvmsg_deny_prog_load(const struct sock_addr_test *test)
{
	return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
/* Load a hand-assembled IPv4 sendmsg hook that, for AF_INET/SOCK_DGRAM
 * sockets, rewrites the source IP to SRC4_REWRITE_IP and the destination
 * IP/port to SERV4_REWRITE_IP:SERV4_REWRITE_PORT, then returns 1 (allow).
 * NOTE: the BPF_JMP_IMM offsets (8 and 6) count the remaining instructions
 * in the rewrite sequence — keep them in sync if instructions change.
 */
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
	struct sockaddr_in dst4_rw_addr;
	struct in_addr src4_rw_ip;
	if (inet_pton(AF_INET, SRC4_REWRITE_IP, (void *)&src4_rw_ip) != 1) {
		log_err("Invalid IPv4: %s", SRC4_REWRITE_IP);
		return -1;
	}
	if (mk_sockaddr(AF_INET, SERV4_REWRITE_IP, SERV4_REWRITE_PORT,
			(struct sockaddr *)&dst4_rw_addr,
			sizeof(dst4_rw_addr)) == -1)
		return -1;
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		/* if (sk.family == AF_INET && */
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
			    offsetof(struct bpf_sock_addr, family)),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 8),
		/* sk.type == SOCK_DGRAM) { */
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
			    offsetof(struct bpf_sock_addr, type)),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 6),
		/* msg_src_ip4 = src4_rw_ip */
		BPF_MOV32_IMM(BPF_REG_7, src4_rw_ip.s_addr),
		BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
			    offsetof(struct bpf_sock_addr, msg_src_ip4)),
		/* user_ip4 = dst4_rw_addr.sin_addr */
		BPF_MOV32_IMM(BPF_REG_7, dst4_rw_addr.sin_addr.s_addr),
		BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
			    offsetof(struct bpf_sock_addr, user_ip4)),
		/* user_port = dst4_rw_addr.sin_port */
		BPF_MOV32_IMM(BPF_REG_7, dst4_rw_addr.sin_port),
		BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
			    offsetof(struct bpf_sock_addr, user_port)),
		/* } */
		/* return 1 */
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	return load_insns(test, insns, ARRAY_SIZE(insns));
}
/* C-based IPv4 rewrite programs, compiled separately and loaded by path. */
static int recvmsg4_rw_c_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, RECVMSG4_PROG_PATH);
}
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, SENDMSG4_PROG_PATH);
}
/* Load a hand-assembled IPv6 sendmsg hook that, for AF_INET6 sockets,
 * rewrites the source IP to SRC6_REWRITE_IP and the destination to
 * rw_dst_ip:SERV6_REWRITE_PORT, then returns 1 (allow). The jump offset
 * (18) skips the whole rewrite sequence: 2 insns per 32-bit IPv6 word
 * (4 words x 2 addresses = 16) plus 2 for the port store.
 */
static int sendmsg6_rw_dst_asm_prog_load(const struct sock_addr_test *test,
					 const char *rw_dst_ip)
{
	struct sockaddr_in6 dst6_rw_addr;
	struct in6_addr src6_rw_ip;
	if (inet_pton(AF_INET6, SRC6_REWRITE_IP, (void *)&src6_rw_ip) != 1) {
		log_err("Invalid IPv6: %s", SRC6_REWRITE_IP);
		return -1;
	}
	if (mk_sockaddr(AF_INET6, rw_dst_ip, SERV6_REWRITE_PORT,
			(struct sockaddr *)&dst6_rw_addr,
			sizeof(dst6_rw_addr)) == -1)
		return -1;
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		/* if (sk.family == AF_INET6) { */
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
			    offsetof(struct bpf_sock_addr, family)),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 18),
		/* Store one 32-bit word N of an IPv6 address into ctx. */
#define STORE_IPV6_WORD_N(DST, SRC, N)		       \
		BPF_MOV32_IMM(BPF_REG_7, SRC[N]),	       \
		BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,       \
			    offsetof(struct bpf_sock_addr, DST[N]))
		/* Store all four words of an IPv6 address. */
#define STORE_IPV6(DST, SRC)			       \
		STORE_IPV6_WORD_N(DST, SRC, 0),	       \
		STORE_IPV6_WORD_N(DST, SRC, 1),	       \
		STORE_IPV6_WORD_N(DST, SRC, 2),	       \
		STORE_IPV6_WORD_N(DST, SRC, 3)
		STORE_IPV6(msg_src_ip6, src6_rw_ip.s6_addr32),
		STORE_IPV6(user_ip6, dst6_rw_addr.sin6_addr.s6_addr32),
		/* user_port = dst6_rw_addr.sin6_port */
		BPF_MOV32_IMM(BPF_REG_7, dst6_rw_addr.sin6_port),
		BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
			    offsetof(struct bpf_sock_addr, user_port)),
		/* } */
		/* return 1 */
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	return load_insns(test, insns, ARRAY_SIZE(insns));
}
/* IPv6 loader variants: same asm rewriter with different destination IPs
 * (regular, v4-mapped, wildcard), plus the C-based object-file loaders.
 */
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
{
	return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP);
}
static int recvmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, RECVMSG6_PROG_PATH);
}
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
{
	return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
}
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
{
	return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
}
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
	return load_path(test, SENDMSG6_PROG_PATH);
}
/* Compare two socket addresses of the same family.
 * Returns 0 when the IPs match (and ports match too, if cmp_port is set),
 * 1 when they differ, and -1 when the families differ or are unsupported.
 */
static int cmp_addr(const struct sockaddr_storage *addr1,
		    const struct sockaddr_storage *addr2, int cmp_port)
{
	if (addr1->ss_family != addr2->ss_family)
		return -1;

	if (addr1->ss_family == AF_INET) {
		const struct sockaddr_in *a = (const struct sockaddr_in *)addr1;
		const struct sockaddr_in *b = (const struct sockaddr_in *)addr2;
		int ip_match = a->sin_addr.s_addr == b->sin_addr.s_addr;
		int port_match = !cmp_port || a->sin_port == b->sin_port;

		return !(ip_match && port_match);
	}

	if (addr1->ss_family == AF_INET6) {
		const struct sockaddr_in6 *a = (const struct sockaddr_in6 *)addr1;
		const struct sockaddr_in6 *b = (const struct sockaddr_in6 *)addr2;
		int ip_match = !memcmp(&a->sin6_addr, &b->sin6_addr,
				       sizeof(struct in6_addr));
		int port_match = !cmp_port || a->sin6_port == b->sin6_port;

		return !(ip_match && port_match);
	}

	return -1;
}
/* Fetch sock1's address via fn (getsockname or getpeername) and compare
 * it to addr2 with cmp_addr(). Returns cmp_addr()'s result, or -1 if the
 * address could not be retrieved.
 */
static int cmp_sock_addr(info_fn fn, int sock1,
			 const struct sockaddr_storage *addr2, int cmp_port)
{
	struct sockaddr_storage addr1;
	socklen_t len1 = sizeof(addr1);
	memset(&addr1, 0, len1);
	if (fn(sock1, (struct sockaddr *)&addr1, (socklen_t *)&len1) != 0)
		return -1;
	return cmp_addr(&addr1, addr2, cmp_port);
}
/* Convenience comparators: local IP only, local IP+port, peer IP+port. */
static int cmp_local_ip(int sock1, const struct sockaddr_storage *addr2)
{
	return cmp_sock_addr(getsockname, sock1, addr2, /*cmp_port*/ 0);
}
static int cmp_local_addr(int sock1, const struct sockaddr_storage *addr2)
{
	return cmp_sock_addr(getsockname, sock1, addr2, /*cmp_port*/ 1);
}
static int cmp_peer_addr(int sock1, const struct sockaddr_storage *addr2)
{
	return cmp_sock_addr(getpeername, sock1, addr2, /*cmp_port*/ 1);
}
/* Create a socket in addr's family, bind it to addr and, for stream
 * sockets, start listening. Returns the server fd or -1 on failure.
 */
static int start_server(int type, const struct sockaddr_storage *addr,
			socklen_t addr_len)
{
	int fd = socket(addr->ss_family, type, 0);

	if (fd == -1) {
		log_err("Failed to create server socket");
		return -1;
	}

	if (bind(fd, (const struct sockaddr *)addr, addr_len) == -1) {
		log_err("Failed to bind server socket");
		goto close_out;
	}

	if (type == SOCK_STREAM && listen(fd, 128) == -1) {
		log_err("Failed to listen on server socket");
		goto close_out;
	}

	return fd;
close_out:
	close(fd);
	return -1;
}
/* Create a socket in addr's family and connect() it to addr.
 * Returns the connected fd, or -1 on failure (the socket, if any, is
 * closed). Fix: the error path used to call close(-1) when socket()
 * failed or the family was unsupported, which fails with EBADF and
 * clobbers errno for the caller; only close a valid fd now.
 */
static int connect_to_server(int type, const struct sockaddr_storage *addr,
			     socklen_t addr_len)
{
	int domain;
	int fd = -1;

	domain = addr->ss_family;

	if (domain != AF_INET && domain != AF_INET6) {
		log_err("Unsupported address family");
		goto err;
	}

	fd = socket(domain, type, 0);
	if (fd == -1) {
		log_err("Failed to create client socket");
		goto err;
	}

	if (connect(fd, (const struct sockaddr *)addr, addr_len) == -1) {
		log_err("Fail to connect to server");
		goto err;
	}

	goto out;
err:
	/* Don't close(-1): it fails with EBADF and clobbers errno. */
	if (fd != -1)
		close(fd);
	fd = -1;
out:
	return fd;
}
/* Fill cmsg as an IP_PKTINFO (IPv4) or IPV6_PKTINFO (IPv6) control
 * message that selects SRC4_IP/SRC6_IP as the packet's source address.
 * Returns 0 on success, -1 on unsupported family or bad IP constant.
 */
int init_pktinfo(int domain, struct cmsghdr *cmsg)
{
	struct in6_pktinfo *pktinfo6;
	struct in_pktinfo *pktinfo4;
	if (domain == AF_INET) {
		cmsg->cmsg_level = SOL_IP;
		cmsg->cmsg_type = IP_PKTINFO;
		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
		pktinfo4 = (struct in_pktinfo *)CMSG_DATA(cmsg);
		memset(pktinfo4, 0, sizeof(struct in_pktinfo));
		/* ipi_spec_dst is the local source address for routing */
		if (inet_pton(domain, SRC4_IP,
			      (void *)&pktinfo4->ipi_spec_dst) != 1)
			return -1;
	} else if (domain == AF_INET6) {
		cmsg->cmsg_level = SOL_IPV6;
		cmsg->cmsg_type = IPV6_PKTINFO;
		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
		pktinfo6 = (struct in6_pktinfo *)CMSG_DATA(cmsg);
		memset(pktinfo6, 0, sizeof(struct in6_pktinfo));
		if (inet_pton(domain, SRC6_IP,
			      (void *)&pktinfo6->ipi6_addr) != 1)
			return -1;
	} else {
		return -1;
	}
	return 0;
}
/* Create a socket and sendmsg() a single byte to addr. If set_cmsg is
 * non-zero, attach an IP_PKTINFO/IPV6_PKTINFO control message selecting
 * SRC4_IP/SRC6_IP as source. On sendmsg failure the raw errno is stored
 * in *syscall_err. Returns the client fd on success, -1 on failure.
 * Fix: the error path used to call close(-1) when socket() failed or the
 * family was unsupported (EBADF, clobbers errno); guard the close.
 */
static int sendmsg_to_server(int type, const struct sockaddr_storage *addr,
			     socklen_t addr_len, int set_cmsg, int flags,
			     int *syscall_err)
{
	/* Unions guarantee proper cmsghdr alignment of the control buffers */
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} control6;
	union {
		char buf[CMSG_SPACE(sizeof(struct in_pktinfo))];
		struct cmsghdr align;
	} control4;
	struct msghdr hdr;
	struct iovec iov;
	char data = 'a';
	int domain;
	int fd = -1;

	domain = addr->ss_family;

	if (domain != AF_INET && domain != AF_INET6) {
		log_err("Unsupported address family");
		goto err;
	}

	fd = socket(domain, type, 0);
	if (fd == -1) {
		log_err("Failed to create client socket");
		goto err;
	}

	memset(&iov, 0, sizeof(iov));
	iov.iov_base = &data;
	iov.iov_len = sizeof(data);

	memset(&hdr, 0, sizeof(hdr));
	hdr.msg_name = (void *)addr;
	hdr.msg_namelen = addr_len;
	hdr.msg_iov = &iov;
	hdr.msg_iovlen = 1;

	if (set_cmsg) {
		if (domain == AF_INET) {
			hdr.msg_control = &control4;
			hdr.msg_controllen = sizeof(control4.buf);
		} else if (domain == AF_INET6) {
			hdr.msg_control = &control6;
			hdr.msg_controllen = sizeof(control6.buf);
		}
		if (init_pktinfo(domain, CMSG_FIRSTHDR(&hdr))) {
			log_err("Fail to init pktinfo");
			goto err;
		}
	}

	if (sendmsg(fd, &hdr, flags) != sizeof(data)) {
		log_err("Fail to send message to server");
		*syscall_err = errno;
		goto err;
	}

	goto out;
err:
	/* Don't close(-1): it fails with EBADF and clobbers errno. */
	if (fd != -1)
		close(fd);
	fd = -1;
out:
	return fd;
}
/* Connect a TCP socket via sendmsg() with MSG_FASTOPEN so data rides in
 * the SYN (TCP Fast Open path). The sendmsg errno is intentionally
 * discarded — only the resulting fd (or -1) matters to callers.
 */
static int fastconnect_to_server(const struct sockaddr_storage *addr,
				 socklen_t addr_len)
{
	int sendmsg_err;
	return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0,
				 MSG_FASTOPEN, &sendmsg_err);
}
/* Wait up to 2 seconds for data on sockfd, then receive one message,
 * filling src_addr with the sender's address. Returns the number of bytes
 * received, or -1 on timeout or select() failure.
 */
static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr)
{
	char data[64];
	struct iovec iov = {
		.iov_base = data,
		.iov_len = sizeof(data),
	};
	struct msghdr hdr = {
		.msg_name = src_addr,
		.msg_namelen = sizeof(struct sockaddr_storage),
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(sockfd, &rfds);

	if (select(sockfd + 1, &rfds, NULL, NULL, &tv) <= 0 ||
	    !FD_ISSET(sockfd, &rfds))
		return -1;

	return recvmsg(sockfd, &hdr, 0);
}
/* Resolve the test's expected, requested and (optionally) expected-source
 * addresses into sockaddr_storage form. expected_src_addr is only filled
 * when the test defines expected_src_ip. Returns 0 on success, -1 on any
 * parse failure.
 */
static int init_addrs(const struct sock_addr_test *test,
		      struct sockaddr_storage *requested_addr,
		      struct sockaddr_storage *expected_addr,
		      struct sockaddr_storage *expected_src_addr)
{
	socklen_t addr_len = sizeof(struct sockaddr_storage);

	if (mk_sockaddr(test->domain, test->expected_ip, test->expected_port,
			(struct sockaddr *)expected_addr, addr_len) == -1)
		return -1;

	if (mk_sockaddr(test->domain, test->requested_ip, test->requested_port,
			(struct sockaddr *)requested_addr, addr_len) == -1)
		return -1;

	if (test->expected_src_ip &&
	    mk_sockaddr(test->domain, test->expected_src_ip, 0,
			(struct sockaddr *)expected_src_addr, addr_len) == -1)
		return -1;

	return 0;
}
/* Bind test: start a server on the test's requested address, verify the
 * BPF hook rewrote the local address to the expected one, then sanity-
 * check that a client can connect. Returns 0 on success, -1 otherwise.
 * Fix: guard the cleanup close() calls — clientfd/servfd may still be -1
 * on early error paths, and close(-1) fails with EBADF.
 */
static int run_bind_test_case(const struct sock_addr_test *test)
{
	socklen_t addr_len = sizeof(struct sockaddr_storage);
	struct sockaddr_storage requested_addr;
	struct sockaddr_storage expected_addr;
	int clientfd = -1;
	int servfd = -1;
	int err = 0;

	if (init_addrs(test, &requested_addr, &expected_addr, NULL))
		goto err;

	servfd = start_server(test->type, &requested_addr, addr_len);
	if (servfd == -1)
		goto err;

	if (cmp_local_addr(servfd, &expected_addr))
		goto err;

	/* Try to connect to server just in case */
	clientfd = connect_to_server(test->type, &expected_addr, addr_len);
	if (clientfd == -1)
		goto err;

	goto out;
err:
	err = -1;
out:
	if (clientfd != -1)
		close(clientfd);
	if (servfd != -1)
		close(servfd);
	return err;
}
/* Connect test: start a server on the expected address, connect to the
 * requested one, and verify the BPF hook rewrote both the destination
 * (peer) and source (local) addresses. For TCP, repeat via the TCP Fast
 * Open path. Returns 0 on success, -1 otherwise.
 * Fixes: (1) the first client fd was leaked when it was overwritten by
 * the fastconnect fd — close it before reassigning; (2) guard the
 * cleanup close() calls against fd == -1.
 */
static int run_connect_test_case(const struct sock_addr_test *test)
{
	socklen_t addr_len = sizeof(struct sockaddr_storage);
	struct sockaddr_storage expected_src_addr;
	struct sockaddr_storage requested_addr;
	struct sockaddr_storage expected_addr;
	int clientfd = -1;
	int servfd = -1;
	int err = 0;

	if (init_addrs(test, &requested_addr, &expected_addr,
		       &expected_src_addr))
		goto err;

	/* Prepare server to connect to */
	servfd = start_server(test->type, &expected_addr, addr_len);
	if (servfd == -1)
		goto err;

	clientfd = connect_to_server(test->type, &requested_addr, addr_len);
	if (clientfd == -1)
		goto err;

	/* Make sure src and dst addrs were overridden properly */
	if (cmp_peer_addr(clientfd, &expected_addr))
		goto err;
	if (cmp_local_ip(clientfd, &expected_src_addr))
		goto err;

	if (test->type == SOCK_STREAM) {
		/* Test TCP Fast Open scenario */
		close(clientfd); /* don't leak the first connection's fd */
		clientfd = fastconnect_to_server(&requested_addr, addr_len);
		if (clientfd == -1)
			goto err;

		/* Make sure src and dst addrs were overridden properly */
		if (cmp_peer_addr(clientfd, &expected_addr))
			goto err;
		if (cmp_local_ip(clientfd, &expected_src_addr))
			goto err;
	}

	goto out;
err:
	err = -1;
out:
	if (clientfd != -1)
		close(clientfd);
	if (servfd != -1)
		close(servfd);
	return err;
}
/* sendmsg/recvmsg test: send a datagram to the test's sendmsg address and
 * verify, by receiving it on the server, that the BPF hook rewrote the
 * source address as expected. Runs once without and (if max_cmsg) once
 * with a pktinfo control message. Returns 0 on success; on sendmsg
 * syscall failure, err carries the positive errno for the caller to
 * match against SYSCALL_EPERM/SYSCALL_ENOTSUPP.
 */
static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg)
{
	socklen_t addr_len = sizeof(struct sockaddr_storage);
	struct sockaddr_storage expected_addr;
	struct sockaddr_storage server_addr;
	struct sockaddr_storage sendmsg_addr;
	struct sockaddr_storage recvmsg_addr;
	int clientfd = -1;
	int servfd = -1;
	int set_cmsg;
	int err = 0;
	/* Only datagram sockets make sense for unconnected sendmsg */
	if (test->type != SOCK_DGRAM)
		goto err;
	if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr))
		goto err;
	/* Prepare server to sendmsg to */
	servfd = start_server(test->type, &server_addr, addr_len);
	if (servfd == -1)
		goto err;
	for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) {
		if (clientfd >= 0)
			close(clientfd);
		clientfd = sendmsg_to_server(test->type, &sendmsg_addr,
					     addr_len, set_cmsg, /*flags*/0,
					     &err);
		if (err)
			goto out;
		else if (clientfd == -1)
			goto err;
		/* Try to receive message on server instead of using
		 * getpeername(2) on client socket, to check that client's
		 * destination address was rewritten properly, since
		 * getpeername(2) doesn't work with unconnected datagram
		 * sockets.
		 *
		 * Get source address from recvmsg(2) as well to make sure
		 * source was rewritten properly: getsockname(2) can't be used
		 * since socket is unconnected and source defined for one
		 * specific packet may differ from the one used by default and
		 * returned by getsockname(2).
		 */
		if (recvmsg_from_client(servfd, &recvmsg_addr) == -1)
			goto err;
		if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0))
			goto err;
	}
	goto out;
err:
	err = -1;
out:
	close(clientfd);
	close(servfd);
	return err;
}
/* Run one test: load and attach its BPF program to the cgroup, execute
 * the matching scenario (bind/connect/sendmsg/recvmsg), and reconcile the
 * result with the test's expectation (LOAD_REJECT, ATTACH_REJECT,
 * ATTACH_OKAY, SYSCALL_EPERM, SYSCALL_ENOTSUPP, SUCCESS).
 * Returns 0 on pass, -1 on fail.
 * Fix: close(progfd) was called unconditionally, so the expected-
 * LOAD_REJECT path closed fd -1 (EBADF); close only a valid fd, together
 * with the detach it already guarded.
 */
static int run_test_case(int cgfd, const struct sock_addr_test *test)
{
	int progfd = -1;
	int err = 0;

	printf("Test case: %s .. ", test->descr);

	progfd = test->loadfn(test);
	if (test->expected_result == LOAD_REJECT && progfd < 0)
		goto out;
	else if (test->expected_result == LOAD_REJECT || progfd < 0)
		goto err;

	err = bpf_prog_attach(progfd, cgfd, test->attach_type,
			      BPF_F_ALLOW_OVERRIDE);
	if (test->expected_result == ATTACH_REJECT && err) {
		err = 0; /* error was expected, reset it */
		goto out;
	} else if (test->expected_result == ATTACH_REJECT || err) {
		goto err;
	} else if (test->expected_result == ATTACH_OKAY) {
		err = 0;
		goto out;
	}

	switch (test->attach_type) {
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
		err = run_bind_test_case(test);
		break;
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
		err = run_connect_test_case(test);
		break;
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
		err = run_xmsg_test_case(test, 1);
		break;
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		err = run_xmsg_test_case(test, 0);
		break;
	default:
		goto err;
	}

	if (test->expected_result == SYSCALL_EPERM && err == EPERM) {
		err = 0; /* error was expected, reset it */
		goto out;
	}

	if (test->expected_result == SYSCALL_ENOTSUPP && err == ENOTSUPP) {
		err = 0; /* error was expected, reset it */
		goto out;
	}

	if (err || test->expected_result != SUCCESS)
		goto err;

	goto out;
err:
	err = -1;
out:
	/* Detaching w/o checking return code: best effort attempt. */
	if (progfd != -1) {
		bpf_prog_detach(cgfd, test->attach_type);
		close(progfd);
	}
	printf("[%s]\n", err ? "FAIL" : "PASS");
	return err;
}
/* Run every entry in tests[] against the given cgroup fd and print a
 * pass/fail summary. Returns 0 if all tests passed, -1 otherwise.
 */
static int run_tests(int cgfd)
{
	int passes = 0, fails = 0, i;

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		if (run_test_case(cgfd, &tests[i]) == 0)
			++passes;
		else
			++fails;
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
	return fails ? -1 : 0;
}
/* Entry point. Refuses to run without a wrapper argument (the test is
 * meant to be launched by test_sock_addr.sh, which sets up networking),
 * joins a fresh cgroup, runs all tests, and cleans the cgroup up.
 */
int main(int argc, char **argv)
{
	int cgfd = -1;
	int err = 0;
	if (argc < 2) {
		fprintf(stderr,
			"%s has to be run via %s.sh. Skip direct run.\n",
			argv[0], argv[0]);
		/* err is 0 here: direct invocation is a skip, not a failure */
		exit(err);
	}
	cgfd = cgroup_setup_and_join(CG_PATH);
	if (cgfd < 0)
		goto err;
	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	if (run_tests(cgfd))
		goto err;
	goto out;
err:
	err = -1;
out:
	close(cgfd);
	cleanup_cgroup_environment();
	return err;
}
| linux-master | tools/testing/selftests/bpf/test_sock_addr.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <argp.h>
#include <string.h>
#include <stdlib.h>
#include <sched.h>
#include <pthread.h>
#include <dirent.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/sysinfo.h>
#include <sys/stat.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include <libelf.h>
#include <gelf.h>
#include <float.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
/* Identifiers for collected verifier statistics. Numeric stats come
 * first; FILE_NAME/PROG_NAME are string-valued. NUM_STATS_CNT counts only
 * the numeric stats stored in verif_stats.stats[].
 */
enum stat_id {
	VERDICT,
	DURATION,
	TOTAL_INSNS,
	TOTAL_STATES,
	PEAK_STATES,
	MAX_STATES_PER_INSN,
	MARK_READ_MAX_LEN,
	FILE_NAME,
	PROG_NAME,
	ALL_STATS_CNT,
	NUM_STATS_CNT = FILE_NAME - VERDICT,
};
/* In comparison mode each stat can specify up to four different values:
* - A side value;
* - B side value;
* - absolute diff value;
* - relative (percentage) diff value.
*
* When specifying stat specs in comparison mode, user can use one of the
* following variant suffixes to specify which exact variant should be used for
* ordering or filtering:
* - `_a` for A side value;
* - `_b` for B side value;
* - `_diff` for absolute diff value;
* - `_pct` for relative (percentage) diff value.
*
* If no variant suffix is provided, then `_b` (control data) is assumed.
*
* As an example, let's say instructions stat has the following output:
*
* Insns (A) Insns (B) Insns (DIFF)
* --------- --------- --------------
* 21547 20920 -627 (-2.91%)
*
* Then:
* - 21547 is A side value (insns_a);
* - 20920 is B side value (insns_b);
* - -627 is absolute diff value (insns_diff);
* - -2.91% is relative diff value (insns_pct).
*
* For verdict there is no verdict_pct variant.
* For file and program name, _a and _b variants are equivalent and there are
* no _diff or _pct variants.
*/
/* Which value of a stat to use in comparison mode (see comment above). */
enum stat_variant {
	VARIANT_A,
	VARIANT_B,
	VARIANT_DIFF,
	VARIANT_PCT,
};
/* Per-program stats from a single verification run; stats[] is indexed by
 * the numeric part of enum stat_id.
 */
struct verif_stats {
	char *file_name;
	char *prog_name;
	long stats[NUM_STATS_CNT];
};
/* joined comparison mode stats */
struct verif_stats_join {
	char *file_name;
	char *prog_name;
	/* either side may be NULL when a program exists in only one data set */
	const struct verif_stats *stats_a;
	const struct verif_stats *stats_b;
};
/* An ordered selection of stats, used both for output columns and for
 * sort keys.
 */
struct stat_specs {
	int spec_cnt;
	enum stat_id ids[ALL_STATS_CNT];
	enum stat_variant variants[ALL_STATS_CNT];
	bool asc[ALL_STATS_CNT];
	int lens[ALL_STATS_CNT * 3]; /* 3x for comparison mode */
};
enum resfmt {
	RESFMT_TABLE,
	RESFMT_TABLE_CALCLEN, /* fake format to pre-calculate table's column widths */
	RESFMT_CSV,
};
enum filter_kind {
	FILTER_NAME,
	FILTER_STAT,
};
enum operator_kind {
	OP_EQ, /* == or = */
	OP_NEQ, /* != or <> */
	OP_LT, /* < */
	OP_LE, /* <= */
	OP_GT, /* > */
	OP_GE, /* >= */
};
/* A single allow/deny filter: either a file/prog name glob (FILTER_NAME)
 * or a numeric comparison against one stat (FILTER_STAT).
 */
struct filter {
	enum filter_kind kind;
	/* FILTER_NAME */
	char *any_glob;
	char *file_glob;
	char *prog_glob;
	/* FILTER_STAT */
	enum operator_kind op;
	int stat_id;
	enum stat_variant stat_var;
	long value;
};
/* Global program state, populated by argp option parsing and by the
 * stats-collection passes.
 */
static struct env {
	char **filenames;
	int filename_cnt;
	bool verbose;
	bool debug;
	bool quiet;
	bool force_checkpoints;
	enum resfmt out_fmt;
	bool show_version;
	bool comparison_mode;
	bool replay_mode;
	int log_level;
	int log_size;
	bool log_fixed;
	/* stats collected from the current run (or the comparison data set) */
	struct verif_stats *prog_stats;
	int prog_stat_cnt;
	/* baseline_stats is allocated and used only in comparison mode */
	struct verif_stats *baseline_stats;
	int baseline_stat_cnt;
	struct verif_stats_join *join_stats;
	int join_stat_cnt;
	struct stat_specs output_spec;
	struct stat_specs sort_spec;
	struct filter *allow_filters;
	struct filter *deny_filters;
	int allow_filter_cnt;
	int deny_filter_cnt;
	int files_processed;
	int files_skipped;
	int progs_processed;
	int progs_skipped;
} env;
/* libbpf logging callback: forward messages to stderr, but only in
 * verbose mode, and only pass DEBUG-level messages when -d was given.
 */
static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
{
	if (!env.verbose || (level == LIBBPF_DEBUG && !env.debug))
		return 0;
	return vfprintf(stderr, format, args);
}
/* Version string is normally injected by the build; fall back for
 * in-kernel-tree builds.
 */
#ifndef VERISTAT_VERSION
#define VERISTAT_VERSION "<kernel>"
#endif
const char *argp_program_version = "veristat v" VERISTAT_VERSION;
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
const char argp_program_doc[] =
"veristat	BPF verifier stats collection and comparison tool.\n"
"\n"
"USAGE: veristat <obj-file> [<obj-file>...]\n"
"   OR: veristat -C <baseline.csv> <comparison.csv>\n"
"   OR: veristat -R <results.csv>\n";
/* Option keys for long-only options (no short-letter equivalent). */
enum {
	OPT_LOG_FIXED = 1000,
	OPT_LOG_SIZE = 1001,
};
static const struct argp_option opts[] = {
	{ NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" },
	{ "version", 'V', NULL, 0, "Print version" },
	{ "verbose", 'v', NULL, 0, "Verbose mode" },
	{ "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" },
	{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
	{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
	{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
	{ "test-states", 't', NULL, 0,
	  "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
	{ "quiet", 'q', NULL, 0, "Quiet mode" },
	{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
	{ "sort", 's', "SPEC", 0, "Specify sort order" },
	{ "output-format", 'o', "FMT", 0, "Result output format (table, csv), default is table." },
	{ "compare", 'C', NULL, 0, "Comparison mode" },
	{ "replay", 'R', NULL, 0, "Replay mode" },
	{ "filter", 'f', "FILTER", 0, "Filter expressions (or @filename for file with expressions)." },
	{},
};
/* Forward declarations for helpers used by parse_arg() below. */
static int parse_stats(const char *stats_str, struct stat_specs *specs);
static int append_filter(struct filter **filters, int *cnt, const char *str);
static int append_filter_file(const char *path);
/* argp callback: translate one parsed option (or positional file name)
 * into env state. Returns 0, a negative error, or ARGP_ERR_UNKNOWN for
 * keys argp should handle itself.
 */
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	void *tmp;
	int err;
	switch (key) {
	case 'h':
		argp_state_help(state, stderr, ARGP_HELP_STD_HELP);
		break;
	case 'V':
		env.show_version = true;
		break;
	case 'v':
		env.verbose = true;
		break;
	case 'd':
		/* -d implies -v: debug output is a superset of verbose */
		env.debug = true;
		env.verbose = true;
		break;
	case 'q':
		env.quiet = true;
		break;
	case 'e':
		err = parse_stats(arg, &env.output_spec);
		if (err)
			return err;
		break;
	case 's':
		err = parse_stats(arg, &env.sort_spec);
		if (err)
			return err;
		break;
	case 'o':
		if (strcmp(arg, "table") == 0) {
			env.out_fmt = RESFMT_TABLE;
		} else if (strcmp(arg, "csv") == 0) {
			env.out_fmt = RESFMT_CSV;
		} else {
			fprintf(stderr, "Unrecognized output format '%s'\n", arg);
			return -EINVAL;
		}
		break;
	case 'l':
		errno = 0;
		env.log_level = strtol(arg, NULL, 10);
		if (errno) {
			fprintf(stderr, "invalid log level: %s\n", arg);
			argp_usage(state);
		}
		break;
	case OPT_LOG_FIXED:
		env.log_fixed = true;
		break;
	case OPT_LOG_SIZE:
		errno = 0;
		env.log_size = strtol(arg, NULL, 10);
		if (errno) {
			fprintf(stderr, "invalid log size: %s\n", arg);
			argp_usage(state);
		}
		break;
	case 't':
		env.force_checkpoints = true;
		break;
	case 'C':
		env.comparison_mode = true;
		break;
	case 'R':
		env.replay_mode = true;
		break;
	case 'f':
		/* '@file' reads filters from a file; '!expr' is a deny filter */
		if (arg[0] == '@')
			err = append_filter_file(arg + 1);
		else if (arg[0] == '!')
			err = append_filter(&env.deny_filters, &env.deny_filter_cnt, arg + 1);
		else
			err = append_filter(&env.allow_filters, &env.allow_filter_cnt, arg);
		if (err) {
			fprintf(stderr, "Failed to collect program filter expressions: %d\n", err);
			return err;
		}
		break;
	case ARGP_KEY_ARG:
		/* positional argument: grow the object-file name list */
		tmp = realloc(env.filenames, (env.filename_cnt + 1) * sizeof(*env.filenames));
		if (!tmp)
			return -ENOMEM;
		env.filenames = tmp;
		env.filenames[env.filename_cnt] = strdup(arg);
		if (!env.filenames[env.filename_cnt])
			return -ENOMEM;
		env.filename_cnt++;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}
/* argp parser descriptor tying options, callback and doc string together. */
static const struct argp argp = {
	.options = opts,
	.parser = parse_arg,
	.doc = argp_program_doc,
};
/* Adapted from perf/util/string.c */
/* Match str against pat, where '*' matches any (possibly empty)
 * substring; all other characters match literally. Returns true only on
 * a full match of the whole string.
 */
static bool glob_matches(const char *str, const char *pat)
{
	/* Consume literal characters until mismatch, end, or wildcard */
	for (; *str && *pat && *pat != '*'; str++, pat++) {
		if (*str != *pat)
			return false;
	}

	if (*pat == '*') {
		/* Collapse runs of consecutive wildcards */
		do {
			pat++;
		} while (*pat == '*');

		/* Trailing wildcard matches whatever remains of str */
		if (!*pat)
			return true;

		/* Try anchoring the rest of the pattern at every suffix */
		while (*str) {
			if (glob_matches(str++, pat))
				return true;
		}
	}

	return !*str && !*pat;
}
/* Heuristically check whether path is a BPF ELF object: a 64-bit ET_REL
 * ELF whose machine is EM_BPF (or EM_NONE, emitted by old LLVM). Returns
 * true also when the file can't even be opened, so the caller fails later
 * with a proper error message.
 */
static bool is_bpf_obj_file(const char *path) {
	Elf64_Ehdr *ehdr;
	int fd, err = -EINVAL;
	Elf *elf = NULL;
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return true; /* we'll fail later and propagate error */
	/* ensure libelf is initialized */
	(void)elf_version(EV_CURRENT);
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		goto cleanup;
	if (elf_kind(elf) != ELF_K_ELF || gelf_getclass(elf) != ELFCLASS64)
		goto cleanup;
	ehdr = elf64_getehdr(elf);
	/* Old LLVM set e_machine to EM_NONE */
	if (!ehdr || ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF))
		goto cleanup;
	err = 0;
cleanup:
	if (elf)
		elf_end(elf);
	close(fd);
	return err == 0;
}
/* Decide whether a given file/program combination passes the name-based
 * allow/deny filters. Deny filters win; if no name allow filters exist,
 * everything not denied is allowed. prog_name may be NULL before the BPF
 * object is opened, in which case any_glob/prog_glob filters are treated
 * optimistically (re-checked later with the real program name).
 */
static bool should_process_file_prog(const char *filename, const char *prog_name)
{
	struct filter *f;
	int i, allow_cnt = 0;
	for (i = 0; i < env.deny_filter_cnt; i++) {
		f = &env.deny_filters[i];
		if (f->kind != FILTER_NAME)
			continue;
		if (f->any_glob && glob_matches(filename, f->any_glob))
			return false;
		if (f->any_glob && prog_name && glob_matches(prog_name, f->any_glob))
			return false;
		if (f->file_glob && glob_matches(filename, f->file_glob))
			return false;
		if (f->prog_glob && prog_name && glob_matches(prog_name, f->prog_glob))
			return false;
	}
	for (i = 0; i < env.allow_filter_cnt; i++) {
		f = &env.allow_filters[i];
		if (f->kind != FILTER_NAME)
			continue;
		allow_cnt++;
		if (f->any_glob) {
			if (glob_matches(filename, f->any_glob))
				return true;
			/* If we don't know program name yet, any_glob filter
			 * has to assume that current BPF object file might be
			 * relevant; we'll check again later on after opening
			 * BPF object file, at which point program name will
			 * be known finally.
			 */
			if (!prog_name || glob_matches(prog_name, f->any_glob))
				return true;
		} else {
			if (f->file_glob && !glob_matches(filename, f->file_glob))
				continue;
			if (f->prog_glob && prog_name && !glob_matches(prog_name, f->prog_glob))
				continue;
			return true;
		}
	}
	/* if there are no file/prog name allow filters, allow all progs,
	 * unless they are denied earlier explicitly
	 */
	return allow_cnt == 0;
}
/* Textual comparison operators recognized in stat filter expressions. */
static struct {
	enum operator_kind op_kind;
	const char *op_str;
} operators[] = {
	/* Order of these definitions matter to avoid situations like '<'
	 * matching part of what is actually a '<>' operator. That is,
	 * substrings should go last.
	 */
	{ OP_EQ, "==" },
	{ OP_NEQ, "!=" },
	{ OP_NEQ, "<>" },
	{ OP_LE, "<=" },
	{ OP_LT, "<" },
	{ OP_GE, ">=" },
	{ OP_GT, ">" },
	{ OP_EQ, "=" },
};
static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var);
/* Parse one filter expression and append it to *filters (growing the
 * array; *cnt is bumped on success). The expression is either a stat
 * comparison ("<stat><op><value>") or, failing that parse, a file/prog
 * name glob. Returns 0 on success or a negative errno.
 */
static int append_filter(struct filter **filters, int *cnt, const char *str)
{
	struct filter *f;
	void *tmp;
	const char *p;
	int i;
	tmp = realloc(*filters, (*cnt + 1) * sizeof(**filters));
	if (!tmp)
		return -ENOMEM;
	*filters = tmp;
	f = &(*filters)[*cnt];
	memset(f, 0, sizeof(*f));
	/* First, let's check if it's a stats filter of the following form:
	 * <stat><op><value, where:
	 *   - <stat> is one of supported numerical stats (verdict is also
	 *     considered numerical, failure == 0, success == 1);
	 *   - <op> is comparison operator (see `operators` definitions);
	 *   - <value> is an integer (or failure/success, or false/true as
	 *     special aliases for 0 and 1, respectively).
	 * If the form doesn't match what user provided, we assume file/prog
	 * glob filter.
	 */
	for (i = 0; i < ARRAY_SIZE(operators); i++) {
		enum stat_variant var;
		int id;
		long val;
		const char *end = str;
		const char *op_str;
		op_str = operators[i].op_str;
		p = strstr(str, op_str);
		if (!p)
			continue;
		if (!parse_stat_id_var(str, p - str, &id, &var)) {
			fprintf(stderr, "Unrecognized stat name in '%s'!\n", str);
			return -EINVAL;
		}
		if (id >= FILE_NAME) {
			fprintf(stderr, "Non-integer stat is specified in '%s'!\n", str);
			return -EINVAL;
		}
		p += strlen(op_str);
		/* Accept boolean-ish aliases for 1 and 0 before trying strtol */
		if (strcasecmp(p, "true") == 0 ||
		    strcasecmp(p, "t") == 0 ||
		    strcasecmp(p, "success") == 0 ||
		    strcasecmp(p, "succ") == 0 ||
		    strcasecmp(p, "s") == 0 ||
		    strcasecmp(p, "match") == 0 ||
		    strcasecmp(p, "m") == 0) {
			val = 1;
		} else if (strcasecmp(p, "false") == 0 ||
			   strcasecmp(p, "f") == 0 ||
			   strcasecmp(p, "failure") == 0 ||
			   strcasecmp(p, "fail") == 0 ||
			   strcasecmp(p, "mismatch") == 0 ||
			   strcasecmp(p, "mis") == 0) {
			val = 0;
		} else {
			errno = 0;
			val = strtol(p, (char **)&end, 10);
			if (errno || end == p || *end != '\0' ) {
				fprintf(stderr, "Invalid integer value in '%s'!\n", str);
				return -EINVAL;
			}
		}
		f->kind = FILTER_STAT;
		f->stat_id = id;
		f->stat_var = var;
		f->op = operators[i].op_kind;
		f->value = val;
		*cnt += 1;
		return 0;
	}
	/* File/prog filter can be specified either as '<glob>' or
	 * '<file-glob>/<prog-glob>'. In the former case <glob> is applied to
	 * both file and program names. This seems to be way more useful in
	 * practice. If user needs full control, they can use '/<prog-glob>'
	 * form to glob just program name, or '<file-glob>/' to glob only file
	 * name. But usually common <glob> seems to be the most useful and
	 * ergonomic way.
	 */
	f->kind = FILTER_NAME;
	p = strchr(str, '/');
	if (!p) {
		f->any_glob = strdup(str);
		if (!f->any_glob)
			return -ENOMEM;
	} else {
		if (str != p) {
			/* non-empty file glob */
			f->file_glob = strndup(str, p - str);
			if (!f->file_glob)
				return -ENOMEM;
		}
		if (strlen(p + 1) > 0) {
			/* non-empty prog glob */
			f->prog_glob = strdup(p + 1);
			if (!f->prog_glob) {
				free(f->file_glob);
				f->file_glob = NULL;
				return -ENOMEM;
			}
		}
	}
	*cnt += 1;
	return 0;
}
/* Load filter expressions from a text file, one filter per line.
 * Lines starting with '#' are comments; a leading '!' marks a negative
 * (deny) filter, everything else is an allow filter.
 * Returns 0 on success, negative error code on failure.
 */
static int append_filter_file(const char *path)
{
	char line[1024];
	FILE *fp;
	int err = 0;

	fp = fopen(path, "r");
	if (!fp) {
		err = -errno;
		fprintf(stderr, "Failed to open filters in '%s': %d\n", path, err);
		return err;
	}

	/* " %1023[^\n]\n" skips leading whitespace and reads one line */
	while (fscanf(fp, " %1023[^\n]\n", line) == 1) {
		/* lines starting with # are comments, skip them */
		if (line[0] == '\0' || line[0] == '#')
			continue;
		/* lines starting with ! are negative match filters */
		if (line[0] == '!')
			err = append_filter(&env.deny_filters, &env.deny_filter_cnt, line + 1);
		else
			err = append_filter(&env.allow_filters, &env.allow_filter_cnt, line);
		if (err)
			break;
	}

	fclose(fp);
	return err;
}
/* Default set of columns for human-readable table output. */
static const struct stat_specs default_output_spec = {
	.spec_cnt = 7,
	.ids = {
		FILE_NAME, PROG_NAME, VERDICT, DURATION,
		TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
	},
};
/* Default set of columns for CSV output; includes two extra stats
 * (max states per insn and max mark-read length) on top of the table
 * defaults, since CSV is meant for machine consumption.
 */
static const struct stat_specs default_csv_output_spec = {
	.spec_cnt = 9,
	.ids = {
		FILE_NAME, PROG_NAME, VERDICT, DURATION,
		TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
		MAX_STATES_PER_INSN, MARK_READ_MAX_LEN,
	},
};
/* Default sort order: by file name, then program name, both ascending. */
static const struct stat_specs default_sort_spec = {
	.spec_cnt = 2,
	.ids = {
		FILE_NAME, PROG_NAME,
	},
	.asc = { true, true, },
};
/* sorting for comparison mode to join two data sets; file+prog name is
 * the unique join key, so both data sets are sorted by it before merging
 */
static const struct stat_specs join_sort_spec = {
	.spec_cnt = 2,
	.ids = {
		FILE_NAME, PROG_NAME,
	},
	.asc = { true, true, },
};
/* Per-stat metadata: table header text, accepted CLI aliases (first one
 * doubles as the CSV column name), default sort direction, and table
 * alignment. Indexed by enum stat_id.
 */
static struct stat_def {
	const char *header;		/* human-readable column header */
	const char *names[4];		/* aliases; names[0] is the CSV name */
	bool asc_by_default;		/* sort ascending unless overridden */
	bool left_aligned;		/* left-align in table output */
} stat_defs[] = {
	[FILE_NAME] = { "File", {"file_name", "filename", "file"}, true /* asc */, true /* left */ },
	[PROG_NAME] = { "Program", {"prog_name", "progname", "prog"}, true /* asc */, true /* left */ },
	[VERDICT] = { "Verdict", {"verdict"}, true /* asc: failure, success */, true /* left */ },
	[DURATION] = { "Duration (us)", {"duration", "dur"}, },
	[TOTAL_INSNS] = { "Insns", {"total_insns", "insns"}, },
	[TOTAL_STATES] = { "States", {"total_states", "states"}, },
	[PEAK_STATES] = { "Peak states", {"peak_states"}, },
	[MAX_STATES_PER_INSN] = { "Max states per insn", {"max_states_per_insn"}, },
	[MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, },
};
/* Resolve a stat name (possibly carrying a variant suffix) into a stat ID
 * and variant. Only the first @len characters of @name are considered --
 * @name is not necessarily NUL-terminated at @len (callers pass a slice
 * of a larger filter string). Accepted forms are any alias from
 * stat_defs[].names, optionally followed by exactly one of the "_a",
 * "_b", "_diff", "_pct" comparison-mode suffixes.
 * Returns true and fills *id/*var on success, false otherwise.
 */
static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var)
{
	static const char *var_sfxs[] = {
		[VARIANT_A] = "_a",
		[VARIANT_B] = "_b",
		[VARIANT_DIFF] = "_diff",
		[VARIANT_PCT] = "_pct",
	};
	int i, j, k;

	for (i = 0; i < ARRAY_SIZE(stat_defs); i++) {
		struct stat_def *def = &stat_defs[i];
		size_t alias_len, sfx_len;
		const char *alias;

		for (j = 0; j < ARRAY_SIZE(stat_defs[i].names); j++) {
			alias = def->names[j];
			if (!alias)
				continue;

			alias_len = strlen(alias);
			/* alias must be a prefix of name in either form */
			if (strncmp(name, alias, alias_len) != 0)
				continue;

			if (alias_len == len) {
				/* If no variant suffix is specified, we
				 * assume control group (just in case we are
				 * in comparison mode. Variant is ignored in
				 * non-comparison mode.
				 */
				*var = VARIANT_B;
				*id = i;
				return true;
			}

			/* otherwise the remainder must match one of the
			 * known variant suffixes exactly (length-checked
			 * first, since name isn't NUL-terminated at len)
			 */
			for (k = 0; k < ARRAY_SIZE(var_sfxs); k++) {
				sfx_len = strlen(var_sfxs[k]);
				if (alias_len + sfx_len != len)
					continue;

				if (strncmp(name + alias_len, var_sfxs[k], sfx_len) == 0) {
					*var = (enum stat_variant)k;
					*id = i;
					return true;
				}
			}
		}
	}

	return false;
}
/* '^' at the end of a stat spec requests ascending sort order */
static bool is_asc_sym(char c)
{
	switch (c) {
	case '^':
		return true;
	default:
		return false;
	}
}
/* any of 'v', 'V', '.', '!', '_' at the end of a stat spec requests
 * descending sort order
 */
static bool is_desc_sym(char c)
{
	switch (c) {
	case 'v':
	case 'V':
	case '.':
	case '!':
	case '_':
		return true;
	default:
		return false;
	}
}
/* Parse a single stat spec (e.g. "insns", "total_states_pct^",
 * "duration_v") and append it to @specs. A trailing '^' requests
 * ascending and 'v'/'V'/'.'/'!'/'_' descending sort order; without a
 * marker the stat's default order is used.
 * Returns 0 on success, -E2BIG if @specs is full, -ESRCH for an
 * unrecognized stat name.
 */
static int parse_stat(const char *stat_name, struct stat_specs *specs)
{
	size_t name_len = strlen(stat_name);
	bool order_given = false, order_asc = false;
	enum stat_variant var;
	int id;

	if (specs->spec_cnt >= ARRAY_SIZE(specs->ids)) {
		fprintf(stderr, "Can't specify more than %zd stats\n", ARRAY_SIZE(specs->ids));
		return -E2BIG;
	}

	/* peel off the optional trailing sort-order marker */
	if (name_len > 1) {
		char last = stat_name[name_len - 1];

		if (is_asc_sym(last) || is_desc_sym(last)) {
			order_given = true;
			order_asc = is_asc_sym(last);
			name_len--;
		}
	}

	if (!parse_stat_id_var(stat_name, name_len, &id, &var)) {
		fprintf(stderr, "Unrecognized stat name '%s'\n", stat_name);
		return -ESRCH;
	}

	specs->ids[specs->spec_cnt] = id;
	specs->variants[specs->spec_cnt] = var;
	specs->asc[specs->spec_cnt] = order_given ? order_asc : stat_defs[id].asc_by_default;
	specs->spec_cnt++;

	return 0;
}
/* Parse a comma-separated list of stat specs (see parse_stat()) into
 * @specs. Returns 0 on success, negative error code otherwise.
 *
 * Fix: the strdup()'ed working copy of @stats_str was previously leaked
 * on both the success and the error path; it is now freed before return.
 */
static int parse_stats(const char *stats_str, struct stat_specs *specs)
{
	char *input, *state = NULL, *next;
	int err = 0;

	/* strtok_r() mutates its input, so work on a private copy */
	input = strdup(stats_str);
	if (!input)
		return -ENOMEM;

	while ((next = strtok_r(state ? NULL : input, ",", &state))) {
		err = parse_stat(next, specs);
		if (err)
			break;
	}

	free(input);
	return err;
}
/* Free an array of @stat_cnt verif_stats records, including their owned
 * file/prog name strings. NULL @stats is a no-op.
 *
 * Fix: loop index was a signed int compared against a size_t count
 * (signed/unsigned mismatch and potential truncation); use size_t.
 */
static void free_verif_stats(struct verif_stats *stats, size_t stat_cnt)
{
	size_t i;

	if (!stats)
		return;

	for (i = 0; i < stat_cnt; i++) {
		free(stats[i].file_name);
		free(stats[i].prog_name);
	}
	free(stats);
}
/* shared verifier log buffer used in non-verbose mode (stats-only log) */
static char verif_log_buf[64 * 1024];

/* scan at most this many trailing log lines when extracting stats */
#define MAX_PARSED_LOG_LINES 100
/* Extract verifier stats from the tail of the verifier log in @buf.
 * Scans up to MAX_PARSED_LOG_LINES lines backwards from the end of the
 * log, matching the "verification time ..." and "processed ... insns ..."
 * summary lines and storing matched values into @s->stats.
 * Always returns 0; stats not found in the log keep their prior values.
 */
static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *s)
{
	const char *cur;
	int pos, lines;

	/* make sure the buffer is a valid C string before strlen() */
	buf[buf_sz - 1] = '\0';

	for (pos = strlen(buf) - 1, lines = 0; pos >= 0 && lines < MAX_PARSED_LOG_LINES; lines++) {
		/* find previous endline or otherwise take the start of log buf */
		for (cur = &buf[pos]; cur > buf && cur[0] != '\n'; cur--, pos--) {
		}
		/* next time start from end of previous line (or pos goes to <0) */
		pos--;
		/* if we found endline, point right after endline symbol;
		 * otherwise, stay at the beginning of log buf
		 */
		if (cur[0] == '\n')
			cur++;

		if (1 == sscanf(cur, "verification time %ld usec\n", &s->stats[DURATION]))
			continue;
		if (6 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld",
				&s->stats[TOTAL_INSNS],
				&s->stats[MAX_STATES_PER_INSN],
				&s->stats[TOTAL_STATES],
				&s->stats[PEAK_STATES],
				&s->stats[MARK_READ_MAX_LEN]))
			continue;
	}

	return 0;
}
/* Map a BPF program's context type name (e.g. "xdp_md") to the most
 * likely program type and expected attach type.
 * Returns 0 and fills *prog_type/*attach_type on a match, -EINVAL for
 * a NULL @ctx_name, -ESRCH when the name isn't recognized.
 */
static int guess_prog_type_by_ctx_name(const char *ctx_name,
				       enum bpf_prog_type *prog_type,
				       enum bpf_attach_type *attach_type)
{
	/* We need to guess program type based on its declared context type.
	 * This guess can't be perfect as many different program types might
	 * share the same context type.  So we can only hope to reasonably
	 * well guess this and get lucky.
	 *
	 * Just in case, we support both UAPI-side type names and
	 * kernel-internal names.
	 */
	static struct {
		const char *uapi_name;
		const char *kern_name;
		enum bpf_prog_type prog_type;
		enum bpf_attach_type attach_type;
	} ctx_map[] = {
		/* __sk_buff is most ambiguous, we assume TC program */
		{ "__sk_buff", "sk_buff", BPF_PROG_TYPE_SCHED_CLS },
		{ "bpf_sock", "sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND },
		{ "bpf_sock_addr", "bpf_sock_addr_kern",  BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND },
		{ "bpf_sock_ops", "bpf_sock_ops_kern", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS },
		{ "sk_msg_md", "sk_msg", BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT },
		{ "bpf_cgroup_dev_ctx", "bpf_cgroup_dev_ctx", BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE },
		{ "bpf_sysctl", "bpf_sysctl_kern", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL },
		{ "bpf_sockopt", "bpf_sockopt_kern", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT },
		{ "sk_reuseport_md", "sk_reuseport_kern", BPF_PROG_TYPE_SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE },
		{ "bpf_sk_lookup", "bpf_sk_lookup_kern", BPF_PROG_TYPE_SK_LOOKUP, BPF_SK_LOOKUP },
		{ "xdp_md", "xdp_buff", BPF_PROG_TYPE_XDP, BPF_XDP },
		/* tracing types with no expected attach type */
		{ "bpf_user_pt_regs_t", "pt_regs", BPF_PROG_TYPE_KPROBE },
		{ "bpf_perf_event_data", "bpf_perf_event_data_kern", BPF_PROG_TYPE_PERF_EVENT },
		/* raw_tp programs use u64[] from kernel side, we don't want
		 * to match on that, probably; so NULL for kern-side type
		 */
		{ "bpf_raw_tracepoint_args", NULL, BPF_PROG_TYPE_RAW_TRACEPOINT },
	};
	int i;

	if (!ctx_name)
		return -EINVAL;

	/* match either the UAPI or (if present) the kernel-internal name */
	for (i = 0; i < ARRAY_SIZE(ctx_map); i++) {
		if (strcmp(ctx_map[i].uapi_name, ctx_name) == 0 ||
		    (ctx_map[i].kern_name && strcmp(ctx_map[i].kern_name, ctx_name) == 0)) {
			*prog_type = ctx_map[i].prog_type;
			*attach_type = ctx_map[i].attach_type;
			return 0;
		}
	}

	return -ESRCH;
}
/* Massage a freshly-opened BPF object so it has the best chance of
 * loading under veristat: disable map pinning, give zero-sized maps a
 * minimal size, and for SEC(freplace) programs try to guess the target
 * program type from the BTF type of the program's context argument.
 *
 * Fix: btf__find_by_name_kind() can return a negative ID when the
 * program's FUNC entry is missing from BTF; btf__type_by_id() then
 * returns NULL and the subsequent t->type dereference crashed. Both
 * lookups are now checked before use.
 */
static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename)
{
	struct bpf_map *map;

	bpf_object__for_each_map(map, obj) {
		/* disable pinning */
		bpf_map__set_pin_path(map, NULL);

		/* fix up map size, if necessary */
		switch (bpf_map__type(map)) {
		case BPF_MAP_TYPE_SK_STORAGE:
		case BPF_MAP_TYPE_TASK_STORAGE:
		case BPF_MAP_TYPE_INODE_STORAGE:
		case BPF_MAP_TYPE_CGROUP_STORAGE:
			break;
		default:
			if (bpf_map__max_entries(map) == 0)
				bpf_map__set_max_entries(map, 1);
		}
	}

	/* SEC(freplace) programs can't be loaded with veristat as is,
	 * but we can try guessing their target program's expected type by
	 * looking at the type of program's first argument and substituting
	 * corresponding program type
	 */
	if (bpf_program__type(prog) == BPF_PROG_TYPE_EXT) {
		const struct btf *btf = bpf_object__btf(obj);
		const char *prog_name = bpf_program__name(prog);
		enum bpf_prog_type prog_type;
		enum bpf_attach_type attach_type;
		const struct btf_type *t;
		const char *ctx_name;
		int id;

		if (!btf)
			goto skip_freplace_fixup;

		id = btf__find_by_name_kind(btf, prog_name, BTF_KIND_FUNC);
		if (id < 0) /* no FUNC entry for this program in BTF */
			goto skip_freplace_fixup;
		t = btf__type_by_id(btf, id);
		if (!t)
			goto skip_freplace_fixup;
		t = btf__type_by_id(btf, t->type);
		if (!t || !btf_is_func_proto(t) || btf_vlen(t) != 1)
			goto skip_freplace_fixup;

		/* context argument is a pointer to a struct/typedef */
		t = btf__type_by_id(btf, btf_params(t)[0].type);
		while (t && btf_is_mod(t))
			t = btf__type_by_id(btf, t->type);
		if (!t || !btf_is_ptr(t))
			goto skip_freplace_fixup;
		t = btf__type_by_id(btf, t->type);
		while (t && btf_is_mod(t))
			t = btf__type_by_id(btf, t->type);
		if (!t)
			goto skip_freplace_fixup;

		ctx_name = btf__name_by_offset(btf, t->name_off);

		if (guess_prog_type_by_ctx_name(ctx_name, &prog_type, &attach_type) == 0) {
			bpf_program__set_type(prog, prog_type);
			bpf_program__set_expected_attach_type(prog, attach_type);

			if (!env.quiet) {
				printf("Using guessed program type '%s' for %s/%s...\n",
				       libbpf_bpf_prog_type_str(prog_type),
				       filename, prog_name);
			}
		} else {
			if (!env.quiet) {
				printf("Failed to guess program type for freplace program with context type name '%s' for %s/%s. Consider using canonical type names to help veristat...\n",
				       ctx_name, filename, prog_name);
			}
		}
	}
skip_freplace_fixup:
	return;
}
/* Verify a single BPF program: set up the verifier log buffer, load the
 * object, and record per-program stats (verdict, duration, insns, etc.)
 * into a freshly-appended env.prog_stats entry.
 * Returns 0 (a failed verification is recorded, not propagated) or
 * -ENOMEM on allocation failure.
 *
 * Fix: removed the stray ';' that followed the function's closing brace.
 */
static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
{
	const char *prog_name = bpf_program__name(prog);
	const char *base_filename = basename(filename);
	char *buf;
	int buf_sz, log_level;
	struct verif_stats *stats;
	int err = 0;
	void *tmp;

	if (!should_process_file_prog(base_filename, bpf_program__name(prog))) {
		env.progs_skipped++;
		return 0;
	}

	/* grow the stats array by one zero-initialized entry */
	tmp = realloc(env.prog_stats, (env.prog_stat_cnt + 1) * sizeof(*env.prog_stats));
	if (!tmp)
		return -ENOMEM;
	env.prog_stats = tmp;
	stats = &env.prog_stats[env.prog_stat_cnt++];
	memset(stats, 0, sizeof(*stats));

	if (env.verbose) {
		buf_sz = env.log_size ? env.log_size : 16 * 1024 * 1024;
		buf = malloc(buf_sz);
		if (!buf)
			return -ENOMEM;
		/* ensure we always request stats */
		log_level = env.log_level | 4 | (env.log_fixed ? 8 : 0);
	} else {
		buf = verif_log_buf;
		buf_sz = sizeof(verif_log_buf);
		/* request only verifier stats */
		log_level = 4 | (env.log_fixed ? 8 : 0);
	}
	verif_log_buf[0] = '\0';

	bpf_program__set_log_buf(prog, buf, buf_sz);
	bpf_program__set_log_level(prog, log_level);

	/* increase chances of successful BPF object loading */
	fixup_obj(obj, prog, base_filename);

	if (env.force_checkpoints)
		bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);

	err = bpf_object__load(obj);
	env.progs_processed++;

	/* NOTE(review): strdup() results are not checked here; a NULL name
	 * would only surface later in sorting/output -- confirm if OOM
	 * handling is desired at this point
	 */
	stats->file_name = strdup(base_filename);
	stats->prog_name = strdup(bpf_program__name(prog));
	stats->stats[VERDICT] = err == 0; /* 1 - success, 0 - failure */
	parse_verif_log(buf, buf_sz, stats);

	if (env.verbose) {
		printf("PROCESSING %s/%s, DURATION US: %ld, VERDICT: %s, VERIFIER LOG:\n%s\n",
		       filename, prog_name, stats->stats[DURATION],
		       err ? "failure" : "success", buf);
	}

	if (verif_log_buf != buf)
		free(buf);

	return 0;
}
/* Process one BPF object file: apply file-level filters, then verify
 * each program in it. Multi-program objects are handled by re-opening
 * the object once per program with only that program set to autoload,
 * so that one failing program doesn't prevent verifying the others.
 * Returns 0 on success (including skipped/unopenable files), negative
 * error code on hard failures.
 */
static int process_obj(const char *filename)
{
	struct bpf_object *obj = NULL, *tobj;
	struct bpf_program *prog, *tprog, *lprog;
	libbpf_print_fn_t old_libbpf_print_fn;
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	int err = 0, prog_cnt = 0;

	if (!should_process_file_prog(basename(filename), NULL)) {
		if (env.verbose)
			printf("Skipping '%s' due to filters...\n", filename);
		env.files_skipped++;
		return 0;
	}
	if (!is_bpf_obj_file(filename)) {
		if (env.verbose)
			printf("Skipping '%s' as it's not a BPF object file...\n", filename);
		env.files_skipped++;
		return 0;
	}

	if (!env.quiet && env.out_fmt == RESFMT_TABLE)
		printf("Processing '%s'...\n", basename(filename));

	/* route libbpf's own messages through our print callback */
	old_libbpf_print_fn = libbpf_set_print(libbpf_print_fn);

	obj = bpf_object__open_file(filename, &opts);
	if (!obj) {
		/* if libbpf can't open BPF object file, it could be because
		 * that BPF object file is incomplete and has to be statically
		 * linked into a final BPF object file; instead of bailing
		 * out, report it into stderr, mark it as skipped, and
		 * proceed
		 */
		fprintf(stderr, "Failed to open '%s': %d\n", filename, -errno);
		env.files_skipped++;
		err = 0;
		goto cleanup;
	}

	env.files_processed++;

	/* count programs to see whether per-program isolation is needed */
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* single-program objects can be verified in place */
	if (prog_cnt == 1) {
		prog = bpf_object__next_program(obj, NULL);
		bpf_program__set_autoload(prog, true);
		process_prog(filename, obj, prog);
		goto cleanup;
	}

	/* re-open the object once per program, autoloading only that one */
	bpf_object__for_each_program(prog, obj) {
		const char *prog_name = bpf_program__name(prog);

		tobj = bpf_object__open_file(filename, &opts);
		if (!tobj) {
			err = -errno;
			fprintf(stderr, "Failed to open '%s': %d\n", filename, err);
			goto cleanup;
		}

		lprog = NULL;
		bpf_object__for_each_program(tprog, tobj) {
			const char *tprog_name = bpf_program__name(tprog);

			if (strcmp(prog_name, tprog_name) == 0) {
				bpf_program__set_autoload(tprog, true);
				lprog = tprog;
			} else {
				bpf_program__set_autoload(tprog, false);
			}
		}

		process_prog(filename, tobj, lprog);
		bpf_object__close(tobj);
	}

cleanup:
	bpf_object__close(obj);
	/* restore the previous libbpf print callback */
	libbpf_set_print(old_libbpf_print_fn);
	return err;
}
/* Compare two stats records on a single stat @id. String stats compare
 * lexicographically, numeric stats by value; @asc flips the result for
 * descending order. Returns <0, 0, >0 like strcmp().
 */
static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
		    enum stat_id id, bool asc)
{
	int res;

	switch (id) {
	case FILE_NAME:
		res = strcmp(s1->file_name, s2->file_name);
		break;
	case PROG_NAME:
		res = strcmp(s1->prog_name, s2->prog_name);
		break;
	case VERDICT:
	case DURATION:
	case TOTAL_INSNS:
	case TOTAL_STATES:
	case PEAK_STATES:
	case MAX_STATES_PER_INSN:
	case MARK_READ_MAX_LEN: {
		long a = s1->stats[id], b = s2->stats[id];

		res = (a == b) ? 0 : (a < b ? -1 : 1);
		break;
	}
	default:
		fprintf(stderr, "Unrecognized stat #%d\n", id);
		exit(1);
	}

	return asc ? res : -res;
}
/* qsort() comparator applying env.sort_spec keys in order, falling back
 * to the unique file+prog key to keep the ordering total and stable.
 */
static int cmp_prog_stats(const void *v1, const void *v2)
{
	const struct verif_stats *a = v1, *b = v2;
	int i, res;

	for (i = 0; i < env.sort_spec.spec_cnt; i++) {
		res = cmp_stat(a, b, env.sort_spec.ids[i], env.sort_spec.asc[i]);
		if (res)
			return res;
	}

	/* always disambiguate with file+prog, which are unique */
	res = strcmp(a->file_name, b->file_name);
	return res ? res : strcmp(a->prog_name, b->prog_name);
}
/* Fetch the value of stat @id (in variant @var) from a joined row @s.
 * String stats (file/prog name) are returned through *str_val; numeric
 * stats through *num_val. When one side of the join is missing, numeric
 * variants yield -DBL_MAX so such rows sort to one end of the order.
 */
static void fetch_join_stat_value(const struct verif_stats_join *s,
				  enum stat_id id, enum stat_variant var,
				  const char **str_val,
				  double *num_val)
{
	long v1, v2;

	if (id == FILE_NAME) {
		*str_val = s->file_name;
		return;
	}
	if (id == PROG_NAME) {
		*str_val = s->prog_name;
		return;
	}

	v1 = s->stats_a ? s->stats_a->stats[id] : 0;
	v2 = s->stats_b ? s->stats_b->stats[id] : 0;

	switch (var) {
	case VARIANT_A:
		if (!s->stats_a)
			*num_val = -DBL_MAX;
		else
			*num_val = s->stats_a->stats[id];
		return;
	case VARIANT_B:
		if (!s->stats_b)
			*num_val = -DBL_MAX;
		else
			*num_val = s->stats_b->stats[id];
		return;
	case VARIANT_DIFF:
		if (!s->stats_a || !s->stats_b)
			*num_val = -DBL_MAX;
		else if (id == VERDICT)
			/* verdict diff is match/mismatch, not arithmetic */
			*num_val = v1 == v2 ? 1.0 /* MATCH */ : 0.0 /* MISMATCH */;
		else
			*num_val = (double)(v2 - v1);
		return;
	case VARIANT_PCT:
		if (!s->stats_a || !s->stats_b) {
			*num_val = -DBL_MAX;
		} else if (v1 == 0) {
			/* avoid division by zero: 0 -> 0 is 0%, any other
			 * change from zero is reported as +/-100%
			 */
			if (v1 == v2)
				*num_val = 0.0;
			else
				*num_val = v2 < v1 ? -100.0 : 100.0;
		} else {
			*num_val = (v2 - v1) * 100.0 / v1;
		}
		return;
	}
}
/* Compare two joined rows on a single stat @id/@var; @asc flips the
 * result for descending order. Returns <0, 0, >0 like strcmp().
 */
static int cmp_join_stat(const struct verif_stats_join *s1,
			 const struct verif_stats_join *s2,
			 enum stat_id id, enum stat_variant var, bool asc)
{
	const char *str1 = NULL, *str2 = NULL;
	double n1, n2;
	int res = 0;

	fetch_join_stat_value(s1, id, var, &str1, &n1);
	fetch_join_stat_value(s2, id, var, &str2, &n2);

	if (str1) /* string-valued stat (file/prog name) */
		res = strcmp(str1, str2);
	else if (n1 < n2)
		res = -1;
	else if (n1 > n2)
		res = 1;

	return asc ? res : -res;
}
/* qsort() comparator for joined rows: apply env.sort_spec keys in order,
 * then fall back to the unique file+prog key for a total ordering.
 */
static int cmp_join_stats(const void *v1, const void *v2)
{
	const struct verif_stats_join *a = v1, *b = v2;
	int i, res;

	for (i = 0; i < env.sort_spec.spec_cnt; i++) {
		res = cmp_join_stat(a, b,
				    env.sort_spec.ids[i],
				    env.sort_spec.variants[i],
				    env.sort_spec.asc[i]);
		if (res)
			return res;
	}

	/* always disambiguate with file+prog, which are unique */
	res = strcmp(a->file_name, b->file_name);
	return res ? res : strcmp(a->prog_name, b->prog_name);
}
/* character used to draw header underlines in table output */
#define HEADER_CHAR '-'
/* separator between columns in table output */
#define COLUMN_SEP "  "
/* Print a row of HEADER_CHAR underlines, one run per output column,
 * sized to the column widths recorded in env.output_spec.lens.
 */
static void output_header_underlines(void)
{
	int col, n;

	for (col = 0; col < env.output_spec.spec_cnt; col++) {
		if (col > 0)
			printf("%s", COLUMN_SEP);
		for (n = env.output_spec.lens[col]; n > 0; n--)
			putchar(HEADER_CHAR);
	}
	putchar('\n');
}
/* Emit the header row in the requested format. RESFMT_TABLE_CALCLEN is
 * a dry run that only updates per-column max widths in
 * env.output_spec.lens; RESFMT_TABLE prints aligned headers plus an
 * underline row; RESFMT_CSV prints comma-separated canonical names.
 */
static void output_headers(enum resfmt fmt)
{
	const char *fmt_str;
	int i, len;

	for (i = 0; i < env.output_spec.spec_cnt; i++) {
		int id = env.output_spec.ids[i];
		int *max_len = &env.output_spec.lens[i];

		switch (fmt) {
		case RESFMT_TABLE_CALCLEN:
			/* measure only; snprintf(NULL, 0, ...) returns length */
			len = snprintf(NULL, 0, "%s", stat_defs[id].header);
			if (len > *max_len)
				*max_len = len;
			break;
		case RESFMT_TABLE:
			fmt_str = stat_defs[id].left_aligned ? "%s%-*s" : "%s%*s";
			printf(fmt_str, i == 0 ? "" : COLUMN_SEP,  *max_len, stat_defs[id].header);
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		case RESFMT_CSV:
			printf("%s%s", i == 0 ? "" : ",", stat_defs[id].names[0]);
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		}
	}

	if (fmt == RESFMT_TABLE)
		output_header_underlines();
}
/* Extract stat @id from record @s into either *str (string-valued
 * stats) or *val (numeric stats). A NULL @s yields "N/A" / 0
 * placeholders for missing rows.
 */
static void prepare_value(const struct verif_stats *s, enum stat_id id,
			  const char **str, long *val)
{
	switch (id) {
	case FILE_NAME:
		*str = s ? s->file_name : "N/A";
		break;
	case PROG_NAME:
		*str = s ? s->prog_name : "N/A";
		break;
	case VERDICT:
		/* verdict is stored numerically but rendered as a string */
		*str = !s ? "N/A" : (s->stats[VERDICT] ? "success" : "failure");
		break;
	case DURATION:
	case TOTAL_INSNS:
	case TOTAL_STATES:
	case PEAK_STATES:
	case MAX_STATES_PER_INSN:
	case MARK_READ_MAX_LEN:
		*val = s ? s->stats[id] : 0;
		break;
	default:
		fprintf(stderr, "Unrecognized stat #%d\n", id);
		exit(1);
	}
}
/* Emit one stats row in the requested format. RESFMT_TABLE_CALCLEN only
 * records column widths; RESFMT_TABLE prints aligned values (strings
 * left-, numbers right-aligned); RESFMT_CSV prints raw comma-separated
 * values. When @last is set in table mode, a closing underline row and
 * a processing summary are printed after the row.
 */
static void output_stats(const struct verif_stats *s, enum resfmt fmt, bool last)
{
	int i;

	for (i = 0; i < env.output_spec.spec_cnt; i++) {
		int id = env.output_spec.ids[i];
		int *max_len = &env.output_spec.lens[i], len;
		const char *str = NULL;
		long val = 0;

		/* exactly one of str/val is filled, depending on stat kind */
		prepare_value(s, id, &str, &val);

		switch (fmt) {
		case RESFMT_TABLE_CALCLEN:
			/* measure formatted width without printing */
			if (str)
				len = snprintf(NULL, 0, "%s", str);
			else
				len = snprintf(NULL, 0, "%ld", val);
			if (len > *max_len)
				*max_len = len;
			break;
		case RESFMT_TABLE:
			if (str)
				printf("%s%-*s", i == 0 ? "" : COLUMN_SEP, *max_len, str);
			else
				printf("%s%*ld", i == 0 ? "" : COLUMN_SEP,  *max_len, val);
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		case RESFMT_CSV:
			if (str)
				printf("%s%s", i == 0 ? "" : ",", str);
			else
				printf("%s%ld", i == 0 ? "" : ",", val);
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		}
	}

	if (last && fmt == RESFMT_TABLE) {
		output_header_underlines();
		printf("Done. Processed %d files, %d programs. Skipped %d files, %d programs.\n",
		       env.files_processed, env.files_skipped, env.progs_processed, env.progs_skipped);
	}
}
/* Parse one CSV cell @str as the value of stat @id into @st. String
 * stats are strdup()'ed; the verdict cell must be exactly "success" or
 * "failure"; numeric cells must be integers with no trailing junk.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 *
 * Fix: the numeric-parse error path returned -errno, but sscanf() does
 * not set errno on a mere matching failure, so malformed input could be
 * silently reported as success (err == 0). Return -EINVAL explicitly.
 */
static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats *st)
{
	switch (id) {
	case FILE_NAME:
		st->file_name = strdup(str);
		if (!st->file_name)
			return -ENOMEM;
		break;
	case PROG_NAME:
		st->prog_name = strdup(str);
		if (!st->prog_name)
			return -ENOMEM;
		break;
	case VERDICT:
		if (strcmp(str, "success") == 0) {
			st->stats[VERDICT] = true;
		} else if (strcmp(str, "failure") == 0) {
			st->stats[VERDICT] = false;
		} else {
			fprintf(stderr, "Unrecognized verification verdict '%s'\n", str);
			return -EINVAL;
		}
		break;
	case DURATION:
	case TOTAL_INSNS:
	case TOTAL_STATES:
	case PEAK_STATES:
	case MAX_STATES_PER_INSN:
	case MARK_READ_MAX_LEN: {
		long val;
		int n;

		/* %n records chars consumed so trailing junk is rejected */
		if (sscanf(str, "%ld %n", &val, &n) != 1 || n != (int)strlen(str)) {
			fprintf(stderr, "Failed to parse '%s' as integer\n", str);
			return -EINVAL;
		}

		st->stats[id] = val;
		break;
	}
	default:
		fprintf(stderr, "Unrecognized stat #%d\n", id);
		return -EINVAL;
	}
	return 0;
}
/* Parse a veristat-produced CSV file. The first line defines the stat
 * columns (recorded into @specs); every subsequent line is parsed into
 * a new record appended to *@statsp (count in *@stat_cntp). Rows
 * rejected by file/prog filters are dropped after parsing.
 * Returns 0 on success, negative error code on failure.
 */
static int parse_stats_csv(const char *filename, struct stat_specs *specs,
			   struct verif_stats **statsp, int *stat_cntp)
{
	char line[4096];
	FILE *f;
	int err = 0;
	bool header = true;

	f = fopen(filename, "r");
	if (!f) {
		err = -errno;
		fprintf(stderr, "Failed to open '%s': %d\n", filename, err);
		return err;
	}

	*stat_cntp = 0;

	while (fgets(line, sizeof(line), f)) {
		char *input = line, *state = NULL, *next;
		struct verif_stats *st = NULL;
		int col = 0;

		/* data lines get a new zeroed record appended up front */
		if (!header) {
			void *tmp;

			tmp = realloc(*statsp, (*stat_cntp + 1) * sizeof(**statsp));
			if (!tmp) {
				err = -ENOMEM;
				goto cleanup;
			}
			*statsp = tmp;

			st = &(*statsp)[*stat_cntp];
			memset(st, 0, sizeof(*st));

			*stat_cntp += 1;
		}

		while ((next = strtok_r(state ? NULL : input, ",\n", &state))) {
			if (header) {
				/* for the first line, set up spec stats */
				err = parse_stat(next, specs);
				if (err)
					goto cleanup;
				continue;
			}

			/* for all other lines, parse values based on spec */
			if (col >= specs->spec_cnt) {
				fprintf(stderr, "Found extraneous column #%d in row #%d of '%s'\n",
					col, *stat_cntp, filename);
				err = -EINVAL;
				goto cleanup;
			}
			err = parse_stat_value(next, specs->ids[col], st);
			if (err)
				goto cleanup;
			col++;
		}

		if (header) {
			header = false;
			continue;
		}

		if (col < specs->spec_cnt) {
			fprintf(stderr, "Not enough columns in row #%d in '%s'\n",
				*stat_cntp, filename);
			err = -EINVAL;
			goto cleanup;
		}

		if (!st->file_name || !st->prog_name) {
			fprintf(stderr, "Row #%d in '%s' is missing file and/or program name\n",
				*stat_cntp, filename);
			err = -EINVAL;
			goto cleanup;
		}

		/* in comparison mode we can only check filters after we
		 * parsed entire line; if row should be ignored we pretend we
		 * never parsed it
		 */
		if (!should_process_file_prog(st->file_name, st->prog_name)) {
			free(st->file_name);
			free(st->prog_name);
			*stat_cntp -= 1;
		}
	}

	if (!feof(f)) {
		err = -errno;
		fprintf(stderr, "Failed I/O for '%s': %d\n", filename, err);
	}

cleanup:
	fclose(f);
	return err;
}
/* empty/zero stats stand-in used when a row exists in only one of the
 * two compared data sets
 */
static const struct verif_stats fallback_stats = { .file_name = "", .prog_name = "" };
/* file and program name form the join key shared by both data sets */
static bool is_key_stat(enum stat_id id)
{
	switch (id) {
	case FILE_NAME:
	case PROG_NAME:
		return true;
	default:
		return false;
	}
}
/* Print header underlines for comparison-mode tables, where each
 * non-key stat occupies three columns (A, B, DIFF) and key stats one.
 */
static void output_comp_header_underlines(void)
{
	int i, j, n;

	for (i = 0; i < env.output_spec.spec_cnt; i++) {
		int id = env.output_spec.ids[i];
		int ncols = is_key_stat(id) ? 1 : 3;

		for (j = 0; j < ncols; j++) {
			int width = env.output_spec.lens[3 * i + j];

			if (i + j > 0)
				printf("%s", COLUMN_SEP);
			for (n = 0; n < width; n++)
				putchar(HEADER_CHAR);
		}
	}
	putchar('\n');
}
/* Emit comparison-mode headers. Non-key stats expand into three columns
 * (A/base, B/comparison, DIFF); key stats (file/prog name) stay single.
 * RESFMT_TABLE_CALCLEN only records column widths into
 * env.output_spec.lens (3 slots per stat).
 */
static void output_comp_headers(enum resfmt fmt)
{
	static const char *table_sfxs[3] = {" (A)", " (B)", " (DIFF)"};
	static const char *name_sfxs[3] = {"_base", "_comp", "_diff"};
	int i, j, len;

	for (i = 0; i < env.output_spec.spec_cnt; i++) {
		int id = env.output_spec.ids[i];
		/* key stats don't have A/B/DIFF columns, they are common for both data sets */
		int max_j = is_key_stat(id) ? 1 : 3;

		for (j = 0; j < max_j; j++) {
			int *max_len = &env.output_spec.lens[3 * i + j];
			bool last = (i == env.output_spec.spec_cnt - 1) && (j == max_j - 1);
			const char *sfx;

			switch (fmt) {
			case RESFMT_TABLE_CALCLEN:
				sfx = is_key_stat(id) ? "" : table_sfxs[j];
				len = snprintf(NULL, 0, "%s%s", stat_defs[id].header, sfx);
				if (len > *max_len)
					*max_len = len;
				break;
			case RESFMT_TABLE:
				sfx = is_key_stat(id) ? "" : table_sfxs[j];
				/* width is reduced by the suffix length so the
				 * header text plus suffix fill the column
				 */
				printf("%s%-*s%s", i + j == 0 ? "" : COLUMN_SEP,
				       *max_len - (int)strlen(sfx), stat_defs[id].header, sfx);
				if (last)
					printf("\n");
				break;
			case RESFMT_CSV:
				sfx = is_key_stat(id) ? "" : name_sfxs[j];
				printf("%s%s%s", i + j == 0 ? "" : ",", stat_defs[id].names[0], sfx);
				if (last)
					printf("\n");
				break;
			}
		}
	}

	if (fmt == RESFMT_TABLE)
		output_comp_header_underlines();
}
/* Emit one joined row in comparison mode: base (A) value, comparison
 * (B) value, and a DIFF column (MATCH/MISMATCH for strings, absolute
 * delta plus percentage for numbers; "N/A" when either side is
 * missing). RESFMT_TABLE_CALCLEN only records column widths; when
 * @last is set in table mode, closing underlines are printed.
 */
static void output_comp_stats(const struct verif_stats_join *join_stats,
			      enum resfmt fmt, bool last)
{
	const struct verif_stats *base = join_stats->stats_a;
	const struct verif_stats *comp = join_stats->stats_b;
	char base_buf[1024] = {}, comp_buf[1024] = {}, diff_buf[1024] = {};
	int i;

	for (i = 0; i < env.output_spec.spec_cnt; i++) {
		int id = env.output_spec.ids[i], len;
		/* three width slots per stat: A, B, DIFF */
		int *max_len_base = &env.output_spec.lens[3 * i + 0];
		int *max_len_comp = &env.output_spec.lens[3 * i + 1];
		int *max_len_diff = &env.output_spec.lens[3 * i + 2];
		const char *base_str = NULL, *comp_str = NULL;
		long base_val = 0, comp_val = 0, diff_val = 0;

		prepare_value(base, id, &base_str, &base_val);
		prepare_value(comp, id, &comp_str, &comp_val);

		/* normalize all the outputs to be in string buffers for simplicity */
		if (is_key_stat(id)) {
			/* key stats (file and program name) are always strings */
			if (base)
				snprintf(base_buf, sizeof(base_buf), "%s", base_str);
			else
				snprintf(base_buf, sizeof(base_buf), "%s", comp_str);
		} else if (base_str) {
			/* string-valued stat: DIFF is MATCH/MISMATCH/N-A */
			snprintf(base_buf, sizeof(base_buf), "%s", base_str);
			snprintf(comp_buf, sizeof(comp_buf), "%s", comp_str);
			if (!base || !comp)
				snprintf(diff_buf, sizeof(diff_buf), "%s", "N/A");
			else if (strcmp(base_str, comp_str) == 0)
				snprintf(diff_buf, sizeof(diff_buf), "%s", "MATCH");
			else
				snprintf(diff_buf, sizeof(diff_buf), "%s", "MISMATCH");
		} else {
			/* numeric stat: DIFF is signed delta plus percentage */
			double p = 0.0;

			if (base)
				snprintf(base_buf, sizeof(base_buf), "%ld", base_val);
			else
				snprintf(base_buf, sizeof(base_buf), "%s", "N/A");
			if (comp)
				snprintf(comp_buf, sizeof(comp_buf), "%ld", comp_val);
			else
				snprintf(comp_buf, sizeof(comp_buf), "%s", "N/A");

			diff_val = comp_val - base_val;
			if (!base || !comp) {
				snprintf(diff_buf, sizeof(diff_buf), "%s", "N/A");
			} else {
				if (base_val == 0) {
					if (comp_val == base_val)
						p = 0.0; /* avoid +0 (+100%) case */
					else
						p = comp_val < base_val ? -100.0 : 100.0;
				} else {
					 p = diff_val * 100.0 / base_val;
				}
				snprintf(diff_buf, sizeof(diff_buf), "%+ld (%+.2lf%%)", diff_val, p);
			}
		}

		switch (fmt) {
		case RESFMT_TABLE_CALCLEN:
			len = strlen(base_buf);
			if (len > *max_len_base)
				*max_len_base = len;
			if (!is_key_stat(id)) {
				len = strlen(comp_buf);
				if (len > *max_len_comp)
					*max_len_comp = len;
				len = strlen(diff_buf);
				if (len > *max_len_diff)
					*max_len_diff = len;
			}
			break;
		case RESFMT_TABLE: {
			/* string outputs are left-aligned, number outputs are right-aligned */
			const char *fmt = base_str ? "%s%-*s" : "%s%*s";

			printf(fmt, i == 0 ? "" : COLUMN_SEP, *max_len_base, base_buf);
			if (!is_key_stat(id)) {
				printf(fmt, COLUMN_SEP, *max_len_comp, comp_buf);
				printf(fmt, COLUMN_SEP, *max_len_diff, diff_buf);
			}
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		}
		case RESFMT_CSV:
			printf("%s%s", i == 0 ? "" : ",", base_buf);
			if (!is_key_stat(id)) {
				printf("%s%s", i == 0 ? "" : ",", comp_buf);
				printf("%s%s", i == 0 ? "" : ",", diff_buf);
			}
			if (i == env.output_spec.spec_cnt - 1)
				printf("\n");
			break;
		}
	}

	if (last && fmt == RESFMT_TABLE)
		output_comp_header_underlines();
}
/* Order two stats records by their (file_name, prog_name) join key;
 * returns <0, 0, >0 like strcmp().
 */
static int cmp_stats_key(const struct verif_stats *base, const struct verif_stats *comp)
{
	int res = strcmp(base->file_name, comp->file_name);

	return res ? res : strcmp(base->prog_name, comp->prog_name);
}
/* Evaluate stat filter @f against joined row @stats. Comparisons are
 * done in floating point with a small epsilon to tolerate rounding in
 * derived variants (diff/pct).
 */
static bool is_join_stat_filter_matched(struct filter *f, const struct verif_stats_join *stats)
{
	static const double eps = 1e-9;
	const char *str = NULL;
	double v = 0.0;

	fetch_join_stat_value(stats, f->stat_id, f->stat_var, &str, &v);

	switch (f->op) {
	case OP_EQ:
		return v > f->value - eps && v < f->value + eps;
	case OP_NEQ:
		return v < f->value - eps || v > f->value + eps;
	case OP_LT:
		return v < f->value - eps;
	case OP_LE:
		return v <= f->value + eps;
	case OP_GT:
		return v > f->value + eps;
	case OP_GE:
		return v >= f->value - eps;
	}

	fprintf(stderr, "BUG: unknown filter op %d!\n", f->op);
	return false;
}
/* Decide whether a joined row passes the stat-based filters: any
 * matching deny filter suppresses it; if any stat allow filters exist,
 * at least one must match; with no stat allow filters everything passes.
 */
static bool should_output_join_stats(const struct verif_stats_join *stats)
{
	int i, stat_allows = 0;

	for (i = 0; i < env.deny_filter_cnt; i++) {
		struct filter *f = &env.deny_filters[i];

		if (f->kind == FILTER_STAT && is_join_stat_filter_matched(f, stats))
			return false;
	}

	for (i = 0; i < env.allow_filter_cnt; i++) {
		struct filter *f = &env.allow_filters[i];

		if (f->kind != FILTER_STAT)
			continue;
		stat_allows++;
		if (is_join_stat_filter_matched(f, stats))
			return true;
	}

	/* if there are no stat allowed filters, pass everything through */
	return stat_allows == 0;
}
/* Comparison mode: parse two CSV data sets, join them on the unique
 * file+prog key, and output per-row A/B/DIFF stats.
 * Returns 0 on success, negative error code on failure.
 *
 * Fix: the "last_idx = -1" reset used to sit after the one_more_time:
 * label, so on the second (RESFMT_TABLE) pass "i == last_idx" could
 * never be true and the closing header underlines were never printed.
 * The reset now happens once, before the label, so the index of the
 * last visible row computed during the CALCLEN pass survives into the
 * actual output pass.
 */
static int handle_comparison_mode(void)
{
	struct stat_specs base_specs = {}, comp_specs = {};
	struct stat_specs tmp_sort_spec;
	enum resfmt cur_fmt;
	int err, i, j, last_idx;

	if (env.filename_cnt != 2) {
		fprintf(stderr, "Comparison mode expects exactly two input CSV files!\n\n");
		argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
		return -EINVAL;
	}

	err = parse_stats_csv(env.filenames[0], &base_specs,
			      &env.baseline_stats, &env.baseline_stat_cnt);
	if (err) {
		fprintf(stderr, "Failed to parse stats from '%s': %d\n", env.filenames[0], err);
		return err;
	}
	err = parse_stats_csv(env.filenames[1], &comp_specs,
			      &env.prog_stats, &env.prog_stat_cnt);
	if (err) {
		fprintf(stderr, "Failed to parse stats from '%s': %d\n", env.filenames[1], err);
		return err;
	}

	/* To keep it simple we validate that the set and order of stats in
	 * both CSVs are exactly the same. This can be lifted with a bit more
	 * pre-processing later.
	 */
	if (base_specs.spec_cnt != comp_specs.spec_cnt) {
		fprintf(stderr, "Number of stats in '%s' and '%s' differs (%d != %d)!\n",
			env.filenames[0], env.filenames[1],
			base_specs.spec_cnt, comp_specs.spec_cnt);
		return -EINVAL;
	}
	for (i = 0; i < base_specs.spec_cnt; i++) {
		if (base_specs.ids[i] != comp_specs.ids[i]) {
			fprintf(stderr, "Stats composition differs between '%s' and '%s' (%s != %s)!\n",
				env.filenames[0], env.filenames[1],
				stat_defs[base_specs.ids[i]].names[0],
				stat_defs[comp_specs.ids[i]].names[0]);
			return -EINVAL;
		}
	}

	/* Replace user-specified sorting spec with file+prog sorting rule to
	 * be able to join two datasets correctly. Once we are done, we will
	 * restore the original sort spec.
	 */
	tmp_sort_spec = env.sort_spec;
	env.sort_spec = join_sort_spec;
	qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
	qsort(env.baseline_stats, env.baseline_stat_cnt, sizeof(*env.baseline_stats), cmp_prog_stats);
	env.sort_spec = tmp_sort_spec;

	/* Join two datasets together. If baseline and comparison datasets
	 * have different subset of rows (we match by 'object + prog' as
	 * a unique key) then assume empty/missing/zero value for rows that
	 * are missing in the opposite data set.
	 */
	i = j = 0;
	while (i < env.baseline_stat_cnt || j < env.prog_stat_cnt) {
		const struct verif_stats *base, *comp;
		struct verif_stats_join *join;
		void *tmp;
		int r;

		base = i < env.baseline_stat_cnt ? &env.baseline_stats[i] : &fallback_stats;
		comp = j < env.prog_stat_cnt ? &env.prog_stats[j] : &fallback_stats;

		if (!base->file_name || !base->prog_name) {
			fprintf(stderr, "Entry #%d in '%s' doesn't have file and/or program name specified!\n",
				i, env.filenames[0]);
			return -EINVAL;
		}
		if (!comp->file_name || !comp->prog_name) {
			fprintf(stderr, "Entry #%d in '%s' doesn't have file and/or program name specified!\n",
				j, env.filenames[1]);
			return -EINVAL;
		}

		tmp = realloc(env.join_stats, (env.join_stat_cnt + 1) * sizeof(*env.join_stats));
		if (!tmp)
			return -ENOMEM;
		env.join_stats = tmp;

		join = &env.join_stats[env.join_stat_cnt];
		memset(join, 0, sizeof(*join));

		r = cmp_stats_key(base, comp);
		if (r == 0) {
			/* row present in both data sets */
			join->file_name = base->file_name;
			join->prog_name = base->prog_name;
			join->stats_a = base;
			join->stats_b = comp;
			i++;
			j++;
		} else if (base != &fallback_stats && (comp == &fallback_stats || r < 0)) {
			/* baseline-only row */
			join->file_name = base->file_name;
			join->prog_name = base->prog_name;
			join->stats_a = base;
			join->stats_b = NULL;
			i++;
		} else if (comp != &fallback_stats && (base == &fallback_stats || r > 0)) {
			/* comparison-only row */
			join->file_name = comp->file_name;
			join->prog_name = comp->prog_name;
			join->stats_a = NULL;
			join->stats_b = comp;
			j++;
		} else {
			fprintf(stderr, "%s:%d: should never reach here i=%i, j=%i",
				__FILE__, __LINE__, i, j);
			return -EINVAL;
		}
		env.join_stat_cnt += 1;
	}

	/* now sort joined results according to sort spec */
	qsort(env.join_stats, env.join_stat_cnt, sizeof(*env.join_stats), cmp_join_stats);

	/* for human-readable table output we need to do extra pass to
	 * calculate column widths, so we substitute current output format
	 * with RESFMT_TABLE_CALCLEN and later revert it back to RESFMT_TABLE
	 * and do everything again.
	 */
	if (env.out_fmt == RESFMT_TABLE)
		cur_fmt = RESFMT_TABLE_CALCLEN;
	else
		cur_fmt = env.out_fmt;

	/* last_idx is computed during the CALCLEN pass and must survive
	 * into the output pass, so reset it before the label, not after
	 */
	last_idx = -1;

one_more_time:
	output_comp_headers(cur_fmt);

	for (i = 0; i < env.join_stat_cnt; i++) {
		const struct verif_stats_join *join = &env.join_stats[i];

		if (!should_output_join_stats(join))
			continue;

		if (cur_fmt == RESFMT_TABLE_CALCLEN)
			last_idx = i;

		output_comp_stats(join, cur_fmt, i == last_idx);
	}

	if (cur_fmt == RESFMT_TABLE_CALCLEN) {
		cur_fmt = RESFMT_TABLE;
		goto one_more_time; /* ... this time with feeling */
	}

	return 0;
}
/* Evaluate one stat-based filter @f against program stats @stats.
 * Returns true iff the stat selected by the filter satisfies the
 * filter's comparison operator against its threshold value.
 */
static bool is_stat_filter_matched(struct filter *f, const struct verif_stats *stats)
{
	long v = stats->stats[f->stat_id];

	if (f->op == OP_EQ)
		return v == f->value;
	if (f->op == OP_NEQ)
		return v != f->value;
	if (f->op == OP_LT)
		return v < f->value;
	if (f->op == OP_LE)
		return v <= f->value;
	if (f->op == OP_GT)
		return v > f->value;
	if (f->op == OP_GE)
		return v >= f->value;

	fprintf(stderr, "BUG: unknown filter op %d!\n", f->op);
	return false;
}
/* Decide whether @stats should be included in the output, based on
 * stat-kind allow/deny filters: any matching deny filter rejects the
 * entry; if any stat allow filters exist, at least one must match.
 */
static bool should_output_stats(const struct verif_stats *stats)
{
	bool has_stat_allow = false;
	int i;

	/* a single matching deny filter is an immediate rejection */
	for (i = 0; i < env.deny_filter_cnt; i++) {
		struct filter *f = &env.deny_filters[i];

		if (f->kind == FILTER_STAT && is_stat_filter_matched(f, stats))
			return false;
	}

	for (i = 0; i < env.allow_filter_cnt; i++) {
		struct filter *f = &env.allow_filters[i];

		if (f->kind != FILTER_STAT)
			continue;
		has_stat_allow = true;
		if (is_stat_filter_matched(f, stats))
			return true;
	}

	/* if there are no stat allowed filters, pass everything through */
	return !has_stat_allow;
}
/* Print all collected per-program stats in the configured output format,
 * honoring stat allow/deny filters. For human-readable table output, a
 * first pass with RESFMT_TABLE_CALCLEN computes column widths before the
 * second pass renders the actual table.
 */
static void output_prog_stats(void)
{
	const struct verif_stats *stats;
	int i, last_stat_idx = 0;
	if (env.out_fmt == RESFMT_TABLE) {
		/* calculate column widths */
		output_headers(RESFMT_TABLE_CALCLEN);
		for (i = 0; i < env.prog_stat_cnt; i++) {
			stats = &env.prog_stats[i];
			if (!should_output_stats(stats))
				continue;
			output_stats(stats, RESFMT_TABLE_CALCLEN, false);
			/* remember last printed row so the real pass can flag it */
			last_stat_idx = i;
		}
	}
	/* actually output the table */
	output_headers(env.out_fmt);
	for (i = 0; i < env.prog_stat_cnt; i++) {
		stats = &env.prog_stats[i];
		if (!should_output_stats(stats))
			continue;
		output_stats(stats, env.out_fmt, i == last_stat_idx);
	}
}
/* Default mode of operation: verify every BPF object file given on the
 * command line, then sort and print collected verification stats.
 * Returns 0 on success, negative error otherwise.
 */
static int handle_verif_mode(void)
{
	int err, i;

	if (env.filename_cnt == 0) {
		fprintf(stderr, "Please provide path to BPF object file!\n\n");
		argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
		return -EINVAL;
	}

	for (i = 0; i < env.filename_cnt; i++) {
		err = process_obj(env.filenames[i]);
		if (!err)
			continue;
		fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
		return err;
	}

	qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
	output_prog_stats();

	return 0;
}
/* Replay mode: instead of verifying programs, load previously collected
 * stats from exactly one CSV file, then sort and print them.
 * Returns 0 on success, negative error otherwise.
 */
static int handle_replay_mode(void)
{
	struct stat_specs specs = {};
	int err;

	if (env.filename_cnt != 1) {
		fprintf(stderr, "Replay mode expects exactly one input CSV file!\n\n");
		argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
		return -EINVAL;
	}

	err = parse_stats_csv(env.filenames[0], &specs,
			      &env.prog_stats, &env.prog_stat_cnt);
	if (err) {
		fprintf(stderr, "Failed to parse stats from '%s': %d\n", env.filenames[0], err);
		return err;
	}

	qsort(env.prog_stats, env.prog_stat_cnt, sizeof(*env.prog_stats), cmp_prog_stats);
	output_prog_stats();

	return 0;
}
/* veristat entry point: parse CLI arguments, apply output/sort spec
 * defaults, dispatch to verification/comparison/replay mode, then free
 * all resources held in the global env. Returns the negated mode
 * handler error code (0 on success).
 */
int main(int argc, char **argv)
{
	int err = 0, i;
	if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
		return 1;
	if (env.show_version) {
		printf("%s\n", argp_program_version);
		return 0;
	}
	if (env.verbose && env.quiet) {
		fprintf(stderr, "Verbose and quiet modes are incompatible, please specify just one or neither!\n\n");
		argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
		return 1;
	}
	/* -v implies at least verifier log level 1 */
	if (env.verbose && env.log_level == 0)
		env.log_level = 1;
	/* CSV output defaults to a different (wider) column set */
	if (env.output_spec.spec_cnt == 0) {
		if (env.out_fmt == RESFMT_CSV)
			env.output_spec = default_csv_output_spec;
		else
			env.output_spec = default_output_spec;
	}
	if (env.sort_spec.spec_cnt == 0)
		env.sort_spec = default_sort_spec;
	if (env.comparison_mode && env.replay_mode) {
		fprintf(stderr, "Can't specify replay and comparison mode at the same time!\n\n");
		argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat");
		return 1;
	}
	if (env.comparison_mode)
		err = handle_comparison_mode();
	else if (env.replay_mode)
		err = handle_replay_mode();
	else
		err = handle_verif_mode();
	/* tear down everything accumulated in env during the run */
	free_verif_stats(env.prog_stats, env.prog_stat_cnt);
	free_verif_stats(env.baseline_stats, env.baseline_stat_cnt);
	free(env.join_stats);
	for (i = 0; i < env.filename_cnt; i++)
		free(env.filenames[i]);
	free(env.filenames);
	for (i = 0; i < env.allow_filter_cnt; i++) {
		free(env.allow_filters[i].any_glob);
		free(env.allow_filters[i].file_glob);
		free(env.allow_filters[i].prog_glob);
	}
	free(env.allow_filters);
	for (i = 0; i < env.deny_filter_cnt; i++) {
		free(env.deny_filters[i].any_glob);
		free(env.deny_filters[i].file_glob);
		free(env.deny_filters[i].prog_glob);
	}
	free(env.deny_filters);
	return -err;
}
| linux-master | tools/testing/selftests/bpf/veristat.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stdio.h>
#include <errno.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include "test_progs.h"
/* Human-readable names for each BTF kind, indexed by BTF_KIND_* value */
static const char * const btf_kind_str_mapping[] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};
/* Map a BTF kind to its printable name; out-of-range kinds (anything
 * past BTF_KIND_ENUM64, the last mapped entry) yield "UNKNOWN".
 */
static const char *btf_kind_str(__u16 kind)
{
	return kind > BTF_KIND_ENUM64 ? "UNKNOWN" : btf_kind_str_mapping[kind];
}
/* Printable name for a BTF_KIND_INT encoding value; unrecognized
 * encodings map to "UNKN".
 */
static const char *btf_int_enc_str(__u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	if (encoding == BTF_INT_CHAR)
		return "CHAR";
	if (encoding == BTF_INT_BOOL)
		return "BOOL";
	return "UNKN";
}
/* Printable name for a BTF_KIND_VAR linkage value */
static const char *btf_var_linkage_str(__u32 linkage)
{
	if (linkage == BTF_VAR_STATIC)
		return "static";
	if (linkage == BTF_VAR_GLOBAL_ALLOCATED)
		return "global-alloc";
	return "(unknown)";
}
static const char *btf_func_linkage_str(const struct btf_type *t)
{
switch (btf_vlen(t)) {
case BTF_FUNC_STATIC:
return "static";
case BTF_FUNC_GLOBAL:
return "global";
case BTF_FUNC_EXTERN:
return "extern";
default:
return "(unknown)";
}
}
/* Resolve string offset @off in @btf's string section; offset 0 means
 * an anonymous name, and an unresolvable offset yields "(invalid)".
 */
static const char *btf_str(const struct btf *btf, __u32 off)
{
	const char *s;

	if (!off)
		return "(anon)";
	s = btf__str_by_offset(btf, off);
	return s ? s : "(invalid)";
}
/* Emit a raw textual dump of BTF type @id from @btf into @out: a common
 * "[id] KIND 'name'" prefix followed by kind-specific details (and one
 * extra line per member/param/value for compound kinds).
 * Returns 0 on success, -EINVAL if @id is not a valid type id.
 */
int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
{
	const struct btf_type *t;
	int kind, i;
	__u32 vlen;
	t = btf__type_by_id(btf, id);
	if (!t)
		return -EINVAL;
	vlen = btf_vlen(t);
	kind = btf_kind(t);
	fprintf(out, "[%u] %s '%s'", id, btf_kind_str(kind), btf_str(btf, t->name_off));
	switch (kind) {
	case BTF_KIND_INT:
		fprintf(out, " size=%u bits_offset=%u nr_bits=%u encoding=%s",
			t->size, btf_int_offset(t), btf_int_bits(t),
			btf_int_enc_str(btf_int_encoding(t)));
		break;
	/* simple reference kinds: only the referenced type id matters */
	case BTF_KIND_PTR:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_TYPE_TAG:
		fprintf(out, " type_id=%u", t->type);
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *arr = btf_array(t);
		fprintf(out, " type_id=%u index_type_id=%u nr_elems=%u",
			arr->type, arr->index_type, arr->nelems);
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		fprintf(out, " size=%u vlen=%u", t->size, vlen);
		for (i = 0; i < vlen; i++, m++) {
			__u32 bit_off, bit_sz;
			bit_off = btf_member_bit_offset(t, i);
			bit_sz = btf_member_bitfield_size(t, i);
			fprintf(out, "\n\t'%s' type_id=%u bits_offset=%u",
				btf_str(btf, m->name_off), m->type, bit_off);
			/* bitfield_size suffix only emitted for actual bitfields */
			if (bit_sz)
				fprintf(out, " bitfield_size=%u", bit_sz);
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *v = btf_enum(t);
		const char *fmt_str;
		/* kflag marks the enum as signed; pick the matching format */
		fmt_str = btf_kflag(t) ? "\n\t'%s' val=%d" : "\n\t'%s' val=%u";
		fprintf(out, " encoding=%s size=%u vlen=%u",
			btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
		for (i = 0; i < vlen; i++, v++) {
			fprintf(out, fmt_str,
				btf_str(btf, v->name_off), v->val);
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *v = btf_enum64(t);
		const char *fmt_str;
		fmt_str = btf_kflag(t) ? "\n\t'%s' val=%lld" : "\n\t'%s' val=%llu";
		fprintf(out, " encoding=%s size=%u vlen=%u",
			btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
		for (i = 0; i < vlen; i++, v++) {
			/* 64-bit value is split into hi/lo halves in BTF */
			fprintf(out, fmt_str,
				btf_str(btf, v->name_off),
				((__u64)v->val_hi32 << 32) | v->val_lo32);
		}
		break;
	}
	case BTF_KIND_FWD:
		fprintf(out, " fwd_kind=%s", btf_kflag(t) ? "union" : "struct");
		break;
	case BTF_KIND_FUNC:
		fprintf(out, " type_id=%u linkage=%s", t->type, btf_func_linkage_str(t));
		break;
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *p = btf_params(t);
		fprintf(out, " ret_type_id=%u vlen=%u", t->type, vlen);
		for (i = 0; i < vlen; i++, p++) {
			fprintf(out, "\n\t'%s' type_id=%u",
				btf_str(btf, p->name_off), p->type);
		}
		break;
	}
	case BTF_KIND_VAR:
		fprintf(out, " type_id=%u, linkage=%s",
			t->type, btf_var_linkage_str(btf_var(t)->linkage));
		break;
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *v = btf_var_secinfos(t);
		fprintf(out, " size=%u vlen=%u", t->size, vlen);
		for (i = 0; i < vlen; i++, v++) {
			fprintf(out, "\n\ttype_id=%u offset=%u size=%u",
				v->type, v->offset, v->size);
		}
		break;
	}
	case BTF_KIND_FLOAT:
		fprintf(out, " size=%u", t->size);
		break;
	case BTF_KIND_DECL_TAG:
		fprintf(out, " type_id=%u component_idx=%d",
			t->type, btf_decl_tag(t)->component_idx);
		break;
	default:
		break;
	}
	return 0;
}
/* Print raw BTF type dump into a local buffer and return string pointer back.
* Buffer *will* be overwritten by subsequent btf_type_raw_dump() calls
*/
const char *btf_type_raw_dump(const struct btf *btf, int type_id)
{
	static char buf[16 * 1024];
	FILE *f;

	/* leave one byte for the implicit NUL terminator */
	f = fmemopen(buf, sizeof(buf) - 1, "w");
	if (!f) {
		fprintf(stderr, "Failed to open memstream: %d\n", errno);
		return NULL;
	}

	fprintf_btf_type_raw(f, btf, type_id);
	fflush(f);
	fclose(f);

	return buf;
}
/* Validate that @btf contains exactly @nr_types types and that the raw
 * dump of each type matches the corresponding string in @exp_types.
 * Returns true (1) iff the count and every dump matched.
 */
int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[])
{
	int i;
	bool ok = true;

	/* fix: previously a type-count mismatch flagged the assertion but
	 * did not affect the returned ok value
	 */
	if (!ASSERT_EQ(btf__type_cnt(btf) - 1, nr_types, "btf_nr_types"))
		ok = false;

	for (i = 1; i <= nr_types; i++) {
		if (!ASSERT_STREQ(btf_type_raw_dump(btf, i), exp_types[i - 1], "raw_dump"))
			ok = false;
	}

	return ok;
}
/* btf_dump print callback: @ctx is the FILE * destination that was
 * handed to btf_dump__new().
 */
static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vfprintf((FILE *)ctx, fmt, args);
}
/* Print BTF-to-C dump into a local buffer and return string pointer back.
* Buffer *will* be overwritten by subsequent btf_type_raw_dump() calls
*/
const char *btf_type_c_dump(const struct btf *btf)
{
	/* shared static buffer; NOT thread-safe, overwritten on every call */
	static char buf[16 * 1024];
	FILE *buf_file;
	struct btf_dump *d = NULL;
	int err, i;
	buf_file = fmemopen(buf, sizeof(buf) - 1, "w");
	if (!buf_file) {
		fprintf(stderr, "Failed to open memstream: %d\n", errno);
		return NULL;
	}
	d = btf_dump__new(btf, btf_dump_printf, buf_file, NULL);
	if (libbpf_get_error(d)) {
		fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d));
		goto err_out;
	}
	/* type id 0 is the implicit 'void' type, so start from 1 */
	for (i = 1; i < btf__type_cnt(btf); i++) {
		err = btf_dump__dump_type(d, i);
		if (err) {
			fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err);
			goto err_out;
		}
	}
	btf_dump__free(d);
	fflush(buf_file);
	fclose(buf_file);
	return buf;
err_out:
	btf_dump__free(d);
	fclose(buf_file);
	return NULL;
}
| linux-master | tools/testing/selftests/bpf/btf_helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>
#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#define str_has_pfx(str, pfx) \
(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
#define TEST_LOADER_LOG_BUF_SZ 1048576
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif
/* cached kernel.unprivileged_bpf_disabled sysctl value; -1 = not probed yet */
static int sysctl_unpriv_disabled = -1;
/* bitmask of privilege modes a test spec applies to */
enum mode {
	PRIV = 1,
	UNPRIV = 2
};
/* Expected behavior of a test program in one privilege mode */
struct test_subspec {
	char *name;			/* subtest name (heap-allocated, owned) */
	bool expect_failure;		/* true if program load must be rejected */
	const char **expect_msgs;	/* verifier log substrings, matched in order */
	size_t expect_msg_cnt;
	int retval;			/* expected test-run return value */
	bool execute;			/* true if program should be test-run */
};
/* Full test specification parsed from a program's BTF decl_tags */
struct test_spec {
	const char *prog_name;		/* BPF program name (borrowed from libbpf) */
	struct test_subspec priv;	/* expectations in privileged mode */
	struct test_subspec unpriv;	/* expectations in unprivileged mode */
	int log_level;			/* requested verifier log level */
	int prog_flags;			/* extra BPF_F_* program load flags */
	int mode_mask;			/* PRIV/UNPRIV bits this spec applies to */
	bool auxiliary;			/* helper program loaded alongside tests */
	bool valid;			/* spec was parsed successfully */
};
/* Lazily allocate the shared verifier log buffer for @tester.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int tester_init(struct test_loader *tester)
{
	if (tester->log_buf)
		return 0;

	tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
	tester->log_buf = malloc(tester->log_buf_sz);
	if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
		return -ENOMEM;

	return 0;
}
/* Release resources held by @tester; safe to call with NULL */
void test_loader_fini(struct test_loader *tester)
{
	if (tester)
		free(tester->log_buf);
}
/* Free per-subspec allocations of @spec and reset the pointers so a
 * repeated call is harmless.
 */
static void free_test_spec(struct test_spec *spec)
{
	free(spec->priv.name);
	spec->priv.name = NULL;
	free(spec->unpriv.name);
	spec->unpriv.name = NULL;
	free(spec->priv.expect_msgs);
	spec->priv.expect_msgs = NULL;
	free(spec->unpriv.expect_msgs);
	spec->unpriv.expect_msgs = NULL;
}
/* Append @msg (borrowed pointer, typically into BTF strings) to
 * @subspec's expected-message array. Returns 0 or -ENOMEM.
 */
static int push_msg(const char *msg, struct test_subspec *subspec)
{
	size_t new_cnt = subspec->expect_msg_cnt + 1;
	void *msgs;

	msgs = realloc(subspec->expect_msgs, new_cnt * sizeof(void *));
	if (!msgs) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	subspec->expect_msgs = msgs;
	subspec->expect_msgs[new_cnt - 1] = msg;
	subspec->expect_msg_cnt = new_cnt;

	return 0;
}
/* Parse decimal or 0x-prefixed hex integer from @str into *@val.
 * @name is used only for the failure message. Returns 0 or -EINVAL.
 */
static int parse_int(const char *str, int *val, const char *name)
{
	const char *num = str;
	int base = 10;
	char *end;
	long tmp;

	if (str_has_pfx(str, "0x")) {
		num = str + 2;
		base = 16;
	}

	errno = 0;
	tmp = strtol(num, &end, base);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}
/* Parse an expected return value: either one of a few symbolic constant
 * names or a plain integer (see parse_int()). Returns 0 or -EINVAL.
 */
static int parse_retval(const char *str, int *val, const char *name)
{
	static const struct {
		const char *name;
		int val;
	} named_values[] = {
		{ "INT_MIN" , INT_MIN },
		{ "POINTER_VALUE", POINTER_VALUE },
		{ "TEST_DATA_LEN", TEST_DATA_LEN },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(named_values); i++) {
		if (strcmp(str, named_values[i].name) == 0) {
			*val = named_values[i].val;
			return 0;
		}
	}

	return parse_int(str, val, name);
}
/* Uses btf_decl_tag attributes to describe the expected test
* behavior, see bpf_misc.h for detailed description of each attribute
* and attribute combinations.
*/
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	int func_id, i, err = 0;
	struct btf *btf;
	memset(spec, 0, sizeof(*spec));
	spec->prog_name = bpf_program__name(prog);
	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}
	/* test attributes are BTF decl_tags attached to the program's FUNC */
	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}
	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		int tmp;
		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;
		/* only tags attached to the function itself (component_idx == -1) */
		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;
		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
			err = push_msg(msg, &spec->priv);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
			err = push_msg(msg, &spec->unpriv);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			/* flags can be given by symbolic name or numerically */
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_STRICT_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_ANY_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				spec->prog_flags |= BPF_F_TEST_RND_HI32;
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				spec->prog_flags |= BPF_F_TEST_STATE_FREQ;
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				spec->prog_flags |= BPF_F_SLEEPABLE;
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
			} else /* assume numeric value */ {
				err = parse_int(val, &tmp, "test prog flags");
				if (err)
					goto cleanup;
				spec->prog_flags |= tmp;
			}
		}
	}
	/* with no explicit mode tags, the test runs in privileged mode only */
	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;
	if (!description)
		description = spec->prog_name;
	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}
	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;
		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}
	/* unpriv subspec inherits priv defaults unless overridden by tags */
	if (spec->mode_mask & (PRIV | UNPRIV)) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;
		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}
		if (!spec->unpriv.expect_msgs) {
			size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);
			spec->unpriv.expect_msgs = malloc(sz);
			if (!spec->unpriv.expect_msgs) {
				PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
				err = -ENOMEM;
				goto cleanup;
			}
			memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
			spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
		}
	}
	spec->valid = true;
	return 0;
cleanup:
	free_test_spec(spec);
	return err;
}
/* Configure @prog for the upcoming load attempt: attach the shared log
 * buffer, raise the verifier log level to match test verbosity, and OR
 * in the test-specified program flags. Also resets the log buffer and
 * the expected-message match position.
 */
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;
	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;
	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);
	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);
	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);
	tester->log_buf[0] = '\0';
	tester->next_match_pos = 0;
}
/* Print the accumulated verifier log; suppressed in quiet (non-verbose)
 * mode unless @force is set.
 */
static void emit_verifier_log(const char *log_buf, bool force)
{
	if (force || env.verbosity != VERBOSE_NONE)
		fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}
/* Check that every expected message appears, in order, in the verifier
 * log accumulated during load. On the first miss, emit the log plus all
 * previously matched and the missing expected message for diagnosis.
 */
static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;
	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;
		expect_msg = subspec->expect_msgs[i];
		/* search only past the previous match to enforce ordering */
		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}
		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}
/* Snapshot of effective capabilities so a drop can be undone later */
struct cap_state {
	__u64 old_caps;		/* effective caps before drop_capabilities() */
	bool initialized;	/* true iff old_caps holds a valid snapshot */
};
/* Drop the BPF-relevant effective capabilities, saving the previous set
 * in @caps so restore_capabilities() can undo it. Returns 0 or the
 * cap_disable_effective() error.
 */
static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (!err) {
		caps->initialized = true;
		return 0;
	}

	PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
	return err;
}
/* Restore capabilities previously saved by drop_capabilities(); no-op
 * if nothing was dropped. Returns 0 or the restore error.
 */
static int restore_capabilities(struct cap_state *caps)
{
	int err = 0;

	if (caps->initialized) {
		err = cap_enable_effective(caps->old_caps, NULL);
		if (err)
			PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
		caps->initialized = false;
	}

	return err;
}
/* Check whether an unprivileged run of @spec is possible on this system:
 * unpriv BPF must be enabled, and BPF_F_ANY_ALIGNMENT requires efficient
 * unaligned access support.
 */
static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	/* lazily probe the unprivileged_bpf_disabled sysctl once */
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;

	return !(spec->prog_flags & BPF_F_ANY_ALIGNMENT) || EFFICIENT_UNALIGNED_ACCESS;
}
/* Report whether @map's type can be created by an unprivileged process */
static bool is_unpriv_capable_map(struct bpf_map *map)
{
	switch (bpf_map__type(map)) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		/* hash maps with a fixed (zero) seed are priv-only */
		return !(bpf_map__map_flags(map) & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}
/* Execute loaded program @fd_prog once via BPF_PROG_TEST_RUN with dummy
 * input/output buffers; store its return code in *@retval.
 * Returns 0 on success, the bpf_prog_test_run_opts() error otherwise.
 */
static int do_prog_test_run(int fd_prog, int *retval)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	/* capture errno before any other libc call can clobber it */
	saved_errno = errno;
	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}
	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;
	return 0;
}
/* Decide whether the program should actually be executed after load:
 * only programs marked for execution and expected to load, and only if
 * alignment requirements permit it on this architecture.
 */
static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute || subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}
/* this function is forced noinline and has short generic name to look better
* in test_progs output (in case of a failure)
*/
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	/* fix: initialize to NULL — if no program matched spec->prog_name,
	 * tprog was previously passed to prepare_case() uninitialized (UB)
	 */
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	bool should_load;
	if (!test__start_subtest(subspec->name))
		return;
	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}
	/* open a fresh object instance for every subtest */
	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;
	/* autoload only the program under test plus mode-matching auxiliaries */
	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;
		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}
			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}
		bpf_program__set_autoload(tprog_iter, should_load);
	}
	prepare_case(tester, spec, tobj, tprog);
	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));
	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}
	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);
	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;
		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}
		do_prog_test_run(bpf_program__fd(tprog), &retval);
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}
tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}
/* Open the skeleton ELF bytes, parse a test_spec for every program in
 * the object, then run each valid, non-auxiliary spec as a priv and/or
 * unpriv subtest. All specs and the spec-parsing object are released at
 * the end.
 */
static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "Can't alloc specs array")) {
		/* fix: obj was previously leaked on this failure path */
		bpf_object__close(obj);
		return;
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);
	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}
/* Public entry point: parse test specs out of the skeleton's ELF bytes
 * and run each spec as a privileged and/or unprivileged subtest.
 */
void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}
| linux-master | tools/testing/selftests/bpf/test_loader.c |
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <signal.h>
#define _SDT_HAS_SEMAPHORES 1
#include "sdt.h"
#define SEC(name) __attribute__((section(name), used))
#define BUF_SIZE 256
/* defined in urandom_read_aux.c */
void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz);
/* these are coming from urandom_read_lib{1,2}.c */
void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz);
void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz);
unsigned short urand_read_with_sema_semaphore SEC(".probes");
/* Read @count chunks from @fd (/dev/urandom), firing this executable's
 * and the shared library's USDT probes once per iteration. The read()
 * result is deliberately ignored: only the probe hits matter to tests.
 */
static __attribute__((noinline))
void urandom_read(int fd, int count)
{
	char buf[BUF_SIZE];
	int i;
	for (i = 0; i < count; ++i) {
		read(fd, buf, BUF_SIZE);
		/* trigger USDTs defined in executable itself */
		urand_read_without_sema(i, count, BUF_SIZE);
		STAP_PROBE3(urand, read_with_sema, i, count, BUF_SIZE);
		/* trigger USDTs defined in shared lib */
		urandlib_read_without_sema(i, count, BUF_SIZE);
		urandlib_read_with_sema(i, count, BUF_SIZE);
	}
}
/* Set from the SIGPIPE handler once the parent closes its end of our
 * stdout pipe. sig_atomic_t is the only object type the C standard
 * guarantees can be safely written from an async signal handler
 * (C11 7.14.1.1); volatile bool carried no such guarantee.
 */
static volatile sig_atomic_t parent_ready;

static void handle_sigpipe(int sig)
{
	parent_ready = 1;
}
/* Usage: urandom_read [read_cnt [report_pid]]
 * With a second argument present, the process prints its PID repeatedly
 * until the parent closes our stdout (detected via SIGPIPE), and only
 * then triggers the probes — giving the parent time to attach.
 */
int main(int argc, char *argv[])
{
	int fd = open("/dev/urandom", O_RDONLY);
	int count = 4;
	bool report_pid = false;
	if (fd < 0)
		return 1;
	if (argc >= 2)
		count = atoi(argv[1]);
	if (argc >= 3) {
		report_pid = true;
		/* install SIGPIPE handler to catch when parent closes their
		 * end of the pipe (on the other side of our stdout)
		 */
		signal(SIGPIPE, handle_sigpipe);
	}
	/* report PID and wait for parent process to send us "signal" by
	 * closing stdout
	 */
	if (report_pid) {
		while (!parent_ready) {
			fprintf(stdout, "%d\n", getpid());
			fflush(stdout);
		}
		/* at this point stdout is closed, parent process knows our
		 * PID and is ready to trace us
		 */
	}
	urandom_read(fd, count);
	close(fd);
	return 0;
}
| linux-master | tools/testing/selftests/bpf/urandom_read.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/if_alg.h>
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
#include "testing_helpers.h"
static struct bpf_insn prog[BPF_MAXINSNS];
/* Fill global prog[] with @insns random MOV-immediate instructions; the
 * last slot is overwritten with EXIT so the program always terminates.
 * @fd_map is unused (kept for bpf_filler callback signature
 * compatibility). Note: reseeds rand() on every call.
 */
static void bpf_gen_imm_prog(unsigned int insns, int fd_map)
{
	int i;
	srand(time(NULL));
	for (i = 0; i < insns; i++)
		prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand());
	prog[i - 1] = BPF_EXIT_INSN();
}
/* Fill global prog[] with 2-insn BPF_LD_MAP_FD loads referencing
 * @fd_map, terminated by EXIT. Since a LD_IMM64 pair can't be half
 * overwritten, for even @insns the final pair is replaced with a MOV
 * plus the EXIT.
 */
static void bpf_gen_map_prog(unsigned int insns, int fd_map)
{
	int i, j = 0;
	for (i = 0; i + 1 < insns; i += 2) {
		struct bpf_insn tmp[] = {
			BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map)
		};
		memcpy(&prog[i], tmp, sizeof(tmp));
	}
	if (insns % 2 == 0)
		prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42);
	prog[insns - 1] = BPF_EXIT_INSN();
}
/* Generate a program image via @bpf_filler and load it as SCHED_CLS,
 * asserting success; returns the program fd. When a map fd was
 * embedded, prog[] is regenerated with fd 0 afterwards — NOTE(review):
 * presumably so the locally kept image matches the fd-independent form
 * used for tag computation; confirm against tag comparison callers.
 */
static int bpf_try_load_prog(int insns, int fd_map,
			     void (*bpf_filler)(unsigned int insns,
						int fd_map))
{
	int fd_prog;
	bpf_filler(insns, fd_map);
	fd_prog = bpf_test_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
					NULL, 0);
	assert(fd_prog > 0);
	if (fd_map > 0)
		bpf_filler(insns, 0);
	return fd_prog;
}
/* Convert one (case-insensitive) hex digit to its value, -1 if invalid. */
static int __hex2bin(char ch)
{
	char c = tolower(ch);

	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}
/* Decode 2 * @count hex characters from @src into @count bytes at @dst.
 * Returns 0 on success, -1 if any input character is not a hex digit.
 */
static int hex2bin(uint8_t *dst, const char *src, size_t count)
{
	while (count--) {
		int byte = 0;
		int half;

		for (half = 0; half < 2; half++) {
			char c = tolower(*src++);
			int v;

			if (c >= '0' && c <= '9')
				v = c - '0';
			else if (c >= 'a' && c <= 'f')
				v = c - 'a' + 10;
			else
				return -1;
			byte = (byte << 4) | v;
		}
		*dst++ = byte;
	}
	return 0;
}
/* Find the "prog_tag:" line in /proc/<pid>/fdinfo/<fd_prog> and decode its
 * hex payload into @tag (@len bytes). Aborts if no tag line is found.
 */
static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len)
{
	const int prefix_len = sizeof("prog_tag:\t") - 1;
	char line[256];
	int decoded = -1;
	FILE *fp;

	snprintf(line, sizeof(line), "/proc/%d/fdinfo/%d", getpid(),
		 fd_prog);
	fp = fopen(line, "r");
	assert(fp);

	while (fgets(line, sizeof(line), fp)) {
		if (strncmp(line, "prog_tag:\t", prefix_len) != 0)
			continue;
		decoded = hex2bin(tag, line + prefix_len, len);
		break;
	}

	fclose(fp);
	assert(!decoded);
}
/* Digest the current program image (@insns instructions from the global
 * prog[]) with the kernel crypto AF_ALG "sha1" interface and return the
 * first @len digest bytes in @tag. do_test() compares this against the
 * prog_tag reported in fdinfo. Aborts on any socket/IO failure.
 */
static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
{
	static const struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int fd_base, fd_alg, ret;
	ssize_t size;

	fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0);
	assert(fd_base > 0);

	ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg));
	assert(!ret);

	/* the operation fd is obtained by accept()ing on the bound socket */
	fd_alg = accept(fd_base, NULL, 0);
	assert(fd_alg > 0);

	/* write the raw instruction bytes, then read back the digest prefix */
	insns *= sizeof(struct bpf_insn);
	size = write(fd_alg, prog, insns);
	assert(size == insns);

	size = read(fd_alg, tag, len);
	assert(size == len);

	close(fd_alg);
	close(fd_base);
}
/* Print @prefix followed by @len tag bytes as lowercase hex, newline-terminated. */
static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len)
{
	uint32_t pos;

	fputs(prefix, stdout);
	for (pos = 0; pos < len; pos++)
		printf("%02x", tag[pos]);
	putchar('\n');
}
/* Report a fdinfo/af_alg tag mismatch for an @insns-instruction program
 * and terminate the test run with a failure exit code.
 */
static void tag_exit_report(int insns, int fd_map, uint8_t *ftag,
			    uint8_t *atag, uint32_t len)
{
	const char *suffix = fd_map < 0 ? "" : " with map";

	printf("Program tag mismatch for %d insns%s!\n", insns, suffix);
	tag_dump(" fdinfo result: ", ftag, len);
	tag_dump(" af_alg result: ", atag, len);
	exit(1);
}
/* For every program length from @start_insns to BPF_MAXINSNS, load a
 * generated program and require that the fdinfo prog_tag matches the
 * af_alg sha1 prefix. Increments *tests per length checked; exits via
 * tag_exit_report() on the first mismatch.
 */
static void do_test(uint32_t *tests, int start_insns, int fd_map,
		    void (*bpf_filler)(unsigned int insns, int fd))
{
	int len, fd_prog;

	for (len = start_insns; len <= BPF_MAXINSNS; len++) {
		uint8_t ftag[8], atag[sizeof(ftag)];

		fd_prog = bpf_try_load_prog(len, fd_map, bpf_filler);
		tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));
		tag_from_alg(len, atag, sizeof(atag));
		if (memcmp(ftag, atag, sizeof(ftag)) != 0)
			tag_exit_report(len, fd_map, ftag, atag, sizeof(ftag));

		close(fd_prog);
		sched_yield();
		(*tests)++;
	}
}
/* Entry point: run the tag comparison five times over both the immediate
 * and the map-fd program generators and report the total count.
 */
int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	uint32_t done = 0;
	int round, fd_map;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
				sizeof(int), 1, &opts);
	assert(fd_map > 0);

	for (round = 0; round < 5; round++) {
		do_test(&done, 2, -1, bpf_gen_imm_prog);
		do_test(&done, 3, fd_map, bpf_gen_map_prog);
	}

	printf("test_tag: OK (%u tests)\n", done);
	close(fd_map);
	return 0;
}
| linux-master | tools/testing/selftests/bpf/test_tag.c |
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* AF_XDP user-space access library.
*
* Copyright(c) 2018 - 2019 Intel Corporation.
*
* Author(s): Magnus Karlsson <[email protected]>
*/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "xsk.h"
#include "bpf_util.h"
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
#ifndef AF_XDP
#define AF_XDP 44
#endif
#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif
#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#define XSKMAP_SIZE 1
/* A registered umem region plus its ring bookkeeping. fill_save/comp_save
 * hold the ring structs passed to xsk_umem__create() until the first
 * socket consumes them (xsk_socket__create_shared() clears them);
 * refcount counts the sockets sharing this umem.
 */
struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};

/* Per-(ifindex, queue_id) state, shared by sockets bound to the same umem
 * and refcounted via xsk_get_ctx()/xsk_put_ctx().
 */
struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
};

/* One AF_XDP socket: its rx/tx rings, context and configuration. */
struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

/* RTM_NEWLINK netlink request buffer used by xsk_set_mtu(). */
struct nl_mtu_req {
	struct nlmsghdr nh;
	struct ifinfomsg msg;
	char buf[512];
};
/* Return the socket fd backing @umem, or -EINVAL for a NULL umem. */
int xsk_umem__fd(const struct xsk_umem *umem)
{
	if (!umem)
		return -EINVAL;
	return umem->fd;
}

/* Return the fd of @xsk, or -EINVAL for a NULL socket. */
int xsk_socket__fd(const struct xsk_socket *xsk)
{
	if (!xsk)
		return -EINVAL;
	return xsk->fd;
}
/* True iff @buffer sits exactly on a page boundary. */
static bool xsk_page_aligned(void *buffer)
{
	unsigned long mask = getpagesize() - 1;

	return ((unsigned long)buffer & mask) == 0;
}
/* Populate @cfg from @usr_cfg, or with the library defaults when the
 * caller passed no configuration.
 */
static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (usr_cfg) {
		cfg->fill_size = usr_cfg->fill_size;
		cfg->comp_size = usr_cfg->comp_size;
		cfg->frame_size = usr_cfg->frame_size;
		cfg->frame_headroom = usr_cfg->frame_headroom;
		cfg->flags = usr_cfg->flags;
		return;
	}

	cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
	cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
}
/* Populate socket ring sizes and bind flags from @usr_cfg, or with the
 * defaults when none was given. Always returns 0.
 */
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (usr_cfg) {
		cfg->rx_size = usr_cfg->rx_size;
		cfg->tx_size = usr_cfg->tx_size;
		cfg->bind_flags = usr_cfg->bind_flags;
		return 0;
	}

	cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg->bind_flags = 0;
	return 0;
}
/* Query the kernel's ring mmap offsets for @fd. Returns 0 on success, the
 * getsockopt() error as-is, or -EINVAL if the kernel returned a struct of
 * an unexpected size (ABI mismatch).
 */
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen = sizeof(*off);
	int rc;

	rc = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (rc)
		return rc;

	return optlen == sizeof(*off) ? 0 : -EINVAL;
}
/* Create and mmap the fill and completion rings for @umem on socket @fd.
 * On success both ring structs are fully wired up; on failure any ring
 * already mapped is unmapped and -errno is returned.
 */
static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *fill_map, *comp_map;
	int err;

	/* size both rings in the kernel before mmap()ing them */
	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	/* fill ring: descriptors are __u64 umem addresses */
	fill_map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			XDP_UMEM_PGOFF_FILL_RING);
	if (fill_map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;	/* mask assumes power-of-two size */
	fill->size = umem->config.fill_size;
	fill->producer = fill_map + off.fr.producer;
	fill->consumer = fill_map + off.fr.consumer;
	fill->flags = fill_map + off.fr.flags;
	fill->ring = fill_map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;	/* whole ring initially free */

	comp_map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			XDP_UMEM_PGOFF_COMPLETION_RING);
	if (comp_map == MAP_FAILED) {
		/* bug fix: the previous code munmap()ed the just-failed
		 * MAP_FAILED pointer here and leaked the fill mapping;
		 * keep the fill base in its own variable and unmap that.
		 */
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = comp_map + off.cr.producer;
	comp->consumer = comp_map + off.cr.consumer;
	comp->flags = comp_map + off.cr.flags;
	comp->ring = comp_map + off.cr.desc;

	return 0;

out_mmap:
	munmap(fill_map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}
/* Register @umem_area (@size bytes) as an AF_XDP umem and create its fill
 * and completion rings. On success sets *umem_ptr and returns 0; otherwise
 * returns -EFAULT/-EINVAL/-ENOMEM or -errno. The caller keeps ownership
 * of umem_area.
 */
int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
		     __u64 size, struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	/* NOTE(review): alignment is only enforced when size == 0; one
	 * would expect `!size || !xsk_page_aligned(...)` — confirm the
	 * intent before relying on this path (the kernel still validates
	 * the registration).
	 */
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	/* register the memory area with the kernel */
	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	/* stash the caller's ring structs; the first socket created on
	 * this umem picks them up (see xsk_socket__create_shared()).
	 */
	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
/* Query whether an XDP program is attached on @ifindex in the requested
 * @mode (XDP_FLAGS_DRV_MODE or XDP_FLAGS_SKB_MODE). Returns false on
 * query failure or for any other mode value.
 */
bool xsk_is_in_mode(u32 ifindex, int mode)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int err;

	err = bpf_xdp_query(ifindex, mode, &opts);
	if (err) {
		printf("XDP mode query returned error %s\n", strerror(errno));
		return false;
	}

	switch (mode) {
	case XDP_FLAGS_DRV_MODE:
		return opts.attach_mode == XDP_ATTACHED_DRV;
	case XDP_FLAGS_SKB_MODE:
		return opts.attach_mode == XDP_ATTACHED_SKB;
	default:
		return false;
	}
}
/* Lifted from netlink.c in tools/lib/bpf */
/* Lifted from netlink.c in tools/lib/bpf */
/* recvmsg() wrapper that retries on EINTR/EAGAIN and reports other
 * failures as -errno instead of -1.
 */
static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
{
	int ret;

	do {
		ret = recvmsg(sock, mhdr, flags);
	} while (ret < 0 && (errno == EINTR || errno == EAGAIN));

	return ret < 0 ? -errno : ret;
}
/* Lifted from netlink.c in tools/lib/bpf */
/* Lifted from netlink.c in tools/lib/bpf */
/* (Re)size @iov's buffer to @len bytes; on failure the old buffer is left
 * untouched and -ENOMEM is returned.
 */
static int alloc_iov(struct iovec *iov, int len)
{
	void *grown = realloc(iov->iov_base, len);

	if (!grown)
		return -ENOMEM;

	iov->iov_base = grown;
	iov->iov_len = len;
	return 0;
}
/* Original version lifted from netlink.c in tools/lib/bpf */
static int netlink_recv(int sock)
{
struct iovec iov = {};
struct msghdr mhdr = {
.msg_iov = &iov,
.msg_iovlen = 1,
};
bool multipart = true;
struct nlmsgerr *err;
struct nlmsghdr *nh;
int len, ret;
ret = alloc_iov(&iov, 4096);
if (ret)
goto done;
while (multipart) {
multipart = false;
len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
if (len < 0) {
ret = len;
goto done;
}
if (len > iov.iov_len) {
ret = alloc_iov(&iov, len);
if (ret)
goto done;
}
len = netlink_recvmsg(sock, &mhdr, 0);
if (len < 0) {
ret = len;
goto done;
}
if (len == 0)
break;
for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
nh = NLMSG_NEXT(nh, len)) {
if (nh->nlmsg_flags & NLM_F_MULTI)
multipart = true;
switch (nh->nlmsg_type) {
case NLMSG_ERROR:
err = (struct nlmsgerr *)NLMSG_DATA(nh);
if (!err->error)
continue;
ret = err->error;
goto done;
case NLMSG_DONE:
ret = 0;
goto done;
default:
break;
}
}
}
ret = 0;
done:
free(iov.iov_base);
return ret;
}
/* Set the MTU of interface @ifindex via an RTM_NEWLINK netlink request.
 * Returns 0 on success, a positive errno if send() fails, or the status
 * reported by the kernel ACK (see netlink_recv()).
 */
int xsk_set_mtu(int ifindex, int mtu)
{
	struct nl_mtu_req req;
	struct rtattr *rta;
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0)
		return fd;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.msg.ifi_family = AF_UNSPEC;
	req.msg.ifi_index = ifindex;

	/* append a single IFLA_MTU attribute right after the ifinfomsg
	 * header and grow nlmsg_len to cover it
	 */
	rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_MTU;
	rta->rta_len = RTA_LENGTH(sizeof(unsigned int));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_LENGTH(sizeof(mtu));
	memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));

	ret = send(fd, &req, req.nh.nlmsg_len, 0);
	if (ret < 0) {
		close(fd);
		/* NOTE(review): returns positive errno here, unlike the
		 * negative-errno convention used elsewhere in this file.
		 */
		return errno;
	}

	ret = netlink_recv(fd);
	close(fd);
	return ret;
}
/* Attach @prog to @ifindex with the given XDP attach flags. */
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
{
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), xdp_flags, NULL);
}

/* Detach whatever XDP program is attached on @ifindex for @xdp_flags. */
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
{
	bpf_xdp_detach(ifindex, xdp_flags, NULL);
}

/* Remove the single entry (index 0) from the xskmap. */
void xsk_clear_xskmap(struct bpf_map *map)
{
	u32 slot = 0;

	bpf_map_delete_elem(bpf_map__fd(map), &slot);
}

/* Store @xsk's fd at index 0 of the xskmap; returns the update result. */
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
{
	int sock_fd = xsk_socket__fd(xsk);
	u32 slot = 0;

	return bpf_map_update_elem(bpf_map__fd(map), &slot, &sock_fd, 0);
}
/* Look up an existing (ifindex, queue_id) context on @umem, taking a
 * reference on it. Returns NULL when no matching context exists.
 */
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex != ifindex || ctx->queue_id != queue_id)
			continue;
		ctx->refcount++;
		return ctx;
	}

	return NULL;
}
/* Drop one reference on @ctx; on the last reference, optionally unmap its
 * fill/completion rings (@unmap) and free the context.
 */
static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	/* ring points at the descriptor area; stepping back by the desc
	 * offset recovers the original mmap base address
	 */
	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
	       sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
	       sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}
/* Allocate a new (ifindex, queue_id) context on @umem with refcount 1.
 * If the umem's saved rings were already consumed (fill_save cleared),
 * fresh fill/comp rings are created on @xsk's own fd; otherwise the
 * caller-provided ring structs are populated from the saved ones.
 * Returns NULL on allocation or ring-setup failure.
 */
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}
/* Create an AF_XDP socket on (@ifindex, @queue_id) backed by @umem. The
 * first socket on a umem reuses the umem's own fd; later sockets get a
 * fresh fd and bind with XDP_SHARED_UMEM. At least one of @rx/@tx must be
 * supplied; @fill/@comp are only required when a new (ifindex, queue)
 * context has to be created. On success sets *xsk_ptr and returns 0;
 * otherwise returns a negative errno and unwinds all partial state.
 */
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	/* presumably: on error, only unmap rings that are not the umem's
	 * saved originals — passed to xsk_put_ctx() below
	 */
	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	if (umem->refcount++ > 0) {
		/* umem fd already claimed by an earlier socket */
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	/* find or create the per-(ifindex, queue) context */
	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	/* size the rx/tx rings in the kernel (once per fd) */
	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		/* map the rx descriptor ring and wire up the ring struct */
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		/* additional sockets share the first socket's umem */
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	/* the saved rings are consumed by the first successful socket */
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	/* only close a private fd; refcount hitting zero means xsk->fd is
	 * the umem's fd, which stays open
	 */
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
/* Convenience wrapper around xsk_socket__create_shared() that reuses the
 * fill/completion rings saved when @umem was created.
 */
int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
/* Destroy @umem. Fails with -EBUSY while sockets still reference it. The
 * saved fill/comp rings are unmapped only if no socket ever consumed them
 * (fill_save/comp_save still set).
 */
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		/* step back from the desc area to the mmap base address */
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}
/* Tear down @xsk: release its context, unmap its rx/tx rings, drop the
 * umem reference, and close the fd unless it is also the umem's fd
 * (which xsk_umem__delete() closes).
 */
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}
| linux-master | tools/testing/selftests/bpf/xsk.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <time.h>
#include <sched.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sendfile.h>
#include <linux/netlink.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/tls.h>
#include <assert.h>
#include <libgen.h>
#include <getopt.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "cgroup_helpers.h"
int running;
static void running_handler(int a);
#ifndef TCP_ULP
# define TCP_ULP 31
#endif
#ifndef SOL_TLS
# define SOL_TLS 282
#endif
/* randomly selected ports for testing on lo */
#define S1_PORT 10000
#define S2_PORT 10001
#define BPF_SOCKMAP_FILENAME "test_sockmap_kern.bpf.o"
#define BPF_SOCKHASH_FILENAME "test_sockhash_kern.bpf.o"
#define CG_PATH "/sockmap"
/* global sockets */
int s1, s2, c1, c2, p1, p2;
int test_cnt;
int passed;
int failed;
int map_fd[9];
struct bpf_map *maps[9];
int prog_fd[11];
int txmsg_pass;
int txmsg_redir;
int txmsg_drop;
int txmsg_apply;
int txmsg_cork;
int txmsg_start;
int txmsg_end;
int txmsg_start_push;
int txmsg_end_push;
int txmsg_start_pop;
int txmsg_pop;
int txmsg_ingress;
int txmsg_redir_skb;
int txmsg_ktls_skb;
int txmsg_ktls_skb_drop;
int txmsg_ktls_skb_redir;
int ktls;
int peek_flag;
int skb_use_parser;
int txmsg_omit_skb_parser;
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
{"cgroup", required_argument, NULL, 'c' },
{"rate", required_argument, NULL, 'r' },
{"verbose", optional_argument, NULL, 'v' },
{"iov_count", required_argument, NULL, 'i' },
{"length", required_argument, NULL, 'l' },
{"test", required_argument, NULL, 't' },
{"data_test", no_argument, NULL, 'd' },
{"txmsg", no_argument, &txmsg_pass, 1 },
{"txmsg_redir", no_argument, &txmsg_redir, 1 },
{"txmsg_drop", no_argument, &txmsg_drop, 1 },
{"txmsg_apply", required_argument, NULL, 'a'},
{"txmsg_cork", required_argument, NULL, 'k'},
{"txmsg_start", required_argument, NULL, 's'},
{"txmsg_end", required_argument, NULL, 'e'},
{"txmsg_start_push", required_argument, NULL, 'p'},
{"txmsg_end_push", required_argument, NULL, 'q'},
{"txmsg_start_pop", required_argument, NULL, 'w'},
{"txmsg_pop", required_argument, NULL, 'x'},
{"txmsg_ingress", no_argument, &txmsg_ingress, 1 },
{"txmsg_redir_skb", no_argument, &txmsg_redir_skb, 1 },
{"ktls", no_argument, &ktls, 1 },
{"peek", no_argument, &peek_flag, 1 },
{"txmsg_omit_skb_parser", no_argument, &txmsg_omit_skb_parser, 1},
{"whitelist", required_argument, NULL, 'n' },
{"blacklist", required_argument, NULL, 'b' },
{0, 0, NULL, 0 }
};
/* Running counters and labels for the harness's per-subtest reporting. */
struct test_env {
	const char *type;	/* BPF object file name (sockmap vs sockhash) */
	const char *subtest;
	const char *prepend;
	int test_num;
	int subtest_num;
	int succ_cnt;
	int fail_cnt;
	int fail_last;	/* fail_cnt snapshot taken at subtest start */
};

struct test_env env;

/* Knobs describing how one send/recv test run behaves. */
struct sockmap_options {
	int verbose;
	bool base;
	bool sendpage;
	bool data_test;
	bool drop_expected;
	bool check_recved_len;
	bool tx_wait_mem;
	int iov_count;
	int iov_length;
	int rate;
	char *map;
	char *whitelist;
	char *blacklist;
	char *prepend;
};

/* A named test case bound to its driver function. */
struct _test {
	char *title;
	void (*tester)(int cg_fd, struct sockmap_options *opt);
};
/* Per-subtest bookkeeping helpers; all operate on the global env. */
static void test_start(void)
{
	env.subtest_num += 1;
}

static void test_fail(void)
{
	env.fail_cnt += 1;
}

static void test_pass(void)
{
	env.succ_cnt += 1;
}
/* Clear every txmsg_* knob so settings from one subtest never leak into
 * the next one.
 */
static void test_reset(void)
{
	txmsg_start = 0;
	txmsg_end = 0;
	txmsg_start_pop = 0;
	txmsg_pop = 0;
	txmsg_start_push = 0;
	txmsg_end_push = 0;
	txmsg_pass = 0;
	txmsg_drop = 0;
	txmsg_redir = 0;
	txmsg_apply = 0;
	txmsg_cork = 0;
	txmsg_ingress = 0;
	txmsg_redir_skb = 0;
	txmsg_ktls_skb = 0;
	txmsg_ktls_skb_drop = 0;
	txmsg_ktls_skb_redir = 0;
	txmsg_omit_skb_parser = 0;
	skb_use_parser = 0;
}
/* Begin a new subtest: record its labels, snapshot the failure count and
 * reset all option knobs. Always returns 0.
 */
static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
{
	env.subtest_num = 0;
	env.test_num++;
	env.fail_last = env.fail_cnt;
	env.type = o->map;
	env.subtest = t->title;
	env.prepend = o->prepend;
	test_reset();
	return 0;
}
/* Close out the current subtest: count it as passed if no new failures
 * accumulated since test_start_subtest(), then print its result line.
 */
static void test_end_subtest(void)
{
	int new_failures = env.fail_cnt - env.fail_last;
	bool is_sockmap = strcmp(env.type, BPF_SOCKMAP_FILENAME) == 0;

	if (new_failures == 0)
		test_pass();

	fprintf(stdout, "#%2d/%2d %8s:%s:%s:%s\n",
		env.test_num, env.subtest_num,
		is_sockmap ? "sockmap" : "sockhash",
		env.prepend ? : "",
		env.subtest, new_failures ? "FAIL" : "OK");
}
/* Print the overall pass/fail totals for the whole run. */
static void test_print_results(void)
{
	printf("Pass: %d Fail: %d\n", env.succ_cnt, env.fail_cnt);
}
/* Print command-line usage, listing every entry in long_options[]. */
static void usage(char *argv[])
{
	const struct option *o;

	printf(" Usage: %s --cgroup <cgroup_path>\n", argv[0]);
	printf(" options:\n");
	for (o = long_options; o->name != 0; o++) {
		printf(" --%-12s", o->name);
		if (o->flag != NULL)
			printf(" flag (internal value:%d)\n", *o->flag);
		else
			printf(" -%c\n", o->val);
	}
	printf("\n");
}
/* Map a socket fd to the role name used in log output; the comparison
 * order (clients, servers, peers) matches the original if-chain so
 * duplicate fd values resolve identically.
 */
char *sock_to_string(int s)
{
	static const struct {
		int *fd;
		char *name;
	} roles[] = {
		{ &c1, "client1" },
		{ &c2, "client2" },
		{ &s1, "server1" },
		{ &s2, "server2" },
		{ &p1, "peer1" },
		{ &p2, "peer2" },
	};
	int i;

	for (i = 0; i < sizeof(roles) / sizeof(roles[0]); i++) {
		if (s == *roles[i].fd)
			return roles[i].name;
	}
	return "unknown";
}
/* Switch TCP socket @s to kernel TLS in both directions (TLS 1.2,
 * AES-GCM-128 with all-zero key material) and enlarge its send/receive
 * buffers. Returns 0 on success, -EINVAL on any setsockopt failure.
 */
static int sockmap_init_ktls(int verbose, int s)
{
	struct tls12_crypto_info_aes_gcm_128 tls_tx = {
		.info = {
			.version = TLS_1_2_VERSION,
			.cipher_type = TLS_CIPHER_AES_GCM_128,
		},
	};
	struct tls12_crypto_info_aes_gcm_128 tls_rx = {
		.info = {
			.version = TLS_1_2_VERSION,
			.cipher_type = TLS_CIPHER_AES_GCM_128,
		},
	};
	int so_buf = 6553500;
	int err;

	/* was a magic "6": TCP_ULP is an IPPROTO_TCP-level option */
	err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	if (err) {
		fprintf(stderr, "setsockopt: TCP_ULP(%s) failed with error %i\n", sock_to_string(s), err);
		return -EINVAL;
	}
	err = setsockopt(s, SOL_TLS, TLS_TX, (void *)&tls_tx, sizeof(tls_tx));
	if (err) {
		fprintf(stderr, "setsockopt: TLS_TX(%s) failed with error %i\n", sock_to_string(s), err);
		return -EINVAL;
	}
	err = setsockopt(s, SOL_TLS, TLS_RX, (void *)&tls_rx, sizeof(tls_rx));
	if (err) {
		fprintf(stderr, "setsockopt: TLS_RX(%s) failed with error %i\n", sock_to_string(s), err);
		return -EINVAL;
	}
	err = setsockopt(s, SOL_SOCKET, SO_SNDBUF, &so_buf, sizeof(so_buf));
	if (err) {
		fprintf(stderr, "setsockopt: (%s) failed sndbuf with error %i\n", sock_to_string(s), err);
		return -EINVAL;
	}
	err = setsockopt(s, SOL_SOCKET, SO_RCVBUF, &so_buf, sizeof(so_buf));
	if (err) {
		fprintf(stderr, "setsockopt: (%s) failed rcvbuf with error %i\n", sock_to_string(s), err);
		return -EINVAL;
	}

	if (verbose)
		fprintf(stdout, "socket(%s) kTLS enabled\n", sock_to_string(s));
	return 0;
}
/* Build the test topology on loopback: two listening servers (s1, s2),
 * two clients (c1, c2) connected to them, and the accepted peers (p1,
 * p2). Clients/servers are non-blocking with SO_REUSEADDR. Returns 0 on
 * success, errno on the first failing syscall.
 *
 * Fixes copy-pasted error messages that always blamed "s1" regardless of
 * which socket actually failed.
 */
static int sockmap_init_sockets(int verbose)
{
	int i, err, one = 1;
	struct sockaddr_in addr;
	int *fds[4] = {&s1, &s2, &c1, &c2};

	s1 = s2 = p1 = p2 = c1 = c2 = 0;

	/* Init sockets */
	for (i = 0; i < 4; i++) {
		*fds[i] = socket(AF_INET, SOCK_STREAM, 0);
		if (*fds[i] < 0) {
			perror("socket failed()");
			return errno;
		}
	}

	/* Allow reuse */
	for (i = 0; i < 2; i++) {
		err = setsockopt(*fds[i], SOL_SOCKET, SO_REUSEADDR,
				 (char *)&one, sizeof(one));
		if (err) {
			perror("setsockopt failed()");
			return errno;
		}
	}

	/* Non-blocking sockets */
	for (i = 0; i < 2; i++) {
		err = ioctl(*fds[i], FIONBIO, (char *)&one);
		if (err < 0) {
			perror("ioctl failed()");
			return errno;
		}
	}

	/* Bind server sockets */
	memset(&addr, 0, sizeof(struct sockaddr_in));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = inet_addr("127.0.0.1");

	addr.sin_port = htons(S1_PORT);
	err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
	if (err < 0) {
		perror("bind s1 failed()");
		return errno;
	}

	addr.sin_port = htons(S2_PORT);
	err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
	if (err < 0) {
		perror("bind s2 failed()");
		return errno;
	}

	/* Listen server sockets */
	addr.sin_port = htons(S1_PORT);
	err = listen(s1, 32);
	if (err < 0) {
		perror("listen s1 failed()");
		return errno;
	}

	addr.sin_port = htons(S2_PORT);
	err = listen(s2, 32);
	if (err < 0) {
		perror("listen s2 failed()");
		return errno;
	}

	/* Initiate Connect */
	addr.sin_port = htons(S1_PORT);
	err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
	if (err < 0 && errno != EINPROGRESS) {
		perror("connect c1 failed()");
		return errno;
	}

	addr.sin_port = htons(S2_PORT);
	err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
	if (err < 0 && errno != EINPROGRESS) {
		perror("connect c2 failed()");
		return errno;
	} else if (err < 0) {
		/* non-blocking connect still in progress is fine */
		err = 0;
	}

	/* Accept connections */
	p1 = accept(s1, NULL, NULL);
	if (p1 < 0) {
		perror("accept s1 failed()");
		return errno;
	}

	p2 = accept(s2, NULL, NULL);
	if (p2 < 0) {
		perror("accept s2 failed()");
		return errno;
	}

	if (verbose > 1) {
		printf("connected sockets: c1 <-> p1, c2 <-> p2\n");
		printf("cgroups binding: c1(%i) <-> s1(%i) - - - c2(%i) <-> s2(%i)\n",
		       c1, s1, c2, s2);
	}
	return 0;
}
/* Byte counters and wall-clock bounds for one send/recv loop. */
struct msg_stats {
	size_t bytes_sent;
	size_t bytes_recvd;
	struct timespec start;
	struct timespec end;
};
/* Send iov_length * cnt patterned bytes (0,1,2,... wrapping) through @fd
 * using sendfile() in cnt chunks, recording byte counts and timestamps in
 * @s. When opt->drop_expected is set, a successful send is itself the
 * failure. Returns 0 on success, 1 / -EIO / the sendfile error otherwise.
 */
static int msg_loop_sendpage(int fd, int iov_length, int cnt,
			     struct msg_stats *s,
			     struct sockmap_options *opt)
{
	bool drop = opt->drop_expected;
	unsigned char k = 0;
	FILE *file;
	int i, fp;

	file = tmpfile();
	if (!file) {
		perror("create file for sendpage");
		return 1;
	}
	/* fill the temp file with the repeating byte pattern the receiver
	 * checks (see msg_verify_data())
	 */
	for (i = 0; i < iov_length * cnt; i++, k++)
		fwrite(&k, sizeof(char), 1, file);
	fflush(file);
	fseek(file, 0, SEEK_SET);

	fp = fileno(file);
	clock_gettime(CLOCK_MONOTONIC, &s->start);
	for (i = 0; i < cnt; i++) {
		int sent;

		errno = 0;
		sent = sendfile(fd, fp, NULL, iov_length);

		if (!drop && sent < 0) {
			perror("sendpage loop error");
			fclose(file);
			return sent;
		} else if (drop && sent >= 0) {
			printf("sendpage loop error expected: %i errno %i\n",
			       sent, errno);
			fclose(file);
			return -EIO;
		}

		if (sent > 0)
			s->bytes_sent += sent;
	}
	clock_gettime(CLOCK_MONOTONIC, &s->end);
	fclose(file);
	return 0;
}
/* Release every iovec buffer in @msg plus the iovec array itself, then
 * reset the msghdr fields so it can be reused safely.
 */
static void msg_free_iov(struct msghdr *msg)
{
	size_t n;

	for (n = 0; n < msg->msg_iovlen; n++)
		free(msg->msg_iov[n].iov_base);
	free(msg->msg_iov);
	msg->msg_iov = NULL;
	msg->msg_iovlen = 0;
}
/* Allocate @iov_count iovecs of @iov_length zeroed bytes each into @msg.
 * When @data and @xmit are set, the buffers are filled with the repeating
 * 0,1,2,... byte pattern the receive side verifies. Returns 0 on success;
 * on failure all partial allocations are released and errno / -ENOMEM is
 * returned, leaving @msg untouched.
 *
 * Bug fix: the OOM unwind used to free via msg->msg_iov, which is not
 * assigned until the success path (NULL dereference on a zeroed msghdr),
 * and it leaked the iovec array itself.
 */
static int msg_alloc_iov(struct msghdr *msg,
			 int iov_count, int iov_length,
			 bool data, bool xmit)
{
	unsigned char k = 0;
	struct iovec *iov;
	int i;

	iov = calloc(iov_count, sizeof(struct iovec));
	if (!iov)
		return errno;

	for (i = 0; i < iov_count; i++) {
		unsigned char *d = calloc(iov_length, sizeof(char));

		if (!d) {
			fprintf(stderr, "iov_count %i/%i OOM\n", i, iov_count);
			goto unwind_iov;
		}
		iov[i].iov_base = d;
		iov[i].iov_len = iov_length;

		if (data && xmit) {
			int j;

			for (j = 0; j < iov_length; j++)
				d[j] = k++;
		}
	}

	msg->msg_iov = iov;
	msg->msg_iovlen = iov_count;

	return 0;
unwind_iov:
	/* unwind through the local array: msg->msg_iov was never set */
	for (i--; i >= 0 ; i--)
		free(iov[i].iov_base);
	free(iov);
	return -ENOMEM;
}
/* Verify that @size received bytes spread across @msg's iovecs follow the
 * repeating 0,1,2,... pattern, restarting the expected counter every
 * @chunk_sz bytes. With ktls skb ingress the first iovec additionally
 * carries a 4-byte "PASS" header. Returns 0 if all bytes match, -EIO on
 * the first mismatch.
 *
 * Bug fix: the byte index j was never reset between iovecs, so once the
 * first iovec had been scanned every subsequent iovec was silently
 * skipped and its data never verified.
 */
static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz)
{
	int i, j, bytes_cnt = 0;
	unsigned char k = 0;

	for (i = 0; i < msg->msg_iovlen; i++) {
		unsigned char *d = msg->msg_iov[i].iov_base;

		/* restart at the beginning of each iovec */
		j = 0;

		/* Special case test for skb ingress + ktls */
		if (i == 0 && txmsg_ktls_skb) {
			if (msg->msg_iov[i].iov_len < 4)
				return -EIO;
			if (memcmp(d, "PASS", 4) != 0) {
				fprintf(stderr,
					"detected skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n",
					i, 0, d[0], d[1], d[2], d[3]);
				return -EIO;
			}
			j = 4; /* advance index past PASS header */
		}

		for (; j < msg->msg_iov[i].iov_len && size; j++) {
			if (d[j] != k++) {
				fprintf(stderr,
					"detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n",
					i, j, d[j], k - 1, d[j+1], k);
				return -EIO;
			}
			bytes_cnt++;
			if (bytes_cnt == chunk_sz) {
				k = 0;
				bytes_cnt = 0;
			}
			size--;
		}
	}
	return 0;
}
/*
 * Core send/receive loop run by the tx and rx child processes.
 *
 * tx == true:  sendmsg() 'cnt' messages of iov_count * iov_length bytes,
 *              recording timing and byte counts in 's'.
 * tx == false: select()/recvmsg() until the expected total (minus bytes the
 *              BPF program is expected to pop) has arrived, optionally
 *              verifying the data pattern and a MSG_PEEK copy.
 *
 * Returns 0 on success, otherwise the (positive) errno via out_errno.
 */
static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
		    struct msg_stats *s, bool tx,
		    struct sockmap_options *opt)
{
	struct msghdr msg = {0}, msg_peek = {0};
	int err, i, flags = MSG_NOSIGNAL;
	bool drop = opt->drop_expected;
	bool data = opt->data_test;
	int iov_alloc_length = iov_length;

	/* Over-allocate the rx iovs when length checking so an oversized
	 * delivery is detected rather than truncated.
	 */
	if (!tx && opt->check_recved_len)
		iov_alloc_length *= 2;

	err = msg_alloc_iov(&msg, iov_count, iov_alloc_length, data, tx);
	if (err)
		goto out_errno;
	if (peek_flag) {
		/* Second buffer for the MSG_PEEK pass over the same data */
		err = msg_alloc_iov(&msg_peek, iov_count, iov_length, data, tx);
		if (err)
			goto out_errno;
	}

	if (tx) {
		clock_gettime(CLOCK_MONOTONIC, &s->start);
		for (i = 0; i < cnt; i++) {
			int sent;

			errno = 0;
			sent = sendmsg(fd, &msg, flags);

			if (!drop && sent < 0) {
				/* EACCES is the expected outcome of the
				 * tx_wait_mem test; report it as success.
				 */
				if (opt->tx_wait_mem && errno == EACCES) {
					errno = 0;
					goto out_errno;
				}
				perror("sendmsg loop error");
				goto out_errno;
			} else if (drop && sent >= 0) {
				/* Verdict program should have dropped this;
				 * a successful send is a test failure.
				 */
				fprintf(stderr,
					"sendmsg loop error expected: %i errno %i\n",
					sent, errno);
				errno = -EIO;
				goto out_errno;
			}
			if (sent > 0)
				s->bytes_sent += sent;
		}
		clock_gettime(CLOCK_MONOTONIC, &s->end);
	} else {
		int slct, recvp = 0, recv, max_fd = fd;
		float total_bytes, txmsg_pop_total;
		int fd_flags = O_NONBLOCK;
		struct timeval timeout;
		fd_set w;

		/* NOTE(review): this passes O_NONBLOCK as the fcntl *cmd*,
		 * not via F_SETFL, so it does not actually make the fd
		 * non-blocking — confirm intent.
		 */
		fcntl(fd, fd_flags);
		/* Account for pop bytes noting each iteration of apply will
		 * call msg_pop_data helper so we need to account for this
		 * by calculating the number of apply iterations. Note user
		 * of the tool can create cases where no data is sent by
		 * manipulating pop/push/pull/etc. For example txmsg_apply 1
		 * with txmsg_pop 1 will try to apply 1B at a time but each
		 * iteration will then pop 1B so no data will ever be sent.
		 * This is really only useful for testing edge cases in code
		 * paths.
		 */
		total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
		if (txmsg_apply)
			txmsg_pop_total = txmsg_pop * (total_bytes / txmsg_apply);
		else
			txmsg_pop_total = txmsg_pop * cnt;
		total_bytes -= txmsg_pop_total;
		err = clock_gettime(CLOCK_MONOTONIC, &s->start);
		if (err < 0)
			perror("recv start time");
		while (s->bytes_recvd < total_bytes) {
			/* Cork tests intentionally strand data, so use a
			 * short timeout there; otherwise allow 3s.
			 */
			if (txmsg_cork) {
				timeout.tv_sec = 0;
				timeout.tv_usec = 300000;
			} else {
				timeout.tv_sec = 3;
				timeout.tv_usec = 0;
			}

			/* FD sets */
			FD_ZERO(&w);
			FD_SET(fd, &w);

			slct = select(max_fd + 1, &w, NULL, NULL, &timeout);
			if (slct == -1) {
				perror("select()");
				clock_gettime(CLOCK_MONOTONIC, &s->end);
				goto out_errno;
			} else if (!slct) {
				if (opt->verbose)
					fprintf(stderr, "unexpected timeout: recved %zu/%f pop_total %f\n", s->bytes_recvd, total_bytes, txmsg_pop_total);
				errno = -EIO;
				clock_gettime(CLOCK_MONOTONIC, &s->end);
				goto out_errno;
			}

			if (opt->tx_wait_mem) {
				/* Wait for an exception condition, then close
				 * without reading so the tx side stays blocked
				 * on memory; errno 0 makes this return success.
				 */
				FD_ZERO(&w);
				FD_SET(fd, &w);
				slct = select(max_fd + 1, NULL, NULL, &w, &timeout);
				errno = 0;
				close(fd);
				goto out_errno;
			}

			errno = 0;
			if (peek_flag) {
				flags |= MSG_PEEK;
				recvp = recvmsg(fd, &msg_peek, flags);
				if (recvp < 0) {
					if (errno != EWOULDBLOCK) {
						clock_gettime(CLOCK_MONOTONIC, &s->end);
						goto out_errno;
					}
				}
				flags = 0;
			}

			recv = recvmsg(fd, &msg, flags);
			if (recv < 0) {
				if (errno != EWOULDBLOCK) {
					clock_gettime(CLOCK_MONOTONIC, &s->end);
					perror("recv failed()");
					goto out_errno;
				}
			}

			/* NOTE(review): when recvmsg() failed with
			 * EWOULDBLOCK, recv is -1 and is still added here,
			 * slightly deflating the byte count — confirm.
			 */
			s->bytes_recvd += recv;

			if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
				errno = EMSGSIZE;
				fprintf(stderr, "recv failed(), bytes_recvd:%zd, total_bytes:%f\n",
					s->bytes_recvd, total_bytes);
				goto out_errno;
			}

			if (data) {
				/* Pattern period differs: sendpage sends one
				 * iov cnt times, sendmsg sends iov_count iovs.
				 */
				int chunk_sz = opt->sendpage ?
						iov_length * cnt :
						iov_length * iov_count;

				errno = msg_verify_data(&msg, recv, chunk_sz);
				if (errno) {
					perror("data verify msg failed");
					goto out_errno;
				}
				if (recvp) {
					errno = msg_verify_data(&msg_peek,
								recvp,
								chunk_sz);
					if (errno) {
						perror("data verify msg_peek failed");
						goto out_errno;
					}
				}
			}
		}
		clock_gettime(CLOCK_MONOTONIC, &s->end);
	}

	msg_free_iov(&msg);
	msg_free_iov(&msg_peek);
	return err;
out_errno:
	msg_free_iov(&msg);
	msg_free_iov(&msg_peek);
	return errno;
}
/* Nanoseconds per second, for B/s -> GB/s conversion in the reports. */
static float giga = 1000000000;

/* Whole-second throughput estimates; callers guard against a zero
 * elapsed-seconds interval before calling (tv_nsec is ignored).
 */
static inline float sentBps(struct msg_stats s)
{
	return s.bytes_sent / (s.end.tv_sec - s.start.tv_sec);
}

static inline float recvdBps(struct msg_stats s)
{
	return s.bytes_recvd / (s.end.tv_sec - s.start.tv_sec);
}
/*
 * Run one sendmsg/sendpage test: fork an rx child reading from the peer
 * socket and a tx child writing into c1, wait for both, and report the
 * first child's non-zero exit status as the test result.
 */
static int sendmsg_test(struct sockmap_options *opt)
{
	float sent_Bps = 0, recvd_Bps = 0;
	int rx_fd, txpid, rxpid, err = 0;
	struct msg_stats s = {0};
	int iov_count = opt->iov_count;
	int iov_buf = opt->iov_length;
	int rx_status, tx_status;
	int cnt = opt->rate;

	errno = 0;

	/* base mode reads the same pair (p1); otherwise read the redirect
	 * target (p2).
	 */
	if (opt->base)
		rx_fd = p1;
	else
		rx_fd = p2;

	if (ktls) {
		/* Redirecting into non-TLS socket which sends into a TLS
		 * socket is not a valid test. So in this case lets not
		 * enable kTLS but still run the test.
		 */
		if (!txmsg_redir || txmsg_ingress) {
			err = sockmap_init_ktls(opt->verbose, rx_fd);
			if (err)
				return err;
		}
		err = sockmap_init_ktls(opt->verbose, c1);
		if (err)
			return err;
	}

	if (opt->tx_wait_mem) {
		struct timeval timeout;
		int rxtx_buf_len = 1024;

		/* Shrink both socket buffers so the tx side quickly runs
		 * out of send memory.
		 */
		timeout.tv_sec = 3;
		timeout.tv_usec = 0;

		err = setsockopt(c2, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(struct timeval));
		err |= setsockopt(c2, SOL_SOCKET, SO_SNDBUFFORCE, &rxtx_buf_len, sizeof(int));
		err |= setsockopt(p2, SOL_SOCKET, SO_RCVBUFFORCE, &rxtx_buf_len, sizeof(int));
		if (err) {
			perror("setsockopt failed()");
			return errno;
		}
	}

	rxpid = fork();
	if (rxpid == 0) {
		/* Shrink the per-iov expectation by the bytes the BPF
		 * program pops out of each message.
		 */
		if (txmsg_pop || txmsg_start_pop)
			iov_buf -= (txmsg_pop - txmsg_start_pop + 1);
		if (opt->drop_expected || txmsg_ktls_skb_drop)
			_exit(0);

		if (!iov_buf) /* zero bytes sent case */
			_exit(0);

		if (opt->sendpage)
			iov_count = 1;
		err = msg_loop(rx_fd, iov_count, iov_buf,
			       cnt, &s, false, opt);
		if (opt->verbose > 1)
			fprintf(stderr,
				"msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n",
				iov_count, iov_buf, cnt, err);
		if (s.end.tv_sec - s.start.tv_sec) {
			sent_Bps = sentBps(s);
			recvd_Bps = recvdBps(s);
		}
		if (opt->verbose > 1)
			fprintf(stdout,
				"rx_sendmsg: TX: %zuB %fB/s %fGB/s RX: %zuB %fB/s %fGB/s %s\n",
				s.bytes_sent, sent_Bps, sent_Bps/giga,
				s.bytes_recvd, recvd_Bps, recvd_Bps/giga,
				peek_flag ? "(peek_msg)" : "");
		/* Cork tests intentionally strand data; a timeout there is
		 * not a failure.
		 */
		if (err && txmsg_cork)
			err = 0;
		exit(err ? 1 : 0);
	} else if (rxpid == -1) {
		perror("msg_loop_rx");
		return errno;
	}

	if (opt->tx_wait_mem)
		close(c2);

	txpid = fork();
	if (txpid == 0) {
		if (opt->sendpage)
			err = msg_loop_sendpage(c1, iov_buf, cnt, &s, opt);
		else
			err = msg_loop(c1, iov_count, iov_buf,
				       cnt, &s, true, opt);

		if (err)
			fprintf(stderr,
				"msg_loop_tx: iov_count %i iov_buf %i cnt %i err %i\n",
				iov_count, iov_buf, cnt, err);
		if (s.end.tv_sec - s.start.tv_sec) {
			sent_Bps = sentBps(s);
			recvd_Bps = recvdBps(s);
		}
		if (opt->verbose > 1)
			fprintf(stdout,
				"tx_sendmsg: TX: %zuB %fB/s %f GB/s RX: %zuB %fB/s %fGB/s\n",
				s.bytes_sent, sent_Bps, sent_Bps/giga,
				s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
		exit(err ? 1 : 0);
	} else if (txpid == -1) {
		perror("msg_loop_tx");
		return errno;
	}

	assert(waitpid(rxpid, &rx_status, 0) == rxpid);
	assert(waitpid(txpid, &tx_status, 0) == txpid);
	if (WIFEXITED(rx_status)) {
		err = WEXITSTATUS(rx_status);
		if (err) {
			fprintf(stderr, "rx thread exited with err %d.\n", err);
			goto out;
		}
	}
	if (WIFEXITED(tx_status)) {
		err = WEXITSTATUS(tx_status);
		if (err)
			fprintf(stderr, "tx thread exited with err %d.\n", err);
	}
out:
	return err;
}
/*
 * Echo loop for the PING_PONG test mode: seed one buffer from c1, then
 * keep bouncing whatever arrives on any of the four sockets back to the
 * same socket until 'running' is cleared (SIGINT) or an error/timeout.
 *
 * @rate: seconds to sleep between iterations (0 = no delay)
 * Returns 0 on normal exit, a negative send/recv result on error.
 */
static int forever_ping_pong(int rate, struct sockmap_options *opt)
{
	struct timeval timeout;
	char buf[1024] = {0};
	int sc;

	timeout.tv_sec = 10;
	timeout.tv_usec = 0;

	/* Ping/Pong data from client to server */
	sc = send(c1, buf, sizeof(buf), 0);
	if (sc < 0) {
		perror("send failed()");
		return sc;
	}

	do {
		int s, rc, i, max_fd = p2;
		fd_set w;

		/* FD sets */
		FD_ZERO(&w);
		FD_SET(c1, &w);
		FD_SET(c2, &w);
		FD_SET(p1, &w);
		FD_SET(p2, &w);

		s = select(max_fd + 1, &w, NULL, NULL, &timeout);
		if (s == -1) {
			perror("select()");
			break;
		} else if (!s) {
			fprintf(stderr, "unexpected timeout\n");
			break;
		}

		for (i = 0; i <= max_fd && s > 0; ++i) {
			if (!FD_ISSET(i, &w))
				continue;

			s--;

			rc = recv(i, buf, sizeof(buf), 0);
			if (rc < 0) {
				if (errno != EWOULDBLOCK) {
					perror("recv failed()");
					return rc;
				}
				/* Spurious wakeup: nothing to echo back.
				 * Previously this fell through and called
				 * send() with a negative length.
				 */
				continue;
			}

			if (rc == 0) {
				close(i);
				break;
			}
			sc = send(i, buf, rc, 0);
			if (sc < 0) {
				perror("send failed()");
				return sc;
			}
		}

		if (rate)
			sleep(rate);

		if (opt->verbose) {
			printf(".");
			fflush(stdout);
		}
	} while (running);

	return 0;
}
/* Top-level run modes, selected via the -t option or defaulted. */
enum {
	SELFTESTS,	/* run the built-in suite (default) */
	PING_PONG,	/* forever_ping_pong() echo loop */
	SENDMSG,	/* sendmsg path with BPF programs attached */
	BASE,		/* sendmsg path, no BPF setup */
	BASE_SENDPAGE,	/* sendpage path, no BPF setup */
	SENDPAGE,	/* sendpage path with BPF programs attached */
};
/*
 * Set up BPF programs and maps according to the txmsg_* globals, create
 * the test sockets, run the selected test mode, then detach everything
 * and zero the maps so the next test starts clean.
 *
 * Returns the test's result (0 on success).
 */
static int run_options(struct sockmap_options *options, int cg_fd, int test)
{
	int i, key, next_key, err, tx_prog_fd = -1, zero = 0;

	/* If base test skip BPF setup */
	if (test == BASE || test == BASE_SENDPAGE)
		goto run;

	/* Attach programs to sockmap */
	if (!txmsg_omit_skb_parser) {
		err = bpf_prog_attach(prog_fd[0], map_fd[0],
				      BPF_SK_SKB_STREAM_PARSER, 0);
		if (err) {
			fprintf(stderr,
				"ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
				prog_fd[0], map_fd[0], err, strerror(errno));
			return err;
		}
	}

	err = bpf_prog_attach(prog_fd[1], map_fd[0],
			      BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err) {
		fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n",
			err, strerror(errno));
		return err;
	}

	/* Attach programs to TLS sockmap */
	if (txmsg_ktls_skb) {
		if (!txmsg_omit_skb_parser) {
			err = bpf_prog_attach(prog_fd[0], map_fd[8],
					      BPF_SK_SKB_STREAM_PARSER, 0);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
					prog_fd[0], map_fd[8], err, strerror(errno));
				return err;
			}
		}

		err = bpf_prog_attach(prog_fd[2], map_fd[8],
				      BPF_SK_SKB_STREAM_VERDICT, 0);
		if (err) {
			fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n",
				err, strerror(errno));
			return err;
		}
	}

	/* Attach to cgroups */
	err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0);
	if (err) {
		fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n",
			err, strerror(errno));
		return err;
	}

run:
	err = sockmap_init_sockets(options->verbose);
	if (err) {
		fprintf(stderr, "ERROR: test socket failed: %d\n", err);
		goto out;
	}

	/* Attach txmsg program to sockmap.  At most one verdict program
	 * is selected; 0 means "no SK_MSG program".
	 */
	if (txmsg_pass)
		tx_prog_fd = prog_fd[4];
	else if (txmsg_redir)
		tx_prog_fd = prog_fd[5];
	else if (txmsg_apply)
		tx_prog_fd = prog_fd[6];
	else if (txmsg_cork)
		tx_prog_fd = prog_fd[7];
	else if (txmsg_drop)
		tx_prog_fd = prog_fd[8];
	else
		tx_prog_fd = 0;

	if (tx_prog_fd) {
		int redir_fd, i = 0;

		err = bpf_prog_attach(tx_prog_fd,
				      map_fd[1], BPF_SK_MSG_VERDICT, 0);
		if (err) {
			fprintf(stderr,
				"ERROR: bpf_prog_attach (txmsg): %d (%s)\n",
				err, strerror(errno));
			goto out;
		}

		/* Slot 0 of the txmsg sockmap is the sending socket */
		err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY);
		if (err) {
			fprintf(stderr,
				"ERROR: bpf_map_update_elem (txmsg): %d (%s\n",
				err, strerror(errno));
			goto out;
		}

		if (txmsg_redir)
			redir_fd = c2;
		else
			redir_fd = c1;

		err = bpf_map_update_elem(map_fd[2], &i, &redir_fd, BPF_ANY);
		if (err) {
			fprintf(stderr,
				"ERROR: bpf_map_update_elem (txmsg): %d (%s\n",
				err, strerror(errno));
			goto out;
		}

		if (txmsg_apply) {
			err = bpf_map_update_elem(map_fd[3],
						  &i, &txmsg_apply, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (apply_bytes): %d (%s\n",
					err, strerror(errno));
				goto out;
			}
		}

		if (txmsg_cork) {
			err = bpf_map_update_elem(map_fd[4],
						  &i, &txmsg_cork, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (cork_bytes): %d (%s\n",
					err, strerror(errno));
				goto out;
			}
		}

		/* map_fd[5] (sock_bytes) carries the pull/push/pop
		 * parameters at fixed slots: 0=start 1=end 2=start_push
		 * 3=end_push 4=start_pop 5=pop.
		 */
		if (txmsg_start) {
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_start, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (txmsg_start): %d (%s)\n",
					err, strerror(errno));
				goto out;
			}
		}

		if (txmsg_end) {
			i = 1;
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_end, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (txmsg_end): %d (%s)\n",
					err, strerror(errno));
				goto out;
			}
		}

		if (txmsg_start_push) {
			i = 2;
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_start_push, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (txmsg_start_push): %d (%s)\n",
					err, strerror(errno));
				goto out;
			}
		}

		if (txmsg_end_push) {
			i = 3;
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_end_push, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem %i@%i (txmsg_end_push): %d (%s)\n",
					txmsg_end_push, i, err, strerror(errno));
				goto out;
			}
		}

		if (txmsg_start_pop) {
			i = 4;
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_start_pop, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem %i@%i (txmsg_start_pop): %d (%s)\n",
					txmsg_start_pop, i, err, strerror(errno));
				goto out;
			}
		} else {
			/* Clear a value possibly left by a previous test */
			i = 4;
			bpf_map_update_elem(map_fd[5],
					    &i, &txmsg_start_pop, BPF_ANY);
		}

		if (txmsg_pop) {
			i = 5;
			err = bpf_map_update_elem(map_fd[5],
						  &i, &txmsg_pop, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem %i@%i (txmsg_pop): %d (%s)\n",
					txmsg_pop, i, err, strerror(errno));
				goto out;
			}
		} else {
			/* Clear a value possibly left by a previous test */
			i = 5;
			bpf_map_update_elem(map_fd[5],
					    &i, &txmsg_pop, BPF_ANY);
		}

		if (txmsg_ingress) {
			int in = BPF_F_INGRESS;

			/* Errors below are reported but not fatal */
			i = 0;
			err = bpf_map_update_elem(map_fd[6], &i, &in, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (txmsg_ingress): %d (%s)\n",
					err, strerror(errno));
			}
			i = 1;
			err = bpf_map_update_elem(map_fd[1], &i, &p1, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (p1 txmsg): %d (%s)\n",
					err, strerror(errno));
			}
			err = bpf_map_update_elem(map_fd[2], &i, &p1, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (p1 redir): %d (%s)\n",
					err, strerror(errno));
			}

			i = 2;
			err = bpf_map_update_elem(map_fd[2], &i, &p2, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (p2 txmsg): %d (%s)\n",
					err, strerror(errno));
			}
		}

		if (txmsg_ktls_skb) {
			int ingress = BPF_F_INGRESS;

			i = 0;
			err = bpf_map_update_elem(map_fd[8], &i, &p2, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (c1 sockmap): %d (%s)\n",
					err, strerror(errno));
			}

			if (txmsg_ktls_skb_redir) {
				i = 1;
				err = bpf_map_update_elem(map_fd[7],
							  &i, &ingress, BPF_ANY);
				if (err) {
					fprintf(stderr,
						"ERROR: bpf_map_update_elem (txmsg_ingress): %d (%s)\n",
						err, strerror(errno));
				}
			}

			if (txmsg_ktls_skb_drop) {
				i = 1;
				err = bpf_map_update_elem(map_fd[7], &i, &i, BPF_ANY);
			}
		}

		if (txmsg_redir_skb) {
			int skb_fd = (test == SENDMSG || test == SENDPAGE) ?
					p2 : p1;
			int ingress = BPF_F_INGRESS;

			i = 0;
			err = bpf_map_update_elem(map_fd[7],
						  &i, &ingress, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (txmsg_ingress): %d (%s)\n",
					err, strerror(errno));
			}

			i = 3;
			err = bpf_map_update_elem(map_fd[0], &i, &skb_fd, BPF_ANY);
			if (err) {
				fprintf(stderr,
					"ERROR: bpf_map_update_elem (c1 sockmap): %d (%s)\n",
					err, strerror(errno));
			}
		}
	}

	if (skb_use_parser) {
		i = 2;
		err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
	}

	if (txmsg_drop)
		options->drop_expected = true;

	if (test == PING_PONG)
		err = forever_ping_pong(options->rate, options);
	else if (test == SENDMSG) {
		options->base = false;
		options->sendpage = false;
		err = sendmsg_test(options);
	} else if (test == SENDPAGE) {
		options->base = false;
		options->sendpage = true;
		err = sendmsg_test(options);
	} else if (test == BASE) {
		options->base = true;
		options->sendpage = false;
		err = sendmsg_test(options);
	} else if (test == BASE_SENDPAGE) {
		options->base = true;
		options->sendpage = true;
		err = sendmsg_test(options);
	} else
		fprintf(stderr, "unknown test\n");
out:
	/* Detatch and zero all the maps */
	bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS);
	bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER);
	bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT);
	bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER);
	bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT);

	/* NOTE(review): tx_prog_fd is set to 0 (not kept at -1) when no
	 * SK_MSG program was chosen, so this also "detaches" fd 0 in that
	 * case — harmless but presumably unintended; confirm.
	 */
	if (tx_prog_fd >= 0)
		bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT);

	/* NOTE(review): only the first 8 maps are zeroed here although 9
	 * map names exist (tls_sock_map at index 8 is skipped) — confirm
	 * whether that is intentional.
	 */
	for (i = 0; i < 8; i++) {
		key = next_key = 0;
		bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY);
		while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) {
			bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY);
			key = next_key;
		}
	}

	close(s1);
	close(s2);
	close(p1);
	close(p2);
	close(c1);
	close(c2);
	return err;
}
/* Map a test-mode enum value to a short human readable name. */
static char *test_to_str(int test)
{
	if (test == SENDMSG)
		return "sendmsg";
	if (test == SENDPAGE)
		return "sendpage";
	return "unknown";
}
/*
 * Append 'src' to the NUL-terminated string in 'dst', never writing past
 * dst_cap bytes; silently truncates and leaves 'dst' untouched when only
 * the terminator would fit.
 */
static void append_str(char *dst, const char *src, size_t dst_cap)
{
	size_t used = strlen(dst);

	if (dst_cap - used <= 1) /* room left for the NUL byte only */
		return;
	snprintf(dst + used, dst_cap - used, "%s", src);
}
#define OPTSTRING 60
/*
 * Render the currently-set txmsg_*/ktls/peek globals into a short
 * comma-separated summary string for the per-test log line.
 * 'options' must have room for OPTSTRING bytes; overflow is truncated
 * by append_str().
 */
static void test_options(char *options)
{
	char tstr[OPTSTRING];

	memset(options, 0, OPTSTRING);

	if (txmsg_pass)
		append_str(options, "pass,", OPTSTRING);
	if (txmsg_redir)
		append_str(options, "redir,", OPTSTRING);
	if (txmsg_drop)
		append_str(options, "drop,", OPTSTRING);
	if (txmsg_apply) {
		snprintf(tstr, OPTSTRING, "apply %d,", txmsg_apply);
		append_str(options, tstr, OPTSTRING);
	}
	if (txmsg_cork) {
		snprintf(tstr, OPTSTRING, "cork %d,", txmsg_cork);
		append_str(options, tstr, OPTSTRING);
	}
	if (txmsg_start) {
		snprintf(tstr, OPTSTRING, "start %d,", txmsg_start);
		append_str(options, tstr, OPTSTRING);
	}
	if (txmsg_end) {
		snprintf(tstr, OPTSTRING, "end %d,", txmsg_end);
		append_str(options, tstr, OPTSTRING);
	}
	if (txmsg_start_pop) {
		snprintf(tstr, OPTSTRING, "pop (%d,%d),",
			 txmsg_start_pop, txmsg_start_pop + txmsg_pop);
		append_str(options, tstr, OPTSTRING);
	}
	/* NOTE(review): push parameters (txmsg_start_push/end_push) are not
	 * reported here — confirm whether that is intentional.
	 */
	if (txmsg_ingress)
		append_str(options, "ingress,", OPTSTRING);
	if (txmsg_redir_skb)
		append_str(options, "redir_skb,", OPTSTRING);
	if (txmsg_ktls_skb)
		append_str(options, "ktls_skb,", OPTSTRING);
	if (ktls)
		append_str(options, "ktls,", OPTSTRING);
	if (peek_flag)
		append_str(options, "peek,", OPTSTRING);
}
/*
 * Run one test via run_options(), printing a one-line summary when
 * verbose and updating the global pass/fail counters.
 * Returns the test result (0 on success).
 */
static int __test_exec(int cgrp, int test, struct sockmap_options *opt)
{
	char *options = calloc(OPTSTRING, sizeof(char));
	int err;

	/* Previously a failed allocation was passed straight into
	 * test_options()/fprintf(); fail cleanly instead.
	 */
	if (!options) {
		perror("calloc");
		return -ENOMEM;
	}

	if (test == SENDPAGE)
		opt->sendpage = true;
	else
		opt->sendpage = false;

	if (txmsg_drop)
		opt->drop_expected = true;
	else
		opt->drop_expected = false;

	test_options(options);

	if (opt->verbose) {
		fprintf(stdout,
			" [TEST %i]: (%i, %i, %i, %s, %s): ",
			test_cnt, opt->rate, opt->iov_count, opt->iov_length,
			test_to_str(test), options);
		fflush(stdout);
	}
	err = run_options(opt, cgrp, test);
	if (opt->verbose)
		fprintf(stdout, " %s\n", !err ? "PASS" : "FAILED");
	test_cnt++;
	!err ? passed++ : failed++;
	free(options);
	return err;
}
/*
 * Run one test and record its result: sockmap objects use the sendmsg
 * path, everything else (sockhash) uses the sendpage path.
 */
static void test_exec(int cgrp, struct sockmap_options *opt)
{
	int mode = strcmp(opt->map, BPF_SOCKMAP_FILENAME) == 0 ?
			SENDMSG : SENDPAGE;

	test_start();
	if (__test_exec(cgrp, mode, opt))
		test_fail();
}
/* Single-send shapes: 1x1B, 1024x1B and 1x1024B, one iteration each. */
static void test_send_one(struct sockmap_options *opt, int cgrp)
{
	static const int shapes[][2] = {
		/* iov_length, iov_count */
		{ 1, 1 },
		{ 1, 1024 },
		{ 1024, 1 },
	};
	int i;

	for (i = 0; i < 3; i++) {
		opt->iov_length = shapes[i][0];
		opt->iov_count = shapes[i][1];
		opt->rate = 1;
		test_exec(cgrp, opt);
	}
}
/* Many small sends: 512 x 3B, then 100 x 5B, one iov each. */
static void test_send_many(struct sockmap_options *opt, int cgrp)
{
	opt->iov_count = 1;
	opt->iov_length = 3;
	opt->rate = 512;
	test_exec(cgrp, opt);

	opt->iov_count = 1;
	opt->iov_length = 5;
	opt->rate = 100;
	test_exec(cgrp, opt);
}
/* One large burst: two iterations of 1024 iovs x 256B. */
static void test_send_large(struct sockmap_options *opt, int cgrp)
{
	opt->rate = 2;
	opt->iov_count = 1024;
	opt->iov_length = 256;
	test_exec(cgrp, opt);
}
/* Run the full small/many/large send matrix with the current txmsg_*
 * settings; yields afterwards to give forked children a chance to run.
 */
static void test_send(struct sockmap_options *opt, int cgrp)
{
	test_send_one(opt, cgrp);
	test_send_many(opt, cgrp);
	test_send_large(opt, cgrp);
	sched_yield();
}
/* SK_MSG pass verdict across all send shapes. */
static void test_txmsg_pass(int cgrp, struct sockmap_options *opt)
{
	/* Test small and large iov_count values with pass/redir/apply/cork */
	txmsg_pass = 1;
	test_send(opt, cgrp);
}
/* SK_MSG redirect verdict across all send shapes. */
static void test_txmsg_redir(int cgrp, struct sockmap_options *opt)
{
	txmsg_redir = 1;
	test_send(opt, cgrp);
}
/* Redirect while the sender is starved of socket memory (tx_wait_mem
 * shrinks the buffers in sendmsg_test() and expects EACCES on send).
 */
static void test_txmsg_redir_wait_sndmem(int cgrp, struct sockmap_options *opt)
{
	txmsg_redir = 1;
	opt->tx_wait_mem = true;
	test_send_large(opt, cgrp);
	opt->tx_wait_mem = false;
}
/* SK_MSG drop verdict: every send is expected to fail. */
static void test_txmsg_drop(int cgrp, struct sockmap_options *opt)
{
	txmsg_drop = 1;
	test_send(opt, cgrp);
}
/* Redirect with the BPF_F_INGRESS flag; clears pass/drop first since
 * those globals may be left set by earlier tests.
 */
static void test_txmsg_ingress_redir(int cgrp, struct sockmap_options *opt)
{
	txmsg_pass = txmsg_drop = 0;
	txmsg_ingress = txmsg_redir = 1;
	test_send(opt, cgrp);
}
/*
 * skb ingress tests with kTLS: exercise pass, drop and redirect of the
 * skb verdict program, then the same set with the skb parser omitted.
 * Saves and restores data_test/ktls around the run.
 */
static void test_txmsg_skb(int cgrp, struct sockmap_options *opt)
{
	bool data = opt->data_test;
	int k = ktls;

	opt->data_test = true;
	ktls = 1;

	txmsg_pass = txmsg_drop = 0;
	txmsg_ingress = txmsg_redir = 0;
	txmsg_ktls_skb = 1;
	txmsg_pass = 1;

	/* Using data verification so ensure iov layout is
	 * expected from test receiver side. e.g. has enough
	 * bytes to write test code.
	 */
	opt->iov_length = 100;
	opt->iov_count = 1;
	opt->rate = 1;
	test_exec(cgrp, opt);

	txmsg_ktls_skb_drop = 1;
	test_exec(cgrp, opt);

	txmsg_ktls_skb_drop = 0;
	txmsg_ktls_skb_redir = 1;
	test_exec(cgrp, opt);
	txmsg_ktls_skb_redir = 0;

	/* Tests that omit skb_parser */
	txmsg_omit_skb_parser = 1;
	ktls = 0;
	txmsg_ktls_skb = 0;
	test_exec(cgrp, opt);

	txmsg_ktls_skb_drop = 1;
	test_exec(cgrp, opt);
	txmsg_ktls_skb_drop = 0;

	/* NOTE(review): txmsg_ktls_skb_redir is set to 1 here and never
	 * cleared before returning — later tests inherit it; confirm.
	 */
	txmsg_ktls_skb_redir = 1;
	test_exec(cgrp, opt);

	ktls = 1;
	test_exec(cgrp, opt);
	txmsg_omit_skb_parser = 0;

	opt->data_test = data;
	ktls = k;
}
/* Test cork with hung data. This tests poor usage patterns where
* cork can leave data on the ring if user program is buggy and
* doesn't flush them somehow. They do take some time however
* because they wait for a timeout. Test pass, redir and cork with
* apply logic. Use cork size of 4097 with send_large to avoid
* aligning cork size with send size.
*/
/* See the block comment above: cork sizes of 4097 deliberately misalign
 * with the send size so data is left stranded and the rx side times out.
 * NOTE(review): txmsg_redir/apply/cork are left set (1/4097/4097) on
 * return — later tests inherit them; confirm.
 */
static void test_txmsg_cork_hangs(int cgrp, struct sockmap_options *opt)
{
	/* pass + cork + apply */
	txmsg_pass = 1;
	txmsg_redir = 0;
	txmsg_cork = 4097;
	txmsg_apply = 4097;
	test_send_large(opt, cgrp);

	/* redir + cork, no apply */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_apply = 0;
	txmsg_cork = 4097;
	test_send_large(opt, cgrp);

	/* redir + cork + apply */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_apply = 4097;
	txmsg_cork = 4097;
	test_send_large(opt, cgrp);
}
/* sk_msg_pull_data tests: small range, >4k range, and combinations
 * with redirect and cork.
 */
static void test_txmsg_pull(int cgrp, struct sockmap_options *opt)
{
	/* Test basic start/end */
	txmsg_start = 1;
	txmsg_end = 2;
	test_send(opt, cgrp);

	/* Test >4k pull */
	txmsg_start = 4096;
	txmsg_end = 9182;
	test_send_large(opt, cgrp);

	/* Test pull + redirect */
	/* NOTE(review): despite the comment, redirect is disabled here
	 * (txmsg_redir = 0) — confirm whether this is intentional.
	 */
	txmsg_redir = 0;
	txmsg_start = 1;
	txmsg_end = 2;
	test_send(opt, cgrp);

	/* Test pull + cork */
	txmsg_redir = 0;
	txmsg_cork = 512;
	txmsg_start = 1;
	txmsg_end = 2;
	test_send_many(opt, cgrp);

	/* Test pull + cork + redirect */
	txmsg_redir = 1;
	txmsg_cork = 512;
	txmsg_start = 1;
	txmsg_end = 2;
	test_send_many(opt, cgrp);
}
/* sk_msg_pop_data tests: small pop, >4k pop, and combinations with
 * redirect and cork.
 */
static void test_txmsg_pop(int cgrp, struct sockmap_options *opt)
{
	/* Test basic pop */
	txmsg_start_pop = 1;
	txmsg_pop = 2;
	test_send_many(opt, cgrp);

	/* Test pop with >4k */
	txmsg_start_pop = 4096;
	txmsg_pop = 4096;
	test_send_large(opt, cgrp);

	/* Test pop + redirect */
	txmsg_redir = 1;
	txmsg_start_pop = 1;
	txmsg_pop = 2;
	test_send_many(opt, cgrp);

	/* Test pop + cork */
	txmsg_redir = 0;
	txmsg_cork = 512;
	txmsg_start_pop = 1;
	txmsg_pop = 2;
	test_send_many(opt, cgrp);

	/* Test pop + redirect + cork */
	txmsg_redir = 1;
	txmsg_cork = 4;
	txmsg_start_pop = 1;
	txmsg_pop = 2;
	test_send_many(opt, cgrp);
}
/* sk_msg_push_data tests: small push, >4k push, and combinations with
 * redirect and cork.
 */
static void test_txmsg_push(int cgrp, struct sockmap_options *opt)
{
	/* Test basic push */
	txmsg_start_push = 1;
	txmsg_end_push = 1;
	test_send(opt, cgrp);

	/* Test push 4kB >4k */
	txmsg_start_push = 4096;
	txmsg_end_push = 4096;
	test_send_large(opt, cgrp);

	/* Test push + redirect */
	txmsg_redir = 1;
	txmsg_start_push = 1;
	txmsg_end_push = 2;
	test_send_many(opt, cgrp);

	/* Test push + cork */
	txmsg_redir = 0;
	txmsg_cork = 512;
	txmsg_start_push = 1;
	txmsg_end_push = 2;
	test_send_many(opt, cgrp);
}
/* Combine push (bytes 1-10) with an overlapping pop (4 bytes at 5) on
 * one large send.
 */
static void test_txmsg_push_pop(int cgrp, struct sockmap_options *opt)
{
	txmsg_start_pop = 5;
	txmsg_pop = 4;
	txmsg_start_push = 1;
	txmsg_end_push = 10;
	test_send_large(opt, cgrp);
}
/* bpf_msg_apply_bytes tests: apply 1B then 1024B, each with the pass,
 * redirect, and ingress-redirect verdicts (cork disabled throughout).
 */
static void test_txmsg_apply(int cgrp, struct sockmap_options *opt)
{
	/* apply 1B, pass */
	txmsg_pass = 1;
	txmsg_redir = 0;
	txmsg_ingress = 0;
	txmsg_apply = 1;
	txmsg_cork = 0;
	test_send_one(opt, cgrp);

	/* apply 1B, redirect */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_ingress = 0;
	txmsg_apply = 1;
	txmsg_cork = 0;
	test_send_one(opt, cgrp);

	/* apply 1B, ingress redirect */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_ingress = 1;
	txmsg_apply = 1;
	txmsg_cork = 0;
	test_send_one(opt, cgrp);

	/* apply 1024B, pass */
	txmsg_pass = 1;
	txmsg_redir = 0;
	txmsg_ingress = 0;
	txmsg_apply = 1024;
	txmsg_cork = 0;
	test_send_large(opt, cgrp);

	/* apply 1024B, redirect */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_ingress = 0;
	txmsg_apply = 1024;
	txmsg_cork = 0;
	test_send_large(opt, cgrp);

	/* apply 1024B, ingress redirect */
	txmsg_pass = 0;
	txmsg_redir = 1;
	txmsg_ingress = 1;
	txmsg_apply = 1024;
	txmsg_cork = 0;
	test_send_large(opt, cgrp);
}
/* bpf_msg_cork_bytes tests: 1B cork with pass, alone and combined with
 * a 1B apply.
 */
static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
{
	txmsg_pass = 1;
	txmsg_redir = 0;
	txmsg_apply = 0;
	txmsg_cork = 1;
	test_send(opt, cgrp);

	txmsg_pass = 1;
	txmsg_redir = 0;
	txmsg_apply = 1;
	txmsg_cork = 1;
	test_send(opt, cgrp);
}
/* skb parser returning a fixed length: 512B normally, 570B under kTLS
 * (presumably to cover the TLS record overhead — confirm).
 */
static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
{
	txmsg_pass = 1;
	skb_use_parser = 512;
	if (ktls == 1)
		skb_use_parser = 570;
	opt->iov_length = 256;
	opt->iov_count = 1;
	opt->rate = 2;
	test_exec(cgrp, opt);
}
/* skb parser shorter than the send size, with receive-length checking;
 * skipped under kTLS.
 */
static void test_txmsg_ingress_parser2(int cgrp, struct sockmap_options *opt)
{
	if (ktls == 1)
		return;
	skb_use_parser = 10;
	opt->iov_length = 20;
	opt->iov_count = 1;
	opt->rate = 1;
	opt->check_recved_len = true;
	test_exec(cgrp, opt);
	opt->check_recved_len = false;
}
/* BPF map names looked up in the object file; the array order defines
 * the map_fd[]/maps[] indices used throughout run_options().
 */
char *map_names[] = {
	"sock_map",		/* map_fd[0] */
	"sock_map_txmsg",	/* map_fd[1] */
	"sock_map_redir",	/* map_fd[2] */
	"sock_apply_bytes",	/* map_fd[3] */
	"sock_cork_bytes",	/* map_fd[4] */
	"sock_bytes",		/* map_fd[5]: pull/push/pop parameters */
	"sock_redir_flags",	/* map_fd[6] */
	"sock_skb_opts",	/* map_fd[7] */
	"tls_sock_map",		/* map_fd[8] */
};
/* Expected attach type for each program in the BPF object, applied in
 * section order by populate_progs().
 */
int prog_attach_type[] = {
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
	BPF_SK_MSG_VERDICT,
};
/* Program type for each program in the BPF object; must stay in sync
 * with prog_attach_type[] above.
 */
int prog_type[] = {
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_SK_MSG,
};
/*
 * Open and load the BPF object file, setting each program's type and
 * expected attach type from prog_type[]/prog_attach_type[], then fill
 * the global prog_fd[] and map_fd[] tables.
 * Returns 0 on success, -1 on any failure.
 */
static int populate_progs(char *bpf_file)
{
	struct bpf_program *prog;
	struct bpf_object *obj;
	int i = 0;
	long err;

	obj = bpf_object__open(bpf_file);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		printf("Unable to load eBPF objects in file '%s' : %s\n",
		       bpf_file, err_buf);
		return -1;
	}

	bpf_object__for_each_program(prog, obj) {
		bpf_program__set_type(prog, prog_type[i]);
		bpf_program__set_expected_attach_type(prog,
						      prog_attach_type[i]);
		i++;
	}

	/* Previously the load result was assigned to 'i' and immediately
	 * overwritten, so load failures went unnoticed until every later
	 * attach failed.
	 */
	err = bpf_object__load(obj);
	if (err) {
		printf("Unable to load eBPF program file '%s'\n", bpf_file);
		return -1;
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		prog_fd[i] = bpf_program__fd(prog);
		i++;
	}

	for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
		maps[i] = bpf_object__find_map_by_name(obj, map_names[i]);
		map_fd[i] = bpf_map__fd(maps[i]);
		if (map_fd[i] < 0) {
			fprintf(stderr, "load_bpf_file: (%i) %s\n",
				map_fd[i], strerror(errno));
			return -1;
		}
	}
	return 0;
}
/* The self-test suite: each entry pairs a title (matched against the
 * -n whitelist / -b blacklist substrings) with its tester function.
 */
struct _test test[] = {
	{"txmsg test passthrough", test_txmsg_pass},
	{"txmsg test redirect", test_txmsg_redir},
	{"txmsg test redirect wait send mem", test_txmsg_redir_wait_sndmem},
	{"txmsg test drop", test_txmsg_drop},
	{"txmsg test ingress redirect", test_txmsg_ingress_redir},
	{"txmsg test skb", test_txmsg_skb},
	{"txmsg test apply", test_txmsg_apply},
	{"txmsg test cork", test_txmsg_cork},
	{"txmsg test hanging corks", test_txmsg_cork_hangs},
	{"txmsg test push_data", test_txmsg_push},
	{"txmsg test pull-data", test_txmsg_pull},
	{"txmsg test pop-data", test_txmsg_pop},
	{"txmsg test push/pop data", test_txmsg_push_pop},
	{"txmsg test ingress parser", test_txmsg_ingress_parser},
	{"txmsg test ingress parser2", test_txmsg_ingress_parser2},
};
/*
 * Return 0 when the test should run: either no whitelist is set, or one
 * of its comma-separated entries is a substring of the prepend string,
 * the map filename, or the test title.  Returns -EINVAL when filtered
 * out, -ENOMEM on allocation failure.
 */
static int check_whitelist(struct _test *t, struct sockmap_options *opt)
{
	char *entry, *ptr;
	int err = -EINVAL;

	if (!opt->whitelist)
		return 0;
	ptr = strdup(opt->whitelist);
	if (!ptr)
		return -ENOMEM;
	entry = strtok(ptr, ",");
	while (entry) {
		if ((opt->prepend && strstr(opt->prepend, entry) != 0) ||
		    strstr(opt->map, entry) != 0 ||
		    strstr(t->title, entry) != 0) {
			err = 0;
			break;
		}
		entry = strtok(NULL, ",");
	}
	/* Previously the strdup()'d copy was never freed, leaking once
	 * per test per run.
	 */
	free(ptr);
	return err;
}
/*
 * Return 0 when the test is blacklisted (an entry of the comma-separated
 * blacklist matches the prepend string, map filename, or test title);
 * -EINVAL when it should run, -ENOMEM on allocation failure.
 */
static int check_blacklist(struct _test *t, struct sockmap_options *opt)
{
	char *entry, *ptr;
	int err = -EINVAL;

	if (!opt->blacklist)
		return -EINVAL;
	ptr = strdup(opt->blacklist);
	if (!ptr)
		return -ENOMEM;
	entry = strtok(ptr, ",");
	while (entry) {
		if ((opt->prepend && strstr(opt->prepend, entry) != 0) ||
		    strstr(opt->map, entry) != 0 ||
		    strstr(t->title, entry) != 0) {
			err = 0;
			break;
		}
		entry = strtok(NULL, ",");
	}
	/* Previously the strdup()'d copy was never freed, leaking once
	 * per test per run.
	 */
	free(ptr);
	return err;
}
/* Load the BPF object named by opt->map and run every suite entry that
 * passes the whitelist/blacklist filters.  Returns the populate_progs()
 * result (individual test failures are tracked via the subtest helpers,
 * not the return value).
 */
static int __test_selftests(int cg_fd, struct sockmap_options *opt)
{
	int i, err;

	err = populate_progs(opt->map);
	if (err < 0) {
		fprintf(stderr, "ERROR: (%i) load bpf failed\n", err);
		return err;
	}

	/* Tests basic commands and APIs */
	for (i = 0; i < ARRAY_SIZE(test); i++) {
		struct _test t = test[i];

		if (check_whitelist(&t, opt) != 0)
			continue;
		if (check_blacklist(&t, opt) == 0)
			continue;

		test_start_subtest(&t, opt);
		t.tester(cg_fd, opt);
		test_end_subtest();
	}

	return err;
}
/* Run the suite against the BPF_MAP_TYPE_SOCKMAP object file. */
static void test_selftests_sockmap(int cg_fd, struct sockmap_options *opt)
{
	opt->map = BPF_SOCKMAP_FILENAME;
	__test_selftests(cg_fd, opt);
}
/* Run the suite against the BPF_MAP_TYPE_SOCKHASH object file. */
static void test_selftests_sockhash(int cg_fd, struct sockmap_options *opt)
{
	opt->map = BPF_SOCKHASH_FILENAME;
	__test_selftests(cg_fd, opt);
}
/* Run the sockhash suite again with kTLS enabled; "ktls" is prepended
 * so the whitelist/blacklist can target these runs specifically.
 */
static void test_selftests_ktls(int cg_fd, struct sockmap_options *opt)
{
	opt->map = BPF_SOCKHASH_FILENAME;
	opt->prepend = "ktls";
	ktls = 1;
	__test_selftests(cg_fd, opt);
	ktls = 0;
}
/* Default mode: sockmap, sockhash and kTLS suites, then the summary.
 * Always returns 0; pass/fail is reported via test_print_results().
 */
static int test_selftest(int cg_fd, struct sockmap_options *opt)
{
	test_selftests_sockmap(cg_fd, opt);
	test_selftests_sockhash(cg_fd, opt);
	test_selftests_ktls(cg_fd, opt);
	test_print_results();
	return 0;
}
/*
 * Parse options, join (or create) the test cgroup, and either run the
 * full self-test suite (default) or a single mode selected with -t.
 */
int main(int argc, char **argv)
{
	int iov_count = 1, length = 1024, rate = 1;
	struct sockmap_options options = {0};
	int opt, longindex, err, cg_fd = 0;
	char *bpf_file = BPF_SOCKMAP_FILENAME;
	int test = SELFTESTS;
	bool cg_created = 0;

	/* The 's','e','w','x','a','k' cases below are reached via
	 * long_options entries (not in the short option string).
	 */
	while ((opt = getopt_long(argc, argv, ":dhv:c:r:i:l:t:p:q:n:b:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 's':
			txmsg_start = atoi(optarg);
			break;
		case 'e':
			txmsg_end = atoi(optarg);
			break;
		case 'p':
			txmsg_start_push = atoi(optarg);
			break;
		case 'q':
			txmsg_end_push = atoi(optarg);
			break;
		case 'w':
			txmsg_start_pop = atoi(optarg);
			break;
		case 'x':
			txmsg_pop = atoi(optarg);
			break;
		case 'a':
			txmsg_apply = atoi(optarg);
			break;
		case 'k':
			txmsg_cork = atoi(optarg);
			break;
		case 'c':
			/* NOTE(review): O_RDONLY sits in the mode argument
			 * slot here; it works only because O_RDONLY == 0 —
			 * presumably O_DIRECTORY | O_RDONLY was intended.
			 */
			cg_fd = open(optarg, O_DIRECTORY, O_RDONLY);
			if (cg_fd < 0) {
				fprintf(stderr,
					"ERROR: (%i) open cg path failed: %s\n",
					cg_fd, optarg);
				return cg_fd;
			}
			break;
		case 'r':
			rate = atoi(optarg);
			break;
		case 'v':
			options.verbose = 1;
			if (optarg)
				options.verbose = atoi(optarg);
			break;
		case 'i':
			iov_count = atoi(optarg);
			break;
		case 'l':
			length = atoi(optarg);
			break;
		case 'd':
			options.data_test = true;
			break;
		case 't':
			if (strcmp(optarg, "ping") == 0) {
				test = PING_PONG;
			} else if (strcmp(optarg, "sendmsg") == 0) {
				test = SENDMSG;
			} else if (strcmp(optarg, "base") == 0) {
				test = BASE;
			} else if (strcmp(optarg, "base_sendpage") == 0) {
				test = BASE_SENDPAGE;
			} else if (strcmp(optarg, "sendpage") == 0) {
				test = SENDPAGE;
			} else {
				usage(argv);
				return -1;
			}
			break;
		case 'n':
			options.whitelist = strdup(optarg);
			if (!options.whitelist)
				return -ENOMEM;
			break;
		case 'b':
			options.blacklist = strdup(optarg);
			if (!options.blacklist)
				return -ENOMEM;
			/* NOTE(review): no break — falls through into
			 * "case 0" which just breaks; harmless today but
			 * presumably unintended.
			 */
		case 0:
			break;
		case 'h':
		default:
			usage(argv);
			return -1;
		}
	}

	if (!cg_fd) {
		cg_fd = cgroup_setup_and_join(CG_PATH);
		if (cg_fd < 0)
			return cg_fd;
		cg_created = 1;
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	if (test == SELFTESTS) {
		err = test_selftest(cg_fd, &options);
		goto out;
	}

	err = populate_progs(bpf_file);
	if (err) {
		fprintf(stderr, "populate program: (%s) %s\n",
			bpf_file, strerror(errno));
		return 1;
	}
	running = 1;

	/* catch SIGINT */
	signal(SIGINT, running_handler);

	options.iov_count = iov_count;
	options.iov_length = length;
	options.rate = rate;

	err = run_options(&options, cg_fd, test);
out:
	if (options.whitelist)
		free(options.whitelist);
	if (options.blacklist)
		free(options.blacklist);
	if (cg_created)
		cleanup_cgroup_environment();
	close(cg_fd);
	return err;
}
/* SIGINT handler: clear the flag that keeps forever_ping_pong() looping. */
void running_handler(int a)
{
	running = 0;
}
| linux-master | tools/testing/selftests/bpf/test_sockmap.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/filter.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf_endian.h>
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define CG_PATH			"/foo"	/* cgroup the test programs attach to */
#define MAX_INSNS		512	/* capacity of sysctl_test::insns[] */
#define FIXUP_SYSCTL_VALUE	0	/* marker patched into fixup_value_insn */

/* Shared log buffer passed to BPF program loads (verifier output). */
char bpf_log_buf[BPF_LOG_BUF_SIZE];
/* Declarative description of one BPF_CGROUP_SYSCTL test case. */
struct sysctl_test {
	const char *descr;		/* human readable test name */
	size_t fixup_value_insn;	/* insn index to patch with FIXUP_SYSCTL_VALUE */
	struct bpf_insn insns[MAX_INSNS];	/* raw program; used unless prog_file is set */
	const char *prog_file;		/* alternative: BPF object file to load */
	enum bpf_attach_type attach_type;	/* type used for attach (0 = invalid on purpose) */
	const char *sysctl;		/* sysctl path relative to /proc/sys */
	int open_flags;			/* O_RDONLY/O_WRONLY for the sysctl file */
	int seek;			/* file offset to position at before the access */
	const char *newval;		/* value to write, when testing writes */
	const char *oldval;		/* expected value read back, when set */
	enum {
		LOAD_REJECT,		/* program must fail to load/verify */
		ATTACH_REJECT,		/* attach must fail */
		OP_EPERM,		/* the read/write must be denied */
		SUCCESS,		/* the read/write must succeed */
	} result;
};
static struct sysctl_test tests[] = {
{
.descr = "sysctl wrong attach_type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = 0,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = ATTACH_REJECT,
},
{
.descr = "sysctl:read allow all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl:read deny all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = OP_EPERM,
},
{
.descr = "ctx:write sysctl:read read ok",
.insns = {
/* If (write) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, write)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 1, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "ctx:write sysctl:write read ok",
.insns = {
/* If (write) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, write)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 1, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/domainname",
.open_flags = O_WRONLY,
.newval = "(none)", /* same as default, should fail anyway */
.result = OP_EPERM,
},
{
.descr = "ctx:write sysctl:write read ok narrow",
.insns = {
/* u64 w = (u16)write & 1; */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, write)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, write) + 2),
#endif
BPF_ALU64_IMM(BPF_AND, BPF_REG_7, 1),
/* return 1 - w; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/domainname",
.open_flags = O_WRONLY,
.newval = "(none)", /* same as default, should fail anyway */
.result = OP_EPERM,
},
{
.descr = "ctx:write sysctl:read write reject",
.insns = {
/* write = X */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sysctl, write)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = LOAD_REJECT,
},
{
.descr = "ctx:file_pos sysctl:read read ok",
.insns = {
/* If (file_pos == X) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, file_pos)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 3, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.seek = 3,
.result = SUCCESS,
},
{
.descr = "ctx:file_pos sysctl:read read ok narrow",
.insns = {
/* If (file_pos == X) */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, file_pos)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, file_pos) + 3),
#endif
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.seek = 4,
.result = SUCCESS,
},
{
.descr = "ctx:file_pos sysctl:read write ok",
.insns = {
/* file_pos = X */
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sysctl, file_pos)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.oldval = "nux\n",
.result = SUCCESS,
},
{
.descr = "sysctl_get_name sysctl_value:base ok",
.insns = {
/* sysctl_get_name arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_name arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* sysctl_get_name arg4 (flags) */
BPF_MOV64_IMM(BPF_REG_4, BPF_F_SYSCTL_BASE_NAME),
/* sysctl_get_name(ctx, buf, buf_len, flags) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, sizeof("tcp_mem") - 1, 6),
/* buf == "tcp_mem\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x7463705f6d656d00ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_name sysctl_value:base E2BIG truncated",
.insns = {
/* sysctl_get_name arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_name arg3 (buf_len) too small */
BPF_MOV64_IMM(BPF_REG_3, 7),
/* sysctl_get_name arg4 (flags) */
BPF_MOV64_IMM(BPF_REG_4, BPF_F_SYSCTL_BASE_NAME),
/* sysctl_get_name(ctx, buf, buf_len, flags) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
/* buf[0:7] == "tcp_me\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x7463705f6d650000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_name sysctl:full ok",
.insns = {
/* sysctl_get_name arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_name arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 17),
/* sysctl_get_name arg4 (flags) */
BPF_MOV64_IMM(BPF_REG_4, 0),
/* sysctl_get_name(ctx, buf, buf_len, flags) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 16, 14),
/* buf[0:8] == "net/ipv4" && */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x6e65742f69707634ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10),
/* buf[8:16] == "/tcp_mem" && */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x2f7463705f6d656dULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
/* buf[16:24] == "\0") */
BPF_LD_IMM64(BPF_REG_8, 0x0ULL),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 16),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_name sysctl:full E2BIG truncated",
.insns = {
/* sysctl_get_name arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_name arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 16),
/* sysctl_get_name arg4 (flags) */
BPF_MOV64_IMM(BPF_REG_4, 0),
/* sysctl_get_name(ctx, buf, buf_len, flags) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 10),
/* buf[0:8] == "net/ipv4" && */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x6e65742f69707634ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
/* buf[8:16] == "/tcp_me\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x2f7463705f6d6500ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_name sysctl:full E2BIG truncated small",
.insns = {
/* sysctl_get_name arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_name arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 7),
/* sysctl_get_name arg4 (flags) */
BPF_MOV64_IMM(BPF_REG_4, 0),
/* sysctl_get_name(ctx, buf, buf_len, flags) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
/* buf[0:8] == "net/ip\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x6e65742f69700000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_current_value sysctl:read ok, gt",
.insns = {
/* sysctl_get_current_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_current_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* sysctl_get_current_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6),
/* buf[0:6] == "Linux\n\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x4c696e75780a0000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_current_value sysctl:read ok, eq",
.insns = {
/* sysctl_get_current_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_7, BPF_REG_0, 7),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_current_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 7),
/* sysctl_get_current_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6),
/* buf[0:6] == "Linux\n\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x4c696e75780a0000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_current_value sysctl:read E2BIG truncated",
.insns = {
/* sysctl_get_current_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_7, BPF_REG_0, 6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_current_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 6),
/* sysctl_get_current_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
/* buf[0:6] == "Linux\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x4c696e7578000000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "kernel/ostype",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_current_value sysctl:read EINVAL",
.insns = {
/* sysctl_get_current_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_current_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* sysctl_get_current_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 4),
/* buf[0:8] is NUL-filled) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv6/conf/lo/stable_secret", /* -EIO */
.open_flags = O_RDONLY,
.result = OP_EPERM,
},
{
.descr = "sysctl_get_current_value sysctl:write ok",
.fixup_value_insn = 6,
.insns = {
/* sysctl_get_current_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_current_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* sysctl_get_current_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 6),
/* buf[0:4] == expected) */
BPF_LD_IMM64(BPF_REG_8, FIXUP_SYSCTL_VALUE),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_WRONLY,
.newval = "600", /* same as default, should fail anyway */
.result = OP_EPERM,
},
{
.descr = "sysctl_get_new_value sysctl:read EINVAL",
.insns = {
/* sysctl_get_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* sysctl_get_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_get_new_value sysctl:write ok",
.insns = {
/* sysctl_get_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 4),
/* sysctl_get_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
/* buf[0:4] == "606\0") */
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9,
bpf_ntohl(0x36303600), 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_WRONLY,
.newval = "606",
.result = OP_EPERM,
},
{
.descr = "sysctl_get_new_value sysctl:write ok long",
.insns = {
/* sysctl_get_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 24),
/* sysctl_get_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 23, 14),
/* buf[0:8] == "3000000 " && */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x3330303030303020ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10),
/* buf[8:16] == "4000000 " && */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x3430303030303020ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
/* buf[16:24] == "6000000\0") */
BPF_LD_IMM64(BPF_REG_8,
bpf_be64_to_cpu(0x3630303030303000ULL)),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 16),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_WRONLY,
.newval = "3000000 4000000 6000000",
.result = OP_EPERM,
},
{
.descr = "sysctl_get_new_value sysctl:write E2BIG",
.insns = {
/* sysctl_get_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_7, BPF_REG_0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_get_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 3),
/* sysctl_get_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 4),
/* buf[0:3] == "60\0") */
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9,
bpf_ntohl(0x36300000), 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_WRONLY,
.newval = "606",
.result = OP_EPERM,
},
{
.descr = "sysctl_set_new_value sysctl:read EINVAL",
.insns = {
/* sysctl_set_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x36303000)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_set_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 3),
/* sysctl_set_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_set_new_value),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
.descr = "sysctl_set_new_value sysctl:write ok",
.fixup_value_insn = 2,
.insns = {
/* sysctl_set_new_value arg2 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_LD_IMM64(BPF_REG_0, FIXUP_SYSCTL_VALUE),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
/* sysctl_set_new_value arg3 (buf_len) */
BPF_MOV64_IMM(BPF_REG_3, 3),
/* sysctl_set_new_value(ctx, buf, buf_len) */
BPF_EMIT_CALL(BPF_FUNC_sysctl_set_new_value),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_WRONLY,
.newval = "606",
.result = SUCCESS,
},
{
"bpf_strtoul one number string",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x36303000)),
BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 600, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtoul multi number string",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
/* "600 602\0" */
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3630302036303200ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 8),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 18),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 600, 16),
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_0),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 4),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 602, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtoul buf_len = 0, reject",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x36303000)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 0),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = LOAD_REJECT,
},
{
"bpf_strtoul supported base, ok",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x30373700)),
BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 8),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 63, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtoul unsupported base, EINVAL",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x36303000)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 3),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtoul buf with spaces only, EINVAL",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x0d0c0a09)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtoul negative number, EINVAL",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
/* " -6\0" */
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x0a2d3600)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtoul),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtol negative number, ok",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
/* " -6\0" */
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x0a2d3600)),
BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 10),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtol),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, -6, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtol hex number, ok",
.insns = {
/* arg1 (buf) */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
/* "0xfe" */
BPF_MOV64_IMM(BPF_REG_0,
bpf_ntohl(0x30786665)),
BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 4),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtol),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 4),
/* res == expected) */
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 254, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtol max long",
.insns = {
/* arg1 (buf) 9223372036854775807 */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3932323333373230ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3336383534373735ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3830370000000000ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 19),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtol),
/* if (ret == expected && */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 19, 6),
/* res == expected) */
BPF_LD_IMM64(BPF_REG_8, 0x7fffffffffffffffULL),
BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"bpf_strtol overflow, ERANGE",
.insns = {
/* arg1 (buf) 9223372036854775808 */
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3932323333373230ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3336383534373735ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
BPF_LD_IMM64(BPF_REG_0,
bpf_be64_to_cpu(0x3830380000000000ULL)),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* arg2 (buf_len) */
BPF_MOV64_IMM(BPF_REG_2, 19),
/* arg3 (flags) */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* arg4 (res) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
BPF_EMIT_CALL(BPF_FUNC_strtol),
/* if (ret == expected) */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -ERANGE, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
{
"C prog: deny all writes",
.prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_WRONLY,
.newval = "123 456 789",
.result = OP_EPERM,
},
{
"C prog: deny access by name",
.prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
.result = OP_EPERM,
},
{
"C prog: read tcp_mem",
.prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
.result = SUCCESS,
},
};
/* Count the instructions in @fp by scanning backwards for the last entry
 * that is not all-zero (the insns[] arrays are zero-padded to MAX_INSNS).
 * Always reports at least one instruction.
 */
static size_t probe_prog_length(const struct bpf_insn *fp)
{
	size_t pos = MAX_INSNS;

	while (--pos > 0) {
		if (fp[pos].code != 0 || fp[pos].imm != 0)
			break;
	}
	return pos + 1;
}
/* Patch the BPF_LD_IMM64 instruction at @insn_num so its 64-bit immediate
 * holds the raw bytes of @buf (zero-padded). Used to compare the program's
 * view of a sysctl value against what userspace read from /proc/sys.
 * Returns 0 on success, -1 if the value does not fit or the target
 * instruction is not a BPF_LD_IMM64.
 */
static int fixup_sysctl_value(const char *buf, size_t buf_len,
			      struct bpf_insn *prog, size_t insn_num)
{
	uint64_t num = 0;

	if (buf_len > sizeof(num)) {
		log_err("Value is too big (%zd) to use in fixup", buf_len);
		return -1;
	}
	if (prog[insn_num].code != (BPF_LD | BPF_DW | BPF_IMM)) {
		log_err("Can fixup only BPF_LD_IMM64 insns");
		return -1;
	}
	/* Copy the raw bytes into the low end of the immediate; the upper
	 * bytes stay zero, matching the original union-based packing.
	 */
	memcpy(&num, buf, buf_len);
	/* BPF_LD_IMM64 spans two insns: low 32 bits first, high 32 second. */
	prog[insn_num].imm = (uint32_t)num;
	prog[insn_num + 1].imm = (uint32_t)(num >> 32);
	return 0;
}
/* Load the inline instruction program of @test into the kernel.
 * If test->fixup_value_insn is set, first read the current value of
 * @sysctl_path and patch it into the program (see fixup_sysctl_value()),
 * so the program can compare against the live sysctl contents.
 * Returns the program fd on success, negative on error. On verifier
 * rejection the log is printed unless the test expects LOAD_REJECT.
 */
static int load_sysctl_prog_insns(struct sysctl_test *test,
				  const char *sysctl_path)
{
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int ret, insn_cnt;
	insn_cnt = probe_prog_length(prog);
	if (test->fixup_value_insn) {
		char buf[128];
		ssize_t len;
		int fd;
		fd = open(sysctl_path, O_RDONLY | O_CLOEXEC);
		if (fd < 0) {
			log_err("open(%s) failed", sysctl_path);
			return -1;
		}
		/* buf is not NUL-terminated here; fixup_sysctl_value() only
		 * consumes the first len raw bytes, so that is fine.
		 */
		len = read(fd, buf, sizeof(buf));
		if (len == -1) {
			log_err("read(%s) failed", sysctl_path);
			close(fd);
			return -1;
		}
		close(fd);
		if (fixup_sysctl_value(buf, len, prog, test->fixup_value_insn))
			return -1;
	}
	opts.log_buf = bpf_log_buf;
	opts.log_size = BPF_LOG_BUF_SIZE;
	ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SYSCTL, NULL, "GPL", prog, insn_cnt, &opts);
	if (ret < 0 && test->result != LOAD_REJECT) {
		log_err(">>> Loading program error.\n"
			">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
	}
	return ret;
}
/* Load a pre-compiled BPF object (test->prog_file) and return its program
 * fd, or -1 on failure. Load errors are only logged when the test does not
 * expect LOAD_REJECT.
 */
static int load_sysctl_prog_file(struct sysctl_test *test)
{
	struct bpf_object *obj;
	int prog_fd;

	if (!bpf_prog_test_load(test->prog_file, BPF_PROG_TYPE_CGROUP_SYSCTL,
				&obj, &prog_fd))
		return prog_fd;

	if (test->result != LOAD_REJECT)
		log_err(">>> Loading program (%s) error.\n",
			test->prog_file);
	return -1;
}
/* Dispatch to the file-based or inline-instruction loader, depending
 * on whether the test case references an object file.
 */
static int load_sysctl_prog(struct sysctl_test *test, const char *sysctl_path)
{
	if (test->prog_file)
		return load_sysctl_prog_file(test);

	return load_sysctl_prog_insns(test, sysctl_path);
}
/* Exercise the sysctl file per the test's open flags: read and compare
 * against ->oldval, or write ->newval. Returns 0 on success and -1 on
 * failure; an open() failure propagates the negative fd so the caller
 * can inspect errno.
 */
static int access_sysctl(const char *sysctl_path,
			 const struct sysctl_test *test)
{
	int rc = -1;
	int fd;

	fd = open(sysctl_path, test->open_flags | O_CLOEXEC);
	if (fd < 0)
		return fd;

	if (test->seek && lseek(fd, test->seek, SEEK_SET) == -1) {
		log_err("lseek(%d) failed", test->seek);
	} else if (test->open_flags == O_RDONLY) {
		char buf[128];

		if (read(fd, buf, sizeof(buf)) != -1) {
			if (test->oldval &&
			    strncmp(buf, test->oldval, strlen(test->oldval)))
				log_err("Read value %s != %s", buf, test->oldval);
			else
				rc = 0;
		}
	} else if (test->open_flags == O_WRONLY) {
		if (!test->newval)
			log_err("New value for sysctl is not set");
		else if (write(fd, test->newval, strlen(test->newval)) != -1)
			rc = 0;
	} else {
		log_err("Unexpected sysctl access: neither read nor write");
	}

	close(fd);
	return rc;
}
/* Run one sysctl test: load the program, attach it to cgroup @cgfd,
 * poke the sysctl file, and compare against the expected result.
 * Returns 0 on pass, -1 on fail.
 */
static int run_test_case(int cgfd, struct sysctl_test *test)
{
	enum bpf_attach_type atype = test->attach_type;
	char sysctl_path[128];
	int progfd = -1;
	int err = 0;

	printf("Test case: %s .. ", test->descr);

	snprintf(sysctl_path, sizeof(sysctl_path), "/proc/sys/%s",
		 test->sysctl);

	progfd = load_sysctl_prog(test, sysctl_path);
	if (progfd < 0) {
		if (test->result == LOAD_REJECT)
			goto out;
		else
			goto err;
	}

	if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) < 0) {
		if (test->result == ATTACH_REJECT)
			goto out;
		else
			goto err;
	}

	errno = 0;
	if (access_sysctl(sysctl_path, test) == -1) {
		if (test->result == OP_EPERM && errno == EPERM)
			goto out;
		else
			goto err;
	}

	if (test->result != SUCCESS) {
		log_err("Unexpected success");
		goto err;
	}

	goto out;
err:
	err = -1;
out:
	/* Detaching w/o checking return code: best effort attempt. */
	if (progfd != -1) {
		bpf_prog_detach(cgfd, atype);
		/* bug fix: only close a valid fd; the old code called
		 * close(-1) whenever the program failed to load.
		 */
		close(progfd);
	}
	printf("[%s]\n", err ? "FAIL" : "PASS");
	return err;
}
/* Run every test case against cgroup @cgfd and print a summary.
 * Returns 0 when all cases pass, -1 otherwise.
 */
static int run_tests(int cgfd)
{
	int fail_cnt = 0;
	int pass_cnt = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		if (run_test_case(cgfd, &tests[i]) == 0)
			++pass_cnt;
		else
			++fail_cnt;
	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, fail_cnt);
	return fail_cnt ? -1 : 0;
}
/* Entry point: create and join a fresh cgroup, run all sysctl test
 * cases inside it, then tear down the cgroup environment.
 */
int main(int argc, char **argv)
{
	int cgfd = -1;
	int err = 0;

	cgfd = cgroup_setup_and_join(CG_PATH);
	if (cgfd < 0)
		goto err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	if (run_tests(cgfd))
		goto err;

	goto out;
err:
	err = -1;
out:
	/* bug fix: don't close(-1) when cgroup setup failed; cleanup of
	 * the cgroup environment itself is safe either way.
	 */
	if (cgfd >= 0)
		close(cgfd);
	cleanup_cgroup_environment();
	return err;
}
| linux-master | tools/testing/selftests/bpf/test_sysctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Inject packets with all sorts of encapsulation into the kernel.
*
* IPv4/IPv6 outer layer 3
* GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/..
* IPv4/IPv6 inner layer 3
*/
#define _GNU_SOURCE
#include <stddef.h>
#include <arpa/inet.h>
#include <asm/byteorder.h>
#include <error.h>
#include <errno.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <netinet/ip.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <poll.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#define CFG_PORT_INNER 8000
/* Add some protocol definitions that do not exist in userspace */
/* Minimal GRE header: the flags/version word (unused by this test)
 * followed by the EtherType of the encapsulated protocol.
 */
struct grehdr {
	uint16_t unused;
	uint16_t protocol;
} __attribute__((packed));
/* Generic UDP Encapsulation header (first 32 bits): hlen/control/
 * version bitfield, inner-protocol ctype, and flags. "word" aliases
 * the whole 32-bit header for bulk access.
 */
struct guehdr {
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			__u8	hlen:5,
				control:1,
				version:2;
#elif defined (__BIG_ENDIAN_BITFIELD)
			__u8	version:2,
				control:1,
				hlen:5;
#else
#error  "Please fix <asm/byteorder.h>"
#endif
			__u8	proto_ctype;
			__be16	flags;
		};
		__be32	word;
	};
};
static uint8_t cfg_dsfield_inner;
static uint8_t cfg_dsfield_outer;
static uint8_t cfg_encap_proto;
static bool cfg_expect_failure = false;
static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */
static int cfg_l3_inner = AF_UNSPEC;
static int cfg_l3_outer = AF_UNSPEC;
static int cfg_num_pkt = 10;
static int cfg_num_secs = 0;
static char cfg_payload_char = 'a';
static int cfg_payload_len = 100;
static int cfg_port_gue = 6080;
static bool cfg_only_rx;
static bool cfg_only_tx;
static int cfg_src_port = 9;
static char buf[ETH_DATA_LEN];
#define INIT_ADDR4(name, addr4, port) \
static struct sockaddr_in name = { \
.sin_family = AF_INET, \
.sin_port = __constant_htons(port), \
.sin_addr.s_addr = __constant_htonl(addr4), \
};
#define INIT_ADDR6(name, addr6, port) \
static struct sockaddr_in6 name = { \
.sin6_family = AF_INET6, \
.sin6_port = __constant_htons(port), \
.sin6_addr = addr6, \
};
INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER)
INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0)
INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0)
INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0)
INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0)
INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0)
INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
/* Current wall-clock time, in milliseconds. */
static unsigned long util_gettime(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);

	return (now.tv_sec * 1000) + (now.tv_usec / 1000);
}
/* Print "<msg>: <presentation form of addr>" to stderr.
 * Exits on an unsupported address family or conversion failure.
 */
static void util_printaddr(const char *msg, struct sockaddr *addr)
{
	char str[INET6_ADDRSTRLEN];
	unsigned long off = 0;

	if (addr->sa_family == PF_INET)
		off = __builtin_offsetof(struct sockaddr_in, sin_addr);
	else if (addr->sa_family == PF_INET6)
		off = __builtin_offsetof(struct sockaddr_in6, sin6_addr);
	else
		error(1, 0, "printaddr: unsupported family %u\n",
		      addr->sa_family);

	if (!inet_ntop(addr->sa_family, ((void *) addr) + off, str,
		       sizeof(str)))
		error(1, errno, "inet_ntop");

	fprintf(stderr, "%s: %s\n", msg, str);
}
/* Sum @num_u16 16-bit words starting at @start, without folding. */
static unsigned long add_csum_hword(const uint16_t *start, int num_u16)
{
	const uint16_t *p = start;
	const uint16_t *end = start + num_u16;
	unsigned long total = 0;

	while (p < end)
		total += *p++;

	return total;
}
/* Fold @sum plus the halfwords in [start, start + num_u16) into a
 * 16-bit one's-complement internet checksum (summing inlined rather
 * than delegated to add_csum_hword).
 */
static uint16_t build_ip_csum(const uint16_t *start, int num_u16,
			      unsigned long sum)
{
	int i;

	for (i = 0; i < num_u16; i++)
		sum += start[i];

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return ~sum;
}
/* Fill an option-less 20-byte IPv4 header at @header covering
 * @payload_len payload bytes, then compute its header checksum.
 * Assumes the header memory is pre-zeroed (the packet buffer is).
 */
static void build_ipv4_header(void *header, uint8_t proto,
			      uint32_t src, uint32_t dst,
			      int payload_len, uint8_t tos)
{
	struct iphdr *iph = header;

	iph->version = 4;
	iph->ihl = 5;				/* 5 * 4 = 20 bytes, no options */
	iph->tos = tos;
	iph->tot_len = htons(sizeof(*iph) + payload_len);
	iph->id = htons(1337);
	iph->ttl = 8;
	iph->protocol = proto;
	iph->saddr = src;
	iph->daddr = dst;
	/* ihl << 1 is the header length in 16-bit words */
	iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0);
}
/* Overwrite the 8-bit DS field (traffic class) that straddles the
 * first two bytes of the IPv6 header, preserving the version nibble
 * and the top flow-label bits.
 */
static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
{
	uint16_t *hword = (uint16_t *)ip6h;
	uint16_t bits = ntohs(*hword);

	bits = (bits & 0xF00F) | (((uint16_t) dsfield) << 4);
	*hword = htons(bits);
}
/* Fill an IPv6 header at @header for @payload_len payload bytes,
 * copying addresses from the given sockaddrs and setting the DS field.
 */
static void build_ipv6_header(void *header, uint8_t proto,
			      struct sockaddr_in6 *src,
			      struct sockaddr_in6 *dst,
			      int payload_len, uint8_t dsfield)
{
	struct ipv6hdr *hdr = header;

	hdr->version = 6;
	hdr->payload_len = htons(payload_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = 8;
	ipv6_set_dsfield(hdr, dsfield);

	memcpy(&hdr->saddr, &src->sin6_addr, sizeof(hdr->saddr));
	memcpy(&hdr->daddr, &dst->sin6_addr, sizeof(hdr->daddr));
}
/* UDP checksum over the IPv4 pseudo-header: sum saddr+daddr (they are
 * adjacent in struct iphdr, so sizeof(saddr) halfwords covers both),
 * add protocol and UDP length, then fold in the UDP header + payload.
 */
static uint16_t build_udp_v4_csum(const struct iphdr *iph,
				  const struct udphdr *udph,
				  int num_words)
{
	unsigned long pseudo_sum;
	int num_u16 = sizeof(iph->saddr);	/* halfwords: twice byte len */

	pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16);
	pseudo_sum += htons(IPPROTO_UDP);
	pseudo_sum += udph->len;
	return build_ip_csum((void *) udph, num_words, pseudo_sum);
}
/* UDP checksum over the IPv6 pseudo-header: sum saddr+daddr (adjacent
 * in struct ipv6hdr, so sizeof(saddr) halfwords covers both), add next
 * header and payload length, then fold in the UDP header + payload.
 */
static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h,
				  const struct udphdr *udph,
				  int num_words)
{
	unsigned long pseudo_sum;
	int num_u16 = sizeof(ip6h->saddr);	/* halfwords: twice byte len */

	pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16);
	pseudo_sum += htons(ip6h->nexthdr);
	pseudo_sum += ip6h->payload_len;
	return build_ip_csum((void *) udph, num_words, pseudo_sum);
}
/* Fill a UDP header at @header for @payload_len payload bytes and
 * compute the checksum against the preceding v4 or v6 IP header
 * (which must already be built, since it feeds the pseudo-sum).
 */
static void build_udp_header(void *header, int payload_len,
			     uint16_t dport, int family)
{
	struct udphdr *udp = header;
	int udp_len = sizeof(*udp) + payload_len;

	udp->source = htons(cfg_src_port);
	udp->dest = htons(dport);
	udp->len = htons(udp_len);
	udp->check = 0;

	/* udp_len >> 1 halfwords; an odd trailing byte is covered by the
	 * zero pad the packet builder appends after the payload.
	 */
	if (family == AF_INET)
		udp->check = build_udp_v4_csum(header - sizeof(struct iphdr),
					       udp, udp_len >> 1);
	else
		udp->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
					       udp, udp_len >> 1);
}
/* Set the inner-protocol field of a (pre-zeroed) GUE header. */
static void build_gue_header(void *header, uint8_t proto)
{
	struct guehdr *gue = header;

	gue->proto_ctype = proto;
}
/* Set the encapsulated EtherType of a (pre-zeroed) GRE header. */
static void build_gre_header(void *header, uint16_t proto)
{
	struct grehdr *gre = header;

	gre->protocol = htons(proto);
}
/* Size in bytes of an L3 header for the given address family. */
static int l3_length(int family)
{
	return family == AF_INET ? sizeof(struct iphdr)
				 : sizeof(struct ipv6hdr);
}
/* Assemble the full test packet in the global buf, inside out:
 *   [extra L3][outer L3][outer L4: GRE or UDP+GUE][inner L3][inner UDP][payload]
 * Returns the total packet length in bytes.
 */
static int build_packet(void)
{
	int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
	int el3_len = 0;

	if (cfg_l3_extra)
		el3_len = l3_length(cfg_l3_extra);

	/* calculate header offsets */
	if (cfg_encap_proto) {
		ol3_len = l3_length(cfg_l3_outer);

		if (cfg_encap_proto == IPPROTO_GRE)
			ol4_len = sizeof(struct grehdr);
		else if (cfg_encap_proto == IPPROTO_UDP)
			ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
	}

	il3_len = l3_length(cfg_l3_inner);
	il4_len = sizeof(struct udphdr);

	/* >= (not >) leaves room for the trailing csum pad byte below */
	if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >=
	    sizeof(buf))
		error(1, 0, "packet too large\n");

	/*
	 * Fill packet from inside out, to calculate correct checksums.
	 * But create ip before udp headers, as udp uses ip for pseudo-sum.
	 */
	memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
	       cfg_payload_char, cfg_payload_len);

	/* add zero byte for udp csum padding */
	buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0;

	switch (cfg_l3_inner) {
	case PF_INET:
		build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
				  IPPROTO_UDP,
				  in_saddr4.sin_addr.s_addr,
				  in_daddr4.sin_addr.s_addr,
				  il4_len + cfg_payload_len,
				  cfg_dsfield_inner);
		break;
	case PF_INET6:
		build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
				  IPPROTO_UDP,
				  &in_saddr6, &in_daddr6,
				  il4_len + cfg_payload_len,
				  cfg_dsfield_inner);
		break;
	}

	build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
			 cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner);

	if (!cfg_encap_proto)
		return il3_len + il4_len + cfg_payload_len;

	switch (cfg_l3_outer) {
	case PF_INET:
		build_ipv4_header(buf + el3_len, cfg_encap_proto,
				  out_saddr4.sin_addr.s_addr,
				  out_daddr4.sin_addr.s_addr,
				  ol4_len + il3_len + il4_len + cfg_payload_len,
				  cfg_dsfield_outer);
		break;
	case PF_INET6:
		build_ipv6_header(buf + el3_len, cfg_encap_proto,
				  &out_saddr6, &out_daddr6,
				  ol4_len + il3_len + il4_len + cfg_payload_len,
				  cfg_dsfield_outer);
		break;
	}

	switch (cfg_encap_proto) {
	case IPPROTO_UDP:
		/* GUE sits immediately before the inner L3 header */
		build_gue_header(buf + el3_len + ol3_len + ol4_len -
				 sizeof(struct guehdr),
				 cfg_l3_inner == PF_INET ? IPPROTO_IPIP
							 : IPPROTO_IPV6);
		build_udp_header(buf + el3_len + ol3_len,
				 sizeof(struct guehdr) + il3_len + il4_len +
				 cfg_payload_len,
				 cfg_port_gue, cfg_l3_outer);
		break;
	case IPPROTO_GRE:
		build_gre_header(buf + el3_len + ol3_len,
				 cfg_l3_inner == PF_INET ? ETH_P_IP
							 : ETH_P_IPV6);
		break;
	}

	switch (cfg_l3_extra) {
	case PF_INET:
		build_ipv4_header(buf,
				  cfg_l3_outer == PF_INET ? IPPROTO_IPIP
							  : IPPROTO_IPV6,
				  extra_saddr4.sin_addr.s_addr,
				  extra_daddr4.sin_addr.s_addr,
				  ol3_len + ol4_len + il3_len + il4_len +
				  cfg_payload_len, 0);
		break;
	case PF_INET6:
		build_ipv6_header(buf,
				  cfg_l3_outer == PF_INET ? IPPROTO_IPIP
							  : IPPROTO_IPV6,
				  &extra_saddr6, &extra_daddr6,
				  ol3_len + ol4_len + il3_len + il4_len +
				  cfg_payload_len, 0);
		break;
	}

	return el3_len + ol3_len + ol4_len + il3_len + il4_len +
	       cfg_payload_len;
}
/* sender transmits encapsulated over RAW or unencap'd over UDP */
/* Create the raw transmit socket and connect it to the outermost
 * destination, preferring extra prefix > outer > inner.
 */
static int setup_tx(void)
{
	const struct sockaddr *daddr;
	socklen_t dlen;
	int family, fd;

	if (cfg_l3_extra)
		family = cfg_l3_extra;
	else if (cfg_l3_outer)
		family = cfg_l3_outer;
	else
		family = cfg_l3_inner;

	fd = socket(family, SOCK_RAW, IPPROTO_RAW);
	if (fd == -1)
		error(1, errno, "socket tx");

	if (cfg_l3_extra) {
		if (cfg_l3_extra == PF_INET) {
			daddr = (void *) &extra_daddr4;
			dlen = sizeof(extra_daddr4);
		} else {
			daddr = (void *) &extra_daddr6;
			dlen = sizeof(extra_daddr6);
		}
	} else if (cfg_l3_outer) {
		/* connect to destination if not encapsulated */
		if (cfg_l3_outer == PF_INET) {
			daddr = (void *) &out_daddr4;
			dlen = sizeof(out_daddr4);
		} else {
			daddr = (void *) &out_daddr6;
			dlen = sizeof(out_daddr6);
		}
	} else {
		/* otherwise using loopback */
		if (cfg_l3_inner == PF_INET) {
			daddr = (void *) &in_daddr4;
			dlen = sizeof(in_daddr4);
		} else {
			daddr = (void *) &in_daddr6;
			dlen = sizeof(in_daddr6);
		}
	}

	if (connect(fd, daddr, dlen))
		error(1, errno, "connect tx");

	return fd;
}
/* receiver reads unencapsulated UDP */
/* Create a UDP socket bound to the inner destination address. */
static int setup_rx(void)
{
	int ret, fd;

	fd = socket(cfg_l3_inner, SOCK_DGRAM, 0);
	if (fd == -1)
		error(1, errno, "socket rx");

	ret = cfg_l3_inner == PF_INET ?
	      bind(fd, (void *) &in_daddr4, sizeof(in_daddr4)) :
	      bind(fd, (void *) &in_daddr6, sizeof(in_daddr6));
	if (ret)
		error(1, errno, "bind rx");

	return fd;
}
/* Write one packet to @fd; aborts on error or short write.
 * Always returns 1 (one packet sent) so callers can count.
 */
static int do_tx(int fd, const char *pkt, int len)
{
	int written = write(fd, pkt, len);

	if (written == -1)
		error(1, errno, "send");
	if (written != len)
		error(1, errno, "send: len (%d < %d)\n", written, len);

	return 1;
}
/* Poll @fd for @events for at most @timeout ms; returns poll()'s
 * ready count. Aborts on poll error or on any readiness other than
 * POLLIN.
 */
static int do_poll(int fd, short events, int timeout)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = events,
	};
	int n = poll(&pfd, 1, timeout);

	if (n == -1)
		error(1, errno, "poll");
	if (n && !(pfd.revents & POLLIN))
		error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents);

	return n;
}
/* Drain every queued datagram (one byte each), verifying that the
 * payload byte matches cfg_payload_char. Returns the packet count.
 */
static int do_rx(int fd)
{
	int cnt = 0;
	char c;

	for (;;) {
		int r = recv(fd, &c, 1, MSG_DONTWAIT);

		if (r == -1 && errno == EAGAIN)
			break;			/* queue empty */
		if (r == -1)
			error(1, errno, "recv");
		if (c != cfg_payload_char)
			error(1, 0, "recv: payload mismatch");
		cnt++;
	}

	return cnt;
}
/* Main tx/rx loop: send cfg_num_pkt packets (or run for cfg_num_secs
 * with per-second reporting), receive them back on the inner UDP
 * socket, then drain stragglers for up to 100 ms. Returns 0 iff the
 * expected packets arrived (all of them, or none when failure is
 * expected).
 */
static int do_main(void)
{
	unsigned long tstop, treport, tcur;
	int fdt = -1, fdr = -1, len, tx = 0, rx = 0;

	if (!cfg_only_tx)
		fdr = setup_rx();
	if (!cfg_only_rx)
		fdt = setup_tx();

	len = build_packet();

	tcur = util_gettime();
	treport = tcur + 1000;
	tstop = tcur + (cfg_num_secs * 1000);

	while (1) {
		if (!cfg_only_rx)
			tx += do_tx(fdt, buf, len);

		if (!cfg_only_tx)
			rx += do_rx(fdr);

		if (cfg_num_secs) {
			/* timed mode: report and reset counters every second */
			tcur = util_gettime();
			if (tcur >= tstop)
				break;
			if (tcur >= treport) {
				fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);
				tx = 0;
				rx = 0;
				treport = tcur + 1000;
			}
		} else {
			if (tx == cfg_num_pkt)
				break;
		}
	}

	/* read straggler packets, if any */
	if (rx < tx) {
		tstop = util_gettime() + 100;
		while (rx < tx) {
			tcur = util_gettime();
			if (tcur >= tstop)
				break;

			do_poll(fdr, POLLIN, tstop - tcur);
			rx += do_rx(fdr);
		}
	}

	fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);

	if (fdr != -1 && close(fdr))
		error(1, errno, "close rx");
	if (fdt != -1 && close(fdt))
		error(1, errno, "close tx");

	/*
	 * success (== 0) only if received all packets
	 * unless failure is expected, in which case none must arrive.
	 */
	if (cfg_expect_failure)
		return rx != 0;
	else
		return rx != tx;
}
/* Print the command-line synopsis to stderr and exit with status 1. */
static void __attribute__((noreturn)) usage(const char *filepath)
{
	fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] "
		"[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] "
		"[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] "
		"[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n",
		filepath);
	exit(1);
}
/* Parse a presentation-format address string into @addr; exits on an
 * unparseable string or an unsupported family.
 */
static void parse_addr(int family, void *addr, const char *optarg)
{
	switch (inet_pton(family, optarg, addr)) {
	case 1:
		break;			/* parsed fine */
	case 0:
		error(1, 0, "inet_pton: bad string");
		break;
	default:			/* -1: bad family, errno set */
		error(1, errno, "inet_pton");
	}
}
/* Parse an IPv4 presentation address into @addr->sin_addr. */
static void parse_addr4(struct sockaddr_in *addr, const char *optarg)
{
	parse_addr(AF_INET, &addr->sin_addr, optarg);
}
/* Parse an IPv6 presentation address into @addr->sin6_addr. */
static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg)
{
	parse_addr(AF_INET6, &addr->sin6_addr, optarg);
}
/* Map "4"/"6" to PF_INET/PF_INET6; any other value prints usage and
 * exits (usage() does not return).
 */
static int parse_protocol_family(const char *filepath, const char *optarg)
{
	if (strcmp(optarg, "4") == 0)
		return PF_INET;
	if (strcmp(optarg, "6") == 0)
		return PF_INET6;

	usage(filepath);
}
/* Parse command-line flags into the cfg_* globals and validate the
 * combination (rx/tx exclusivity, encapsulation vs outer/extra family,
 * and the RFC 6040 ECN drop case).
 */
static void parse_opts(int argc, char **argv)
{
	int c;

	while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) {
		switch (c) {
		case 'd':
			if (cfg_l3_outer == AF_UNSPEC)
				error(1, 0, "-d must be preceded by -o");
			if (cfg_l3_outer == AF_INET)
				parse_addr4(&out_daddr4, optarg);
			else
				parse_addr6(&out_daddr6, optarg);
			break;
		case 'D':
			if (cfg_l3_inner == AF_UNSPEC)
				error(1, 0, "-D must be preceded by -i");
			if (cfg_l3_inner == AF_INET)
				parse_addr4(&in_daddr4, optarg);
			else
				parse_addr6(&in_daddr6, optarg);
			break;
		case 'e':
			if (!strcmp(optarg, "gre"))
				cfg_encap_proto = IPPROTO_GRE;
			else if (!strcmp(optarg, "gue"))
				cfg_encap_proto = IPPROTO_UDP;
			else if (!strcmp(optarg, "bare"))
				cfg_encap_proto = IPPROTO_IPIP;
			else if (!strcmp(optarg, "none"))
				cfg_encap_proto = IPPROTO_IP;	/* == 0 */
			else
				usage(argv[0]);
			break;
		case 'f':
			cfg_src_port = strtol(optarg, NULL, 0);
			break;
		case 'F':
			cfg_expect_failure = true;
			break;
		case 'h':
			usage(argv[0]);
			break;
		case 'i':
			if (!strcmp(optarg, "4"))
				cfg_l3_inner = PF_INET;
			else if (!strcmp(optarg, "6"))
				cfg_l3_inner = PF_INET6;
			else
				usage(argv[0]);
			break;
		case 'l':
			cfg_payload_len = strtol(optarg, NULL, 0);
			break;
		case 'n':
			cfg_num_pkt = strtol(optarg, NULL, 0);
			break;
		case 'o':
			cfg_l3_outer = parse_protocol_family(argv[0], optarg);
			break;
		case 'O':
			cfg_l3_extra = parse_protocol_family(argv[0], optarg);
			break;
		case 'R':
			cfg_only_rx = true;
			break;
		case 's':
			if (cfg_l3_outer == AF_INET)
				parse_addr4(&out_saddr4, optarg);
			else
				parse_addr6(&out_saddr6, optarg);
			break;
		case 'S':
			if (cfg_l3_inner == AF_INET)
				parse_addr4(&in_saddr4, optarg);
			else
				parse_addr6(&in_saddr6, optarg);
			break;
		case 't':
			cfg_num_secs = strtol(optarg, NULL, 0);
			break;
		case 'T':
			cfg_only_tx = true;
			break;
		case 'x':
			cfg_dsfield_outer = strtol(optarg, NULL, 0);
			break;
		case 'X':
			cfg_dsfield_inner = strtol(optarg, NULL, 0);
			break;
		}
	}

	if (cfg_only_rx && cfg_only_tx)
		error(1, 0, "options: cannot combine rx-only and tx-only");

	if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC)
		error(1, 0, "options: must specify outer with encap");
	else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC)
		error(1, 0, "options: cannot combine no-encap and outer");
	else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC)
		error(1, 0, "options: cannot combine no-encap and extra");

	if (cfg_l3_inner == AF_UNSPEC)
		cfg_l3_inner = AF_INET6;
	/* a v6 inner cannot ride plain IPIP; switch to the v6 protocol */
	if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP)
		cfg_encap_proto = IPPROTO_IPV6;

	/* RFC 6040 4.2:
	 * on decap, if outer encountered congestion (CE == 0x3),
	 * but inner cannot encode ECN (NoECT == 0x0), then drop packet.
	 */
	if (((cfg_dsfield_outer & 0x3) == 0x3) &&
	    ((cfg_dsfield_inner & 0x3) == 0x0))
		cfg_expect_failure = true;
}
/* Echo the effective inner/outer/extra address configuration to
 * stderr before the test runs.
 */
static void print_opts(void)
{
	if (cfg_l3_inner == PF_INET6) {
		util_printaddr("inner.dest6", (void *) &in_daddr6);
		util_printaddr("inner.source6", (void *) &in_saddr6);
	} else {
		util_printaddr("inner.dest4", (void *) &in_daddr4);
		util_printaddr("inner.source4", (void *) &in_saddr4);
	}

	if (!cfg_l3_outer)
		return;

	fprintf(stderr, "encap proto:   %u\n", cfg_encap_proto);

	if (cfg_l3_outer == PF_INET6) {
		util_printaddr("outer.dest6", (void *) &out_daddr6);
		util_printaddr("outer.source6", (void *) &out_saddr6);
	} else {
		util_printaddr("outer.dest4", (void *) &out_daddr4);
		util_printaddr("outer.source4", (void *) &out_saddr4);
	}

	if (!cfg_l3_extra)
		return;

	/* bug fix: the extra tunnel header's family is cfg_l3_extra, not
	 * cfg_l3_outer; the old check could print the wrong address set.
	 */
	if (cfg_l3_extra == PF_INET6) {
		util_printaddr("extra.dest6", (void *) &extra_daddr6);
		util_printaddr("extra.source6", (void *) &extra_saddr6);
	} else {
		util_printaddr("extra.dest4", (void *) &extra_daddr4);
		util_printaddr("extra.source4", (void *) &extra_saddr4);
	}
}
/* Parse options, echo the effective configuration, run the tx/rx loop. */
int main(int argc, char **argv)
{
	parse_opts(argc, argv);

	print_opts();

	return do_main();
}
| linux-master | tools/testing/selftests/bpf/test_flow_dissector.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _SDT_HAS_SEMAPHORES 1
#include "sdt.h"
#define SEC(name) __attribute__((section(name), used))
unsigned short urandlib_read_with_sema_semaphore SEC(".probes");
/* Fire the urandlib:read_with_sema USDT probe with the iteration
 * counters and read size. The semaphore declared above lets tracers
 * check cheaply whether the probe is enabled before arming it.
 */
void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz)
{
	STAP_PROBE3(urandlib, read_with_sema, iter_num, iter_cnt, read_sz);
}
| linux-master | tools/testing/selftests/bpf/urandom_read_lib1.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
/* Create, optionally configure dual-stack mode (AF_INET6 only), bind,
 * and listen on a TCP server socket. Returns the fd or -1 on error.
 */
static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
{
	int v6only = !dual;
	int fd;

	fd = socket(addr->sa_family, SOCK_STREAM, 0);
	if (fd == -1) {
		log_err("Failed to create server socket");
		return -1;
	}

	if (addr->sa_family == AF_INET6 &&
	    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&v6only,
		       sizeof(v6only)) == -1) {
		log_err("Failed to set the dual-stack mode");
	} else if (bind(fd, addr, len) == -1) {
		log_err("Failed to bind server socket");
	} else if (listen(fd, 128) == -1) {
		log_err("Failed to listen on server socket");
	} else {
		return fd;
	}

	close(fd);
	return -1;
}
/* Open a TCP socket and connect it to @addr.
 * Returns the connected fd, or -1 on failure.
 */
static int connect_to_server(const struct sockaddr *addr, socklen_t len)
{
	int fd = socket(addr->sa_family, SOCK_STREAM, 0);

	if (fd == -1) {
		log_err("Failed to create client socket");
		return -1;
	}

	if (connect(fd, (const struct sockaddr *)addr, len) == -1) {
		log_err("Fail to connect to server");
		close(fd);
		return -1;
	}

	return fd;
}
/* Resolve a BPF program id to the fd of its first map, and report via
 * *xdp whether the program type is BPF_PROG_TYPE_XDP.
 * Returns the map fd (>= 0) or -1 on error.
 */
static int get_map_fd_by_prog_id(int prog_id, bool *xdp)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 map_ids[1];
	int prog_fd = -1;
	int map_fd = -1;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		log_err("Failed to get fd by prog id %d", prog_id);
		goto err;
	}

	/* ask the kernel for at most one map id */
	info.nr_map_ids = 1;
	info.map_ids = (__u64)(unsigned long)map_ids;

	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
		log_err("Failed to get info by prog fd %d", prog_fd);
		goto err;
	}

	if (!info.nr_map_ids) {
		log_err("No maps found for prog fd %d", prog_fd);
		goto err;
	}

	*xdp = info.type == BPF_PROG_TYPE_XDP;

	map_fd = bpf_map_get_fd_by_id(map_ids[0]);
	if (map_fd < 0)
		log_err("Failed to get fd by map id %d", map_ids[0]);
err:
	/* the prog fd was only needed for the queries above */
	if (prog_fd >= 0)
		close(prog_fd);
	return map_fd;
}
/* Connect through the BPF-instrumented listener @server_fd and verify
 * the syncookie / cookie-generation / MSS results the program wrote
 * into @results_fd. Returns 0 on success, 1 on failure.
 */
static int run_test(int server_fd, int results_fd, bool xdp,
		    const struct sockaddr *addr, socklen_t len)
{
	int client = -1, srv_client = -1;
	int ret = 0;
	__u32 key = 0;
	__u32 key_gen = 1;
	__u32 key_mss = 2;
	__u32 value = 0;
	__u32 value_gen = 0;
	__u32 value_mss = 0;

	/* reset all three result slots before connecting */
	if (bpf_map_update_elem(results_fd, &key, &value, 0) < 0) {
		log_err("Can't clear results");
		goto err;
	}

	if (bpf_map_update_elem(results_fd, &key_gen, &value_gen, 0) < 0) {
		log_err("Can't clear results");
		goto err;
	}

	if (bpf_map_update_elem(results_fd, &key_mss, &value_mss, 0) < 0) {
		log_err("Can't clear results");
		goto err;
	}

	client = connect_to_server(addr, len);
	if (client == -1)
		goto err;

	srv_client = accept(server_fd, NULL, 0);
	if (srv_client == -1) {
		log_err("Can't accept connection");
		goto err;
	}

	if (bpf_map_lookup_elem(results_fd, &key, &value) < 0) {
		log_err("Can't lookup result");
		goto err;
	}

	if (value == 0) {
		log_err("Didn't match syncookie: %u", value);
		goto err;
	}

	if (bpf_map_lookup_elem(results_fd, &key_gen, &value_gen) < 0) {
		log_err("Can't lookup result");
		goto err;
	}

	if (xdp && value_gen == 0) {
		// SYN packets do not get passed through generic XDP, skip the
		// rest of the test.
		printf("Skipping XDP cookie check\n");
		goto out;
	}

	if (bpf_map_lookup_elem(results_fd, &key_mss, &value_mss) < 0) {
		log_err("Can't lookup result");
		goto err;
	}

	if (value != value_gen) {
		log_err("BPF generated cookie does not match kernel one");
		goto err;
	}

	if (value_mss < 536 || value_mss > USHRT_MAX) {
		log_err("Unexpected MSS retrieved");
		goto err;
	}

	goto out;

err:
	ret = 1;
out:
	/* bug fix: only close sockets that were actually opened; the old
	 * code could call close(-1) on early error paths.
	 */
	if (client != -1)
		close(client);
	if (srv_client != -1)
		close(srv_client);
	return ret;
}
/* Fetch the locally bound port (network byte order) of @server_fd
 * into *@port. Reads through sockaddr_in for both families, which is
 * safe because sin_port and sin6_port share the same offset.
 */
static bool get_port(int server_fd, in_port_t *port)
{
	struct sockaddr_in sa;
	socklen_t slen = sizeof(sa);

	if (getsockname(server_fd, (struct sockaddr *)&sa, &slen)) {
		log_err("Failed to get server addr");
		return false;
	}

	/* sin_port and sin6_port are located at the same offset. */
	*port = sa.sin_port;
	return true;
}
/* Usage: <prog> prog_id. Look up the results map of the given BPF
 * program, start v4, v6 and dual-stack listeners on loopback, and run
 * the syncookie check against each of them.
 */
int main(int argc, char **argv)
{
	struct sockaddr_in addr4;
	struct sockaddr_in6 addr6;
	struct sockaddr_in addr4dual;
	struct sockaddr_in6 addr6dual;
	int server = -1;
	int server_v6 = -1;
	int server_dual = -1;
	int results = -1;
	int err = 0;
	bool xdp;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s prog_id\n", argv[0]);
		exit(1);
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
	if (results < 0) {
		log_err("Can't get map");
		goto err;
	}

	/* v4 loopback listener; port 0 = kernel-assigned */
	memset(&addr4, 0, sizeof(addr4));
	addr4.sin_family = AF_INET;
	addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr4.sin_port = 0;
	memcpy(&addr4dual, &addr4, sizeof(addr4dual));

	memset(&addr6, 0, sizeof(addr6));
	addr6.sin6_family = AF_INET6;
	addr6.sin6_addr = in6addr_loopback;
	addr6.sin6_port = 0;

	/* dual-stack listener binds v6 any-address */
	memset(&addr6dual, 0, sizeof(addr6dual));
	addr6dual.sin6_family = AF_INET6;
	addr6dual.sin6_addr = in6addr_any;
	addr6dual.sin6_port = 0;

	server = start_server((const struct sockaddr *)&addr4, sizeof(addr4),
			      false);
	if (server == -1 || !get_port(server, &addr4.sin_port))
		goto err;

	server_v6 = start_server((const struct sockaddr *)&addr6,
				 sizeof(addr6), false);
	if (server_v6 == -1 || !get_port(server_v6, &addr6.sin6_port))
		goto err;

	server_dual = start_server((const struct sockaddr *)&addr6dual,
				   sizeof(addr6dual), true);
	if (server_dual == -1 || !get_port(server_dual, &addr4dual.sin_port))
		goto err;

	if (run_test(server, results, xdp,
		     (const struct sockaddr *)&addr4, sizeof(addr4)))
		goto err;

	if (run_test(server_v6, results, xdp,
		     (const struct sockaddr *)&addr6, sizeof(addr6)))
		goto err;

	/* connect with v4 to the dual-stack v6 listener */
	if (run_test(server_dual, results, xdp,
		     (const struct sockaddr *)&addr4dual, sizeof(addr4dual)))
		goto err;

	printf("ok\n");
	goto out;
err:
	err = 1;
out:
	close(server);
	close(server_v6);
	close(server_dual);
	close(results);
	return err;
}
| linux-master | tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c |
// SPDX-License-Identifier: GPL-2.0
// test ir decoder
//
// Copyright (C) 2018 Sean Young <[email protected]>
// A lirc chardev is a device representing a consumer IR (cir) device which
// can receive infrared signals from remote control and/or transmit IR.
//
// IR is sent as a series of pulses and space somewhat like morse code. The
// BPF program can decode this into scancodes so that rc-core can translate
// this into input key codes using the rc keymap.
//
// This test works by sending IR over rc-loopback, so the IR is processed by
// BPF and then decoded into scancodes. The lirc chardev must be the one
// associated with rc-loopback, see the output of ir-keytable(1).
//
// The following CONFIG options must be enabled for the test to succeed:
// CONFIG_RC_CORE=y
// CONFIG_BPF_RAWIR_EVENT=y
// CONFIG_RC_LOOPBACK=y
// Steps:
// 1. Open the /dev/lircN device for rc-loopback (given on command line)
// 2. Attach bpf_lirc_mode2 program which decodes some IR.
// 3. Send some IR to the same IR device; since it is loopback, this will
// end up in the bpf program
// 4. bpf program should decode IR and report keycode
// 5. We can read keycode from same /dev/lirc device
#include <linux/bpf.h>
#include <linux/input.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "bpf_util.h"
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "testing_helpers.h"
/* Attach the mode2 decoder program to the lirc chardev (argv[1]), loop
 * two raw IR test messages through rc-loopback, and verify the decoded
 * events arrive on the input device (argv[2]); also exercises BPF
 * attach/detach/query bookkeeping along the way.
 */
int main(int argc, char **argv)
{
	struct bpf_object *obj;
	int ret, lircfd, progfd, inputfd;
	int testir1 = 0x1dead;
	int testir2 = 0x20101;
	u32 prog_ids[10], prog_flags[10], prog_cnt;

	if (argc != 3) {
		printf("Usage: %s /dev/lircN /dev/input/eventM\n", argv[0]);
		return 2;
	}

	ret = bpf_prog_test_load("test_lirc_mode2_kern.bpf.o",
				 BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd);
	if (ret) {
		printf("Failed to load bpf program\n");
		return 1;
	}

	lircfd = open(argv[1], O_RDWR | O_NONBLOCK);
	if (lircfd == -1) {
		printf("failed to open lirc device %s: %m\n", argv[1]);
		return 1;
	}

	/* Let's try detach it before it was ever attached */
	ret = bpf_prog_detach2(progfd, lircfd, BPF_LIRC_MODE2);
	if (ret != -1 || errno != ENOENT) {
		printf("bpf_prog_detach2 not attached should fail: %m\n");
		return 1;
	}

	inputfd = open(argv[2], O_RDONLY | O_NONBLOCK);
	if (inputfd == -1) {
		/* bug fix: report the input device path we failed to open
		 * (argv[2]), not the lirc device (argv[1]).
		 */
		printf("failed to open input device %s: %m\n", argv[2]);
		return 1;
	}

	prog_cnt = 10;
	ret = bpf_prog_query(lircfd, BPF_LIRC_MODE2, 0, prog_flags, prog_ids,
			     &prog_cnt);
	if (ret) {
		printf("Failed to query bpf programs on lirc device: %m\n");
		return 1;
	}

	if (prog_cnt != 0) {
		printf("Expected nothing to be attached\n");
		return 1;
	}

	ret = bpf_prog_attach(progfd, lircfd, BPF_LIRC_MODE2, 0);
	if (ret) {
		printf("Failed to attach bpf to lirc device: %m\n");
		return 1;
	}

	/* Write raw IR */
	ret = write(lircfd, &testir1, sizeof(testir1));
	if (ret != sizeof(testir1)) {
		printf("Failed to send test IR message: %m\n");
		return 1;
	}

	struct pollfd pfd = { .fd = inputfd, .events = POLLIN };
	struct input_event event;

	for (;;) {
		poll(&pfd, 1, 100);

		/* Read decoded IR */
		ret = read(inputfd, &event, sizeof(event));
		if (ret != sizeof(event)) {
			printf("Failed to read decoded IR: %m\n");
			return 1;
		}

		if (event.type == EV_MSC && event.code == MSC_SCAN &&
		    event.value == 0xdead) {
			break;
		}
	}

	/* Write raw IR */
	ret = write(lircfd, &testir2, sizeof(testir2));
	if (ret != sizeof(testir2)) {
		printf("Failed to send test IR message: %m\n");
		return 1;
	}

	for (;;) {
		poll(&pfd, 1, 100);

		/* Read decoded IR */
		ret = read(inputfd, &event, sizeof(event));
		if (ret != sizeof(event)) {
			printf("Failed to read decoded IR: %m\n");
			return 1;
		}

		if (event.type == EV_REL && event.code == REL_Y &&
		    event.value == 1 ) {
			break;
		}
	}

	prog_cnt = 10;
	ret = bpf_prog_query(lircfd, BPF_LIRC_MODE2, 0, prog_flags, prog_ids,
			     &prog_cnt);
	if (ret) {
		printf("Failed to query bpf programs on lirc device: %m\n");
		return 1;
	}

	if (prog_cnt != 1) {
		printf("Expected one program to be attached\n");
		return 1;
	}

	/* Let's try detaching it now it is actually attached */
	ret = bpf_prog_detach2(progfd, lircfd, BPF_LIRC_MODE2);
	if (ret) {
		printf("bpf_prog_detach2: returned %m\n");
		return 1;
	}

	return 0;
}
| linux-master | tools/testing/selftests/bpf/test_lirc_mode2_user.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include "json_writer.h"
/* True when any verbosity level above VERBOSE_NONE was requested. */
static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}
/* Redirect stdout/stderr of the current (sub)test into an in-memory
 * stream (*log_buf / *log_cnt) so output can be captured per test.
 * No-op in verbose single-process mode. glibc-only: relies on
 * open_memstream() and on reassigning the stdout/stderr globals.
 */
static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		/* capture failed: fall back to the saved real stdout */
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	/* remember the capture stream on the (sub)test state */
	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
	else
		env.test_state->stdout = stdout;

	/* funnel stderr into the same capture stream */
	stderr = stdout;
#endif
}
/* Save the real stdout/stderr into env, then hijack them for log
 * capture via stdio_hijack_init(). No-op in verbose single-process
 * mode.
 */
static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}
/* Close the innermost capture stream. After a sub-test ends, output falls
 * back to the enclosing test's capture stream; after a test ends, stdout
 * is left pointing at the (now closed) stream until stdio_restore() resets
 * it to the real one.
 */
static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}
	fflush(stdout);
	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
	} else {
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;
	}
#endif
}
/* Point stdout/stderr back at the real streams saved by stdio_hijack().
 * Idempotent: returns early when capture is not active.
 */
static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}
	if (stdout == env.stdout)
		return;
	stdio_restore_cleanup();
	stdout = env.stdout;
	stderr = env.stderr;
#endif
}
/* Shell-style wildcard match, adapted from perf/util/string.c:
 * '*' matches any (possibly empty) substring, every other character
 * must match literally. Returns true when @str matches @pat entirely.
 */
static bool glob_match(const char *str, const char *pat)
{
	/* consume the literal prefix up to the first '*' or a mismatch */
	for (; *str && *pat && *pat != '*'; str++, pat++) {
		if (*str != *pat)
			return false;
	}
	if (*pat == '*') {
		do {
			pat++;
		} while (*pat == '*');
		if (!*pat) /* a trailing '*' matches everything left */
			return true;
		/* try matching the rest of the pattern at every tail of str */
		for (; *str; str++) {
			if (glob_match(str, pat))
				return true;
		}
	}
	/* match only if both string and pattern are exhausted */
	return !*str && !*pat;
}
#define EXIT_NO_TEST 2
#define EXIT_ERR_SETUP_INFRA 3
/* defined in test_progs.h */
struct test_env env = {};
/* Static description of one top-level test, generated from DEFINE_TEST(). */
struct prog_test_def {
	const char *test_name;	/* name from DEFINE_TEST(name) */
	int test_num;	/* 1-based test number */
	void (*run_test)(void);	/* parallel-capable entry point (weak, may be NULL) */
	void (*run_serial_test)(void);	/* serial-only entry point (weak, may be NULL) */
	bool should_run;	/* selected by number/name filters */
	bool need_cgroup_cleanup;	/* set by test__join_cgroup() */
};
/* Override the C library's usleep() so that nanosleep() is guaranteed to
 * be the syscall issued: many selftests use usleep() purely as a way to
 * trigger kprobes/tracepoints attached to nanosleep.
 */
int usleep(useconds_t usec)
{
	struct timespec ts;

	ts.tv_sec = usec / 1000000;
	ts.tv_nsec = (usec % 1000000) * 1000;

	return syscall(__NR_nanosleep, &ts, NULL);
}
/* Decide whether top-level test @num/@name is selected by @sel.
 * A denylist entry without subtest patterns vetoes the whole test;
 * otherwise an allowlist match, an empty selection, or an explicitly
 * requested test number admits it.
 */
static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int idx;

	for (idx = 0; idx < sel->blacklist.cnt; idx++) {
		bool name_hit = glob_match(name, sel->blacklist.tests[idx].name);

		if (name_hit && !sel->blacklist.tests[idx].subtest_cnt)
			return false;
	}

	for (idx = 0; idx < sel->whitelist.cnt; idx++) {
		if (glob_match(name, sel->whitelist.tests[idx].name))
			return true;
	}

	/* no positive filters at all: run everything not denied above */
	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}
/* Decide whether @subtest_name of @test_name should run. A denylist entry
 * with no subtest patterns vetoes every subtest of that test; one with
 * patterns vetoes only matching subtests. Allowlist entries admit subtests
 * analogously. Falls back to the "-n NUM/SUB" number selection when no
 * name allowlist applies.
 */
static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;
			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}
	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;
			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}
	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;
	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}
/* Map a (sub)test outcome to its human-readable verdict string;
 * a failure takes precedence over a skip.
 */
static char *test_result(bool failed, bool skipped)
{
	if (failed)
		return "FAIL";
	if (skipped)
		return "SKIP";
	return "OK";
}
#define TEST_NUM_WIDTH 7
/* Print the one-line verdict for a finished top-level test to the real
 * stdout: FAIL, OK, SKIP (everything skipped), or OK with a skip ratio.
 */
static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
{
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;

	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt) {
		fprintf(env.stdout, "FAIL");
	} else if (skipped_cnt && (skipped_cnt == subtests_cnt || !subtests_cnt)) {
		/* either every subtest was skipped, or the test itself was */
		fprintf(env.stdout, "SKIP");
	} else if (skipped_cnt) {
		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
	} else {
		fprintf(env.stdout, "OK");
	}
	fprintf(env.stdout, "\n");
}
/* Dump a captured log buffer to the real stdout, appending a newline if
 * the log does not already end with one. Callers only invoke this with
 * log_cnt > 0; the buffer comes from open_memstream(), which keeps a NUL
 * byte at log_buf[log_cnt] (see the note in jsonw_write_log_message()),
 * so the writes below stay in bounds.
 */
static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
}
/* Print "#NUM/SUB test/subtest[:RESULT]" for a subtest to the real stdout;
 * @result may be NULL to omit the verdict.
 */
static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char num_pair[TEST_NUM_WIDTH + 1];

	snprintf(num_pair, sizeof(num_pair), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, num_pair, test_name, subtest_name);
	if (result)
		fprintf(env.stdout, ":%s", result);
	fputc('\n', env.stdout);
}
/* Emit the "message" JSON field for a (sub)test. open_memstream() (from
 * stdio_hijack_init) NUL-terminates log_buf, but in parallel mode log_buf
 * is NULL when nothing was logged, so substitute an empty string then.
 */
static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	const char *message = log_cnt ? log_buf : "";

	jsonw_string_field(w, "message", message);
}
/* Print the captured log and verdict of one finished test (and its
 * subtests), and, when @w is non-NULL, append the same information to the
 * JSON summary.
 * @skip_ok_subtests: omit passing subtests (used for the error summary)
 * @par_exec_result:  result came back from a worker (parallel mode)
 * No-op in worker processes and in verbose non-parallel runs, where the
 * output already went straight to stdout.
 */
static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
			  json_writer_t *w)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;
	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;
	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;
	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);
	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	}
	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;
		/* filtered-out subtests never appear in the output */
		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;
		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}
		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));
		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i+1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
			jsonw_end_object(w);
		}
	}
	if (w && print_test) {
		jsonw_end_array(w);
		jsonw_end_object(w);
	}
	print_test_result(test, test_state);
}
static void stdio_restore(void);
/* A bunch of tests set custom affinity per-thread and/or per-process.
 * Reset both to "all CPUs" after each test/sub-test, aborting the run if
 * that fails (later tests would silently run on the wrong CPUs).
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	/* sched_setaffinity() follows the -1/errno convention */
	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	/* pthread_setaffinity_np() returns a positive error number (it does
	 * not set errno), so the previous "err < 0" check could never fire
	 */
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}
/* Remember the original network namespace so restore_netns() can return
 * to it after tests that setns() elsewhere; fatal if it cannot be opened.
 */
static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}
/* Switch back to the network namespace saved by save_netns(); tests may
 * have moved the process into a different one. Fatal on failure.
 */
static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		/* report the flag actually used (message used to claim
		 * CLONE_NEWNS, the mount-namespace flag)
		 */
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}
/* Finalize the in-flight sub-test: roll its error/skip outcome into the
 * parent test's counters, optionally print its verdict, and close the
 * sub-test's stdio capture (falling back to the test's capture stream).
 */
void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;
	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}
	/* in parallel mode the dispatcher prints subtest names instead */
	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));
	stdio_restore_cleanup();
	env.subtest_state = NULL;
}
/* Begin a named sub-test within the current test. Finalizes any previous
 * sub-test, appends a new subtest_state slot and redirects stdio into the
 * sub-test's log buffer.
 * Returns true if the sub-test is selected to run, false when it is
 * filtered out or setup failed.
 */
bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	struct subtest_state *new_states;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	/* grow through a temporary so the old array is not leaked when
	 * realloc() fails (was: state->subtest_states = realloc(...))
	 */
	new_states = realloc(state->subtest_states,
			     state->subtest_num * sub_state_size);
	if (!new_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}
	state->subtest_states = new_states;
	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);

	return true;
}
/* Force the current test's log to be printed even if it passes. */
void test__force_log(void)
{
	env.test_state->force_log = true;
}
/* Mark the current sub-test (or, outside one, the current test) skipped. */
void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}
/* Record a failure against the current sub-test or, outside one, the test. */
void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}
/* Create cgroup @path, join it, and return its fd (negative on failure).
 * Sets up the cgroup environment once per test and flags the test for
 * cgroup cleanup afterwards. The caller owns the returned fd.
 */
int test__join_cgroup(const char *path)
{
	int fd;
	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}
		env.test->need_cgroup_cleanup = true;
	}
	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}
	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}
	return fd;
}
/* Look up map @name in @obj and return its fd. On failure, mark the
 * current test failed and return -1.
 */
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, name);

	if (map)
		return bpf_map__fd(map);

	fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
	test__fail();
	return -1;
}
/* Report whether the BPF JIT is enabled according to
 * /proc/sys/net/core/bpf_jit_enable; returns false when the sysctl is
 * absent or unreadable.
 */
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	/* was open(jit_sysctl, 0, O_RDONLY): O_RDONLY ended up in the mode
	 * argument, which is ignored without O_CREAT/O_TMPFILE; the flags
	 * argument only worked by accident because O_RDONLY == 0
	 */
	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		/* any first character other than '0' means some JIT mode is on */
		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}
/* Check that every key of @map1_fd also exists in @map2_fd. Only key
 * presence is verified; values are read into a scratch buffer sized for
 * the largest stack-map value and discarded.
 * Returns 0 when iteration over map1 ended cleanly (ENOENT) with all keys
 * found in map2, non-zero otherwise.
 */
int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;
	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;
	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;
		key = next_key;
	}
	/* the loop must have terminated because map1 ran out of keys */
	if (errno != ENOENT)
		return -1;
	return 0;
}
/* Verify that, for every key in @smap_fd, @amap_fd holds a byte-identical
 * value of @stack_trace_len bytes. Returns 0 when everything matches,
 * non-zero on mismatch, lookup failure, or allocation failure.
 */
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	/* previously unchecked: a failed allocation was dereferenced below */
	if (!val_buf1 || !val_buf2) {
		err = -1;
		goto out;
	}
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	/* iteration must have stopped because there was no next key */
	if (errno != ENOENT)
		err = -1;

out:
	/* free(NULL) is a no-op, so this is safe on the error paths */
	free(val_buf1);
	free(val_buf2);
	return err;
}
/* extern declarations for test funcs */
#define DEFINE_TEST(name) \
extern void test_##name(void) __weak; \
extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST
static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) { \
.test_name = #name, \
.run_test = &test_##name, \
.run_serial_test = &serial_test_##name, \
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};
static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<[email protected]>";
static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"\n"
"These options can be passed repeatedly to read multiple files.\n";
/* argp option keys; values double as the short-option characters,
 * except ARG_DEBUG which is long-option only (negative key).
 */
enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
	ARG_JSON_SUMMARY = 'J'
};
static const struct argp_option opts[] = {
{ "num", ARG_TEST_NUM, "NUM", 0,
"Run test number NUM only " },
{ "name", ARG_TEST_NAME, "NAMES", 0,
"Run tests with names containing any string from NAMES list" },
{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
"Don't run tests with names containing any string from NAMES list" },
{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
"Output verifier statistics", },
{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
"Verbose output (use -vv or -vvv for progressively verbose output)" },
{ "count", ARG_GET_TEST_CNT, NULL, 0,
"Get number of selected top-level tests " },
{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
"List test names that would run (without running them) " },
{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
"Run tests with name matching the pattern (supports '*' wildcard)." },
{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
"Don't run tests with name matching the pattern (supports '*' wildcard)." },
{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
"Number of workers to run in parallel, default to number of cpus." },
{ "debug", ARG_DEBUG, NULL, 0,
"print extra debug information for test_progs." },
{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
{},
};
/* libbpf logging callback: forward everything to stdout, but suppress
 * DEBUG-level messages unless -vv or higher was requested.
 */
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	bool mute_debug = env.verbosity < VERBOSE_VERY;

	if (!(mute_debug && level == LIBBPF_DEBUG))
		vfprintf(stdout, format, args);
	return 0;
}
/* Release every name/subtest string and the tests array owned by @set;
 * the set struct itself is not freed. Safe to call with NULL.
 */
static void free_test_filter_set(const struct test_filter_set *set)
{
	int t, s;

	if (!set)
		return;

	/* order of frees is irrelevant; walk backwards for variety */
	for (t = set->cnt - 1; t >= 0; t--) {
		free((void *)set->tests[t].name);
		for (s = set->tests[t].subtest_cnt - 1; s >= 0; s--)
			free((void *)set->tests[t].subtests[s]);
		free((void *)set->tests[t].subtests);
	}
	free((void *)set->tests);
}
/* Release everything owned by @test_selector (both filter sets and the
 * number set); the selector struct itself is not freed.
 */
static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}
extern int extra_prog_load_log_flags;
/* argp parser callback: translate one command-line option into test_env
 * state. Returns 0 on success, a negative errno-style value on bad input,
 * or ARGP_ERR_UNKNOWN for keys this parser does not handle.
 */
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;
	int err = 0;
	switch (key) {
	case ARG_TEST_NUM: {
		/* "-n NUM[/SUB]": optional subtest number list after '/' */
		char *subtest_str = strchr(arg, '/');
		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		/* "@file" reads patterns from a file, otherwise a CSV list */
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);
		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			/* argp passes "v" for -vv and "vv" for -vvv */
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
					arg);
				return -EINVAL;
			}
		}
		/* propagate verbosity to spawned helpers via the environment */
		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}
		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of worker: %s.", arg);
				return -EINVAL;
			}
		} else {
			/* bare -j: one worker per online CPU */
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open json summary file");
			return -errno;
		}
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return err;
}
/*
* Determine if test_progs is running as a "flavored" test runner and switch
* into corresponding sub-directory to load correct BPF objects.
*
* This is done by looking at executable name. If it contains "-flavor"
* suffix, then we are running as a flavored test runner.
*/
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	flavor = flavor ? flavor + 1 : exec_name;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;	/* unflavored runner: stay where we are */
	flavor++;

	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}
/* Trigger the bpf_testmod read handler by issuing a @read_sz-byte read on
 * its debugfs/sysfs file. Returns 0 on success, -errno if the file could
 * not be opened (the ASSERT also marks the test failed).
 */
int trigger_module_test_read(int read_sz)
{
	int fd, err;
	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;
	/* NULL buffer and ignored return value look intentional: only the
	 * module-side read path matters here, not the copied data —
	 * NOTE(review): confirm the testmod read op tolerates this
	 */
	read(fd, NULL, read_sz);
	close(fd);
	return 0;
}
/* Trigger the bpf_testmod write handler by writing a @write_sz-byte buffer
 * of 'a' characters (NUL-terminated in its last byte) to its test file.
 * Returns 0 on success, -ENOMEM or -errno on failure.
 */
int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);
	if (!buf)
		return -ENOMEM;
	memset(buf, 'a', write_sz);
	buf[write_sz-1] = '\0';
	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}
	/* write() result deliberately ignored: the call only exists to
	 * exercise the module-side handler
	 */
	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}
/* Overwrite sysctl file @sysctl with @value; returns 0 on success, -1 on
 * failure (the ASSERTs also mark the current test failed).
 */
int write_sysctl(const char *sysctl, const char *value)
{
	int fd, written, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	written = write(fd, value, len);
	close(fd);

	/* a short write counts as failure */
	return ASSERT_EQ(written, len, "write sysctl") ? 0 : -1;
}
/* Scan @btf for an anonymous enum containing the BPF_MAX_TRAMP_LINKS
 * enumerator and return its value, or -1 when it cannot be found.
 */
int get_bpf_max_tramp_links_from(struct btf *btf)
{
	const struct btf_enum *e;
	const struct btf_type *t;
	__u32 i, type_cnt;
	const char *name;
	__u16 j, vlen;
	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		/* only anonymous enums are candidates (t->name_off == 0) */
		if (!t || !btf_is_enum(t) || t->name_off)
			continue;
		e = btf_enum(t);
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
				return e->val;
		}
	}
	return -1;
}
/* Load vmlinux BTF and look up the kernel's BPF_MAX_TRAMP_LINKS value;
 * returns -1 on load or lookup failure.
 */
int get_bpf_max_tramp_links(void)
{
	struct btf *vmlinux_btf;
	int ret;
	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
		return -1;
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);
	return ret;
}
#define MAX_BACKTRACE_SZ 128
/* Fatal-signal handler: restore real stdio, account the crash as a test
 * failure, dump the current test's log, then print a backtrace to stderr.
 */
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;
	sz = backtrace(bt, ARRAY_SIZE(bt));
	/* env.stdout is only set once hijacking started */
	if (env.stdout)
		stdio_restore();
	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	}
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}
/* SIGINT handler (server side): close every worker socket so the
 * dispatcher threads see an I/O error and wind down.
 */
static void sigint_handler(int signum)
{
	int w;

	for (w = 0; w < env.workers; w++) {
		if (env.worker_socks[w] > 0)
			close(env.worker_socks[w]);
	}
}
/* next test to hand to a worker; guarded by current_test_lock */
static int current_test_idx;
static pthread_mutex_t current_test_lock;
/* serializes result printing across dispatcher threads */
static pthread_mutex_t stdout_output_lock;
/* Format @msg for debug logging into caller-provided @buf and return @buf.
 * Callers pass a 256-byte buffer; the fixed formats below fit well within
 * that.
 */
static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}
	return buf;
}
/* Send one fixed-size protocol message on @sock; returns send(2)'s result. */
static int send_message(int sock, const struct msg *msg)
{
	char buf[256];
	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}
/* Receive one fixed-size protocol message into @msg (zeroed first);
 * returns recv(2)'s result.
 */
static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];
	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}
/* Execute test @test_num in the current process: capture its stdio into
 * the test's log buffer, run the (parallel or serial) body, finalize any
 * dangling sub-test, and undo global state tests commonly change
 * (CPU affinity, netns, cgroups) before printing the result.
 */
static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];
	env.test = test;
	env.test_state = state;
	stdio_hijack(&state->log_buf, &state->log_cnt);
	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();
	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();
	state->tested = true;
	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);
	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();
	stdio_restore();
	dump_test_log(test, state, false, false, NULL);
}
/* Per-dispatcher-thread context: which worker it drives over which socket. */
struct dispatch_data {
	int worker_id;
	int sock_fd;
};
/* Receive one message into @msg and require it to be of @type.
 * Returns 0 on success, 1 on receive failure or type mismatch.
 */
static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type == type)
		return 0;

	printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
	return 1;
}
/* Collect a chunked MSG_TEST_LOG stream from a worker into an in-memory
 * buffer (*log_buf, *log_cnt) until the is_last chunk arrives.
 * Returns 0 on success, 1 on protocol or stream-creation failure.
 */
static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;
	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;
	while (true) {
		struct msg msg;
		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}
		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}
out:
	/* fclose() finalizes the memstream buffer for the caller */
	fclose(log_fp);
	log_fp = NULL;
	return result;
}
/* Receive one MSG_SUBTEST_DONE (plus optional chunked logs) per subtest of
 * @state from a worker and record them into state->subtest_states.
 * Returns 0 on success, 1 on allocation or protocol failure.
 */
static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
	/* previously unchecked: a NULL here was dereferenced below */
	if (!state->subtest_states)
		return 1;

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		/* previously unchecked strdup() result */
		if (!subtest_state->name)
			return 1;
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}
/* One dispatcher thread per worker: repeatedly claims the next
 * parallel-capable test under current_test_lock, commands the worker to
 * run it, collects results/logs/subtests, and prints them under
 * stdout_output_lock. Serial tests are skipped here (server_main() runs
 * them afterwards). On exit, a MSG_EXIT is sent so the worker terminates.
 */
static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;
	sock_fd = data->sock_fd;
	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;
		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);
			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}
			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;
			pthread_mutex_unlock(&current_test_lock);
		}
		if (!test->should_run || test->run_serial_test)
			continue;
		/* run test through worker */
		{
			struct msg msg_do_test;
			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}
		/* wait for test done */
		do {
			struct msg msg;
			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;
			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;
			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}
			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;
			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);
		/* one test's output at a time across dispatcher threads */
		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
done:
	{
		struct msg msg_exit;
		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}
/* Tally pass/fail/skip counts across all executed tests, optionally emit
 * the JSON summary file, re-print failed tests' logs in non-verbose mode,
 * and store the totals back into @env for main() to report.
 */
static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;
	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];
		if (!state->tested)
			continue;
		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;
		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}
	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr, "Failed to create new JSON stream.");
	}
	if (w) {
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}
	/*
	 * We only print error logs summary when there are failed tests and
	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
	 *
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");
		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];
			if (!state->tested || !state->error_cnt)
				continue;
			dump_test_log(test, state, true, true, w);
		}
	}
	if (w) {
		jsonw_end_array(w);
		jsonw_end_object(w);
		jsonw_destroy(&w);
	}
	if (env->json)
		fclose(env->json);
	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}
/* Coordinator for parallel mode: spawn one dispatcher thread per worker,
 * wait for all of them, then run serial tests locally, print the summary
 * and reap the worker processes.
 */
static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;
	sigaction(SIGINT, &sigact_int, NULL);
	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);
	env.worker_current_test = calloc(sizeof(int), env.workers);
	/* previously unchecked: a NULL here would crash the loops below */
	if (!dispatcher_threads || !data || !env.worker_current_test) {
		fprintf(stderr, "Failed to allocate dispatcher state\n");
		exit(EXIT_ERR_SETUP_INFRA);
	}
	for (i = 0; i < env.workers; i++) {
		int rc;
		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		/* pthread_create() reports failure via a positive error
		 * number and does not set errno, so the previous
		 * "rc < 0" + perror() combination could never trigger
		 */
		if (rc) {
			fprintf(stderr, "Failed to launch dispatcher thread: %s\n",
				strerror(rc));
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}
	/* wait for all dispatcher to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);
	/* run serial tests */
	save_netns();
	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];
		if (!test->should_run || !test->run_serial_test)
			continue;
		run_one_test(i);
	}
	/* generate summary */
	fflush(stderr);
	fflush(stdout);
	calculate_summary_and_print_errors(&env);
	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;
		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}
/* Stream @log_buf (@log_cnt bytes) to the dispatcher in chunks of at most
 * MAX_LOG_TRUNK_SIZE as MSG_TEST_LOG messages; the final chunk carries
 * is_last = true so the reader knows when to stop.
 */
static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;
	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;
		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);
		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;
		/* a failed send here leaves the protocol stream unusable */
		assert(send_message(sock, &msg_log) >= 0);
	}
}
/* Release the heap-owned members of @state and reset them; the struct
 * itself is owned by the enclosing subtest_states array.
 */
static void free_subtest_state(struct subtest_state *state)
{
	/* free(NULL) is a no-op, so the old "if (state->log_buf)" guard was
	 * redundant; resetting log_cnt unconditionally is harmless
	 */
	free(state->log_buf);
	state->log_buf = NULL;
	state->log_cnt = 0;
	free(state->name);
	state->name = NULL;
}
/* Send one MSG_SUBTEST_DONE (plus chunked logs when needed) per subtest of
 * @state to the dispatcher, releasing each subtest state as it goes; any
 * remaining states are released on the error path. Returns 0 on success,
 * 1 on send failure.
 */
static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		/* logs are shipped only when someone will print them */
		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		/* free_subtest_state() already frees and NULLs ->name; the
		 * extra free(subtest_state->name) that followed it here was
		 * a redundant free(NULL) and has been dropped
		 */
		free_subtest_state(subtest_state);
	}

out:
	/* on error, release the states the loop above did not reach */
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}
/* Worker process loop: execute MSG_DO_TEST commands from the dispatcher,
 * ship back per-test results, logs and subtest states; exits on MSG_EXIT,
 * socket error, or an unknown message type.
 */
static int worker_main(int sock)
{
	save_netns();
	while (true) {
		/* receive command */
		struct msg msg;
		if (recv_message(sock, &msg) < 0)
			goto out;
		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;
			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			run_one_test(test_to_run);
			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;
			/* logs are shipped only when someone will print them */
			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}
			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);
			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}
			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;
			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n",  env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}
/* Free every test's per-subtest state and log buffer, nulling the
 * pointers afterwards.
 */
static void free_test_states(void)
{
	int t, s;

	for (t = 0; t < ARRAY_SIZE(prog_test_defs); t++) {
		struct test_state *ts = &test_states[t];

		for (s = 0; s < ts->subtest_num; s++)
			free_subtest_state(&ts->subtest_states[s]);

		free(ts->subtest_states);
		free(ts->log_buf);
		ts->subtest_states = NULL;
		ts->log_buf = NULL;
	}
}
int main(int argc, char **argv)
{
static const struct argp argp = {
.options = opts,
.parser = parse_arg,
.doc = argp_program_doc,
};
struct sigaction sigact = {
.sa_handler = crash_handler,
.sa_flags = SA_RESETHAND,
};
int err, i;
sigaction(SIGSEGV, &sigact, NULL);
err = argp_parse(&argp, argc, argv, 0, NULL, &env);
if (err)
return err;
err = cd_flavor_subdir(argv[0]);
if (err)
return err;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
libbpf_set_print(libbpf_print_fn);
srand(time(NULL));
env.jit_enabled = is_jit_enabled();
env.nr_cpus = libbpf_num_possible_cpus();
if (env.nr_cpus < 0) {
fprintf(stderr, "Failed to get number of CPUs: %d!\n",
env.nr_cpus);
return -1;
}
env.stdout = stdout;
env.stderr = stderr;
env.has_testmod = true;
if (!env.list_test_names) {
/* ensure previous instance of the module is unloaded */
unload_bpf_testmod(verbose());
if (load_bpf_testmod(verbose())) {
fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
env.has_testmod = false;
}
}
/* initializing tests */
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
test->test_num = i + 1;
test->should_run = should_run(&env.test_selector,
test->test_num, test->test_name);
if ((test->run_test == NULL && test->run_serial_test == NULL) ||
(test->run_test != NULL && test->run_serial_test != NULL)) {
fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n",
test->test_num, test->test_name, test->test_name, test->test_name);
exit(EXIT_ERR_SETUP_INFRA);
}
}
/* ignore workers if we are just listing */
if (env.get_test_cnt || env.list_test_names)
env.workers = 0;
/* launch workers if requested */
env.worker_id = -1; /* main process */
if (env.workers) {
env.worker_pids = calloc(sizeof(__pid_t), env.workers);
env.worker_socks = calloc(sizeof(int), env.workers);
if (env.debug)
fprintf(stdout, "Launching %d workers.\n", env.workers);
for (i = 0; i < env.workers; i++) {
int sv[2];
pid_t pid;
if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
perror("Fail to create worker socket");
return -1;
}
pid = fork();
if (pid < 0) {
perror("Failed to fork worker");
return -1;
} else if (pid != 0) { /* main process */
close(sv[1]);
env.worker_pids[i] = pid;
env.worker_socks[i] = sv[0];
} else { /* inside each worker process */
close(sv[0]);
env.worker_id = i;
return worker_main(sv[1]);
}
}
if (env.worker_id == -1) {
server_main();
goto out;
}
}
/* The rest of the main process */
/* on single mode */
save_netns();
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
if (!test->should_run)
continue;
if (env.get_test_cnt) {
env.succ_cnt++;
continue;
}
if (env.list_test_names) {
fprintf(env.stdout, "%s\n", test->test_name);
env.succ_cnt++;
continue;
}
run_one_test(i);
}
if (env.get_test_cnt) {
printf("%d\n", env.succ_cnt);
goto out;
}
if (env.list_test_names)
goto out;
calculate_summary_and_print_errors(&env);
close(env.saved_netns_fd);
out:
if (!env.list_test_names && env.has_testmod)
unload_bpf_testmod(verbose());
free_test_selector(&env.test_selector);
free_test_selector(&env.subtest_selector);
free_test_states();
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
return EXIT_NO_TEST;
return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
| linux-master | tools/testing/selftests/bpf/test_progs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "bpf_util.h"
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
/* Limits on how many interfaces we track and how large an ifindex we accept. */
#define MAX_IFACE_NUM 32
#define MAX_INDEX_NUM 1024

/* XDP attach flags; mode bits are added in main() based on options. */
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
/* Zero-terminated list of ifindexes we attached to (for cleanup). */
static int ifaces[MAX_IFACE_NUM] = {};
/* SIGINT/SIGTERM handler: detach our XDP program from every configured
 * interface, then exit.
 */
static void int_exit(int sig)
{
	__u32 cur_prog_id = 0;
	int idx = 0;

	while (ifaces[idx] > 0) {
		if (bpf_xdp_query_id(ifaces[idx], xdp_flags, &cur_prog_id)) {
			printf("bpf_xdp_query_id failed\n");
			exit(1);
		}
		if (cur_prog_id)
			bpf_xdp_detach(ifaces[idx], xdp_flags, NULL);
		idx++;
	}
	exit(0);
}
/* Look up the hardware (MAC) address of interface @ifindex via the
 * SIOCGIFHWADDR ioctl.  On success copies 6 bytes into @mac_addr and
 * returns 0; returns -1 on any failure.
 */
static int get_mac_addr(unsigned int ifindex, void *mac_addr)
{
	char ifname[IF_NAMESIZE];
	struct ifreq ifr;
	int fd, ret = -1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return ret;

	if (!if_indextoname(ifindex, ifname))
		goto err_out;

	/* Zero the request and use a bounded copy into ifr_name; the old
	 * strcpy() relied on both buffers being IF_NAMESIZE bytes.
	 */
	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);
	if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
		goto err_out;

	memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));

	ret = 0;
err_out:
	close(fd);
	return ret;
}
/* Print command-line usage for @prog to stderr. */
static void usage(const char *prog)
{
	static const char fmt[] =
		"usage: %s [OPTS] <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n"
		"OPTS:\n"
		" -S use skb-mode\n"
		" -N enforce native mode\n"
		" -F force loading prog\n"
		" -X load xdp program on egress\n";

	fprintf(stderr, fmt, prog);
}
/* Attach the multicast-redirect XDP program to each interface named on
 * the command line; with -X also attach a second program on the devmap
 * egress path.  Sleeps afterwards so the programs stay loaded for the
 * duration of the test.
 */
int main(int argc, char **argv)
{
	int prog_fd, group_all, mac_map;
	struct bpf_program *ingress_prog, *egress_prog;
	int i, err, ret, opt, egress_prog_fd = 0;
	struct bpf_devmap_val devmap_val;
	bool attach_egress_prog = false;
	unsigned char mac_addr[6];
	char ifname[IF_NAMESIZE];
	struct bpf_object *obj;
	unsigned int ifindex;
	char filename[256];

	while ((opt = getopt(argc, argv, "SNFX")) != -1) {
		switch (opt) {
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'N':
			/* default, set below */
			break;
		case 'F':
			/* allow replacing an already-attached program */
			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'X':
			attach_egress_prog = true;
			break;
		default:
			usage(basename(argv[0]));
			return 1;
		}
	}

	/* Native (driver) mode unless -S was given; egress programs are
	 * rejected in SKB mode.
	 */
	if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) {
		xdp_flags |= XDP_FLAGS_DRV_MODE;
	} else if (attach_egress_prog) {
		printf("Load xdp program on egress with SKB mode not supported yet\n");
		goto err_out;
	}

	if (optind == argc) {
		printf("usage: %s <IFNAME|IFINDEX> <IFNAME|IFINDEX> ...\n", argv[0]);
		goto err_out;
	}

	printf("Get interfaces:");
	for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
		/* accept either an interface name or a raw ifindex */
		ifaces[i] = if_nametoindex(argv[optind + i]);
		if (!ifaces[i])
			ifaces[i] = strtoul(argv[optind + i], NULL, 0);
		if (!if_indextoname(ifaces[i], ifname)) {
			perror("Invalid interface name or i");
			goto err_out;
		}
		if (ifaces[i] > MAX_INDEX_NUM) {
			printf(" interface index too large\n");
			goto err_out;
		}
		printf(" %d", ifaces[i]);
	}
	printf("\n");

	/* BPF object file is named after this binary */
	snprintf(filename, sizeof(filename), "%s_kern.bpf.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	err = libbpf_get_error(obj);
	if (err)
		goto err_out;
	err = bpf_object__load(obj);
	if (err)
		goto err_out;
	prog_fd = bpf_program__fd(bpf_object__next_program(obj, NULL));

	if (attach_egress_prog)
		group_all = bpf_object__find_map_fd_by_name(obj, "map_egress");
	else
		group_all = bpf_object__find_map_fd_by_name(obj, "map_all");
	mac_map = bpf_object__find_map_fd_by_name(obj, "mac_map");
	if (group_all < 0 || mac_map < 0) {
		printf("bpf_object__find_map_fd_by_name failed\n");
		goto err_out;
	}

	if (attach_egress_prog) {
		/* Find ingress/egress prog for 2nd xdp prog */
		ingress_prog = bpf_object__find_program_by_name(obj, "xdp_redirect_map_all_prog");
		egress_prog = bpf_object__find_program_by_name(obj, "xdp_devmap_prog");
		if (!ingress_prog || !egress_prog) {
			printf("finding ingress/egress_prog in obj file failed\n");
			goto err_out;
		}
		prog_fd = bpf_program__fd(ingress_prog);
		egress_prog_fd = bpf_program__fd(egress_prog);
		if (prog_fd < 0 || egress_prog_fd < 0) {
			printf("find egress_prog fd failed\n");
			goto err_out;
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	/* Init forward multicast groups and exclude group */
	for (i = 0; ifaces[i] > 0; i++) {
		ifindex = ifaces[i];

		if (attach_egress_prog) {
			/* record each interface's MAC for source rewriting */
			ret = get_mac_addr(ifindex, mac_addr);
			if (ret < 0) {
				printf("get interface %d mac failed\n", ifindex);
				goto err_out;
			}
			ret = bpf_map_update_elem(mac_map, &ifindex, mac_addr, 0);
			if (ret) {
				perror("bpf_update_elem mac_map failed\n");
				goto err_out;
			}
		}

		/* Add all the interfaces to group all */
		devmap_val.ifindex = ifindex;
		/* NOTE(review): without -X, egress_prog_fd stays 0 here —
		 * presumably the devmap treats that as "no egress program";
		 * confirm against kernel devmap semantics.
		 */
		devmap_val.bpf_prog.fd = egress_prog_fd;
		ret = bpf_map_update_elem(group_all, &ifindex, &devmap_val, 0);
		if (ret) {
			perror("bpf_map_update_elem");
			goto err_out;
		}

		/* bind prog_fd to each interface */
		ret = bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
		if (ret) {
			printf("Set xdp fd failed on %d\n", ifindex);
			goto err_out;
		}
	}

	/* sleep some time for testing */
	sleep(999);

	return 0;

err_out:
	return 1;
}
| linux-master | tools/testing/selftests/bpf/xdp_redirect_multi.c |
// SPDX-License-Identifier: GPL-2.0
#include <error.h>
#include <errno.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "flow_dissector_load.h"
/* Where the loaded BPF object is pinned so it outlives this process. */
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
/* Name of the program-array map used for flow dissector tail calls. */
const char *cfg_map_name = "jmp_table";
/* Mode: attach (default) or detach, toggled by -a/-d. */
bool cfg_attach = true;
/* Program name (-s) and object path (-p), set by parse_opts(). */
char *cfg_prog_name;
char *cfg_path_name;
/* Load the flow dissector BPF object from cfg_path_name, attach its entry
 * program as the system flow dissector, and pin the object at cfg_pin_path
 * so it survives process exit.  Exits with a message on any failure.
 */
static void load_and_attach_program(void)
{
	struct bpf_object *obj;
	int prog_fd;
	int err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	err = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name,
			    cfg_map_name, NULL, &prog_fd, NULL);
	if (err)
		error(1, 0, "bpf_flow_load %s", cfg_path_name);

	err = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
	if (err)
		error(1, 0, "bpf_prog_attach %s", cfg_path_name);

	err = bpf_object__pin(obj, cfg_pin_path);
	if (err)
		error(1, 0, "bpf_object__pin %s", cfg_pin_path);
}
/* Detach the flow dissector program and remove its pin directory.
 * Exits with a message on any failure.
 */
static void detach_program(void)
{
	char command[64];
	int ret;

	ret = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
	if (ret)
		error(1, 0, "bpf_prog_detach");

	/* To unpin, it is necessary and sufficient to just remove this dir */
	/* snprintf (was sprintf) guards against cfg_pin_path overflowing
	 * the fixed-size command buffer.
	 */
	ret = snprintf(command, sizeof(command), "rm -r %s", cfg_pin_path);
	if (ret < 0 || (size_t)ret >= sizeof(command))
		error(1, 0, "pin path too long: %s", cfg_pin_path);
	ret = system(command);
	if (ret)
		error(1, errno, "%s", command);
}
/* Parse command-line flags:
 *   -a attach (default)   -d detach
 *   -p <path> BPF object  -s <name> program/section name
 * Exits with a message on invalid or conflicting options.
 */
static void parse_opts(int argc, char **argv)
{
	bool attach = false;
	bool detach = false;
	int c;

	while ((c = getopt(argc, argv, "adp:s:")) != -1) {
		switch (c) {
		case 'a':
			if (detach)
				error(1, 0, "attach/detach are exclusive");
			attach = true;
			break;
		case 'd':
			if (attach)
				error(1, 0, "attach/detach are exclusive");
			detach = true;
			break;
		case 'p':
			if (cfg_path_name)
				error(1, 0, "only one path can be given");

			cfg_path_name = optarg;
			break;
		case 's':
			if (cfg_prog_name)
				error(1, 0, "only one prog can be given");

			cfg_prog_name = optarg;
			break;
		default:
			/* Previously unknown options were silently ignored;
			 * reject them so typos don't go unnoticed.
			 */
			error(1, 0, "invalid option");
		}
	}

	if (detach)
		cfg_attach = false;

	if (cfg_attach && !cfg_path_name)
		error(1, 0, "must provide a path to the BPF program");

	if (cfg_attach && !cfg_prog_name)
		error(1, 0, "must provide a section name");
}
/* Entry point: parse options, then attach or detach the dissector. */
int main(int argc, char **argv)
{
	parse_opts(argc, argv);
	if (!cfg_attach)
		detach_program();
	else
		load_and_attach_program();
	return 0;
}
| linux-master | tools/testing/selftests/bpf/flow_dissector_load.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>
#include <sdt.h>
/* Token-pasting helpers used to stamp out many uniquely-named symbols. */
#define __PASTE(a, b) a##b
#define PASTE(a, b) __PASTE(a, b)
#define NAME(name, idx) PASTE(name, idx)
/* DEF emits a trivial function definition; CALL emits a call to it. */
#define DEF(name, idx) int NAME(name, idx)(void) { return 0; }
#define CALL(name, idx) NAME(name, idx)();
#define F(body, name, idx) body(name, idx)
/* F10 / F100 / F1000 / F10000 expand 'body' once per distinct index
 * suffix, multiplying the count by ten at each level.
 */
#define F10(body, name, idx) \
	F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) F(body, PASTE(name, idx), 2) \
	F(body, PASTE(name, idx), 3) F(body, PASTE(name, idx), 4) F(body, PASTE(name, idx), 5) \
	F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) F(body, PASTE(name, idx), 8) \
	F(body, PASTE(name, idx), 9)
#define F100(body, name, idx) \
	F10(body, PASTE(name, idx), 0) F10(body, PASTE(name, idx), 1) F10(body, PASTE(name, idx), 2) \
	F10(body, PASTE(name, idx), 3) F10(body, PASTE(name, idx), 4) F10(body, PASTE(name, idx), 5) \
	F10(body, PASTE(name, idx), 6) F10(body, PASTE(name, idx), 7) F10(body, PASTE(name, idx), 8) \
	F10(body, PASTE(name, idx), 9)
#define F1000(body, name, idx) \
	F100(body, PASTE(name, idx), 0) F100(body, PASTE(name, idx), 1) F100(body, PASTE(name, idx), 2) \
	F100(body, PASTE(name, idx), 3) F100(body, PASTE(name, idx), 4) F100(body, PASTE(name, idx), 5) \
	F100(body, PASTE(name, idx), 6) F100(body, PASTE(name, idx), 7) F100(body, PASTE(name, idx), 8) \
	F100(body, PASTE(name, idx), 9)
#define F10000(body, name, idx) \
	F1000(body, PASTE(name, idx), 0) F1000(body, PASTE(name, idx), 1) F1000(body, PASTE(name, idx), 2) \
	F1000(body, PASTE(name, idx), 3) F1000(body, PASTE(name, idx), 4) F1000(body, PASTE(name, idx), 5) \
	F1000(body, PASTE(name, idx), 6) F1000(body, PASTE(name, idx), 7) F1000(body, PASTE(name, idx), 8) \
	F1000(body, PASTE(name, idx), 9)

/* Define 50000 trivial functions uprobe_multi_func_00000 ... _49999 to
 * serve as uprobe attach targets.
 */
F10000(DEF, uprobe_multi_func_, 0)
F10000(DEF, uprobe_multi_func_, 1)
F10000(DEF, uprobe_multi_func_, 2)
F10000(DEF, uprobe_multi_func_, 3)
F10000(DEF, uprobe_multi_func_, 4)
/* Call every generated uprobe_multi_func_* function exactly once, so an
 * attached uprobe-multi program gets hit on each of the 50000 targets.
 */
static int bench(void)
{
	F10000(CALL, uprobe_multi_func_, 0)
	F10000(CALL, uprobe_multi_func_, 1)
	F10000(CALL, uprobe_multi_func_, 2)
	F10000(CALL, uprobe_multi_func_, 3)
	F10000(CALL, uprobe_multi_func_, 4)
	return 0;
}
/* PROBE places one SDT (USDT) probe point; the PROBE10^k macros repeat
 * it, multiplying by ten per level, mirroring the F10^k scheme above.
 */
#define PROBE STAP_PROBE(test, usdt);
#define PROBE10 PROBE PROBE PROBE PROBE PROBE \
		PROBE PROBE PROBE PROBE PROBE
#define PROBE100 PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 \
		 PROBE10 PROBE10 PROBE10 PROBE10 PROBE10
#define PROBE1000 PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 \
		  PROBE100 PROBE100 PROBE100 PROBE100 PROBE100
#define PROBE10000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 \
		   PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000

/* Execute 50000 distinct USDT probe sites in one call. */
static int usdt(void)
{
	PROBE10000
	PROBE10000
	PROBE10000
	PROBE10000
	PROBE10000
	return 0;
}
/* Dispatch on the single argument: "bench" exercises the generated
 * functions, "usdt" the USDT probe sites; anything else prints usage.
 */
int main(int argc, char **argv)
{
	if (argc == 2) {
		if (strcmp(argv[1], "bench") == 0)
			return bench();
		if (strcmp(argv[1], "usdt") == 0)
			return usdt();
	}

	fprintf(stderr, "usage: %s <bench|usdt>\n", argv[0]);
	return -1;
}
| linux-master | tools/testing/selftests/bpf/uprobe_multi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <argp.h>
#include <linux/compiler.h>
#include <sys/time.h>
#include <sched.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <signal.h>
#include "bench.h"
#include "testing_helpers.h"
/* Global benchmark configuration; these defaults are overridden by the
 * argp command-line parser below.
 */
struct env env = {
	.warmup_sec = 1,	/* seconds to run before measuring */
	.duration_sec = 5,	/* measured seconds */
	.affinity = false,
	.quiet = false,
	.consumer_cnt = 0,
	.producer_cnt = 1,
};
/* libbpf log callback: forward everything to stderr, but suppress
 * debug-level chatter unless -v was given.
 */
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (!env.verbose && level == LIBBPF_DEBUG)
		return 0;
	return vfprintf(stderr, format, args);
}
/* Put libbpf into strict 1.0 API mode and install our log callback. */
void setup_libbpf(void)
{
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);
}
/* Per-iteration progress line for benchmarks that track false hits. */
void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns)
{
	long total_ops = res->false_hits + res->hits + res->drops;
	double pct = ((float)res->false_hits / total_ops) * 100;

	printf("Iter %3d (%7.3lfus): "
	       "%ld false hits of %ld total operations. Percentage = %2.2f %%\n",
	       iter, (delta_ns - 1000000000) / 1000.0,
	       res->false_hits, total_ops, pct);
}
/* Aggregate false-hit percentage across all collected iterations. */
void false_hits_report_final(struct bench_res res[], int res_cnt)
{
	long total_hits = 0, total_drops = 0, total_false_hits = 0, total_ops = 0;
	int it;

	for (it = 0; it < res_cnt; it++) {
		total_hits += res[it].hits;
		total_false_hits += res[it].false_hits;
		total_drops += res[it].drops;
	}
	total_ops = total_hits + total_false_hits + total_drops;

	printf("Summary: %ld false hits of %ld total operations. "
	       "Percentage = %2.2f %%\n",
	       total_false_hits, total_ops,
	       ((float)total_false_hits / total_ops) * 100);
}
/* Per-iteration progress line reporting hit and drop rates in M ops/s. */
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
{
	double secs = delta_ns / 1000000000.0;
	double hits_per_sec = res->hits / 1000000.0 / secs;
	double drops_per_sec = res->drops / 1000000.0 / secs;
	double hits_per_prod = hits_per_sec / env.producer_cnt;

	printf("Iter %3d (%7.3lfus): "
	       "hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s, total operations %8.3lfM/s\n",
	       iter, (delta_ns - 1000000000) / 1000.0,
	       hits_per_sec, hits_per_prod, drops_per_sec,
	       hits_per_sec + drops_per_sec);
}
/* Compute mean and sample standard deviation of grace-period latency
 * (ns converted to us, per grace period) across all iterations; results
 * are written into *gp_stat.
 */
void
grace_period_latency_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
{
	int i;

	memset(gp_stat, 0, sizeof(struct basic_stats));

	/* mean of per-iteration us-per-grace-period */
	for (i = 0; i < res_cnt; i++)
		gp_stat->mean += res[i].gp_ns / 1000.0 / (double)res[i].gp_ct / (0.0 + res_cnt);

/* deviation of iteration i from the mean (valid only inside the loop) */
#define IT_MEAN_DIFF (res[i].gp_ns / 1000.0 / (double)res[i].gp_ct - gp_stat->mean)
	/* sample variance needs at least two data points (n - 1 divisor) */
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++)
			gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
	}
	gp_stat->stddev = sqrt(gp_stat->stddev);
#undef IT_MEAN_DIFF
}
/* Same as grace_period_latency_basic_stats(), but measured in system
 * CPU ticks (stime) per grace period rather than nanoseconds.
 */
void
grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
{
	int i;

	memset(gp_stat, 0, sizeof(struct basic_stats));

	/* mean of per-iteration ticks-per-grace-period */
	for (i = 0; i < res_cnt; i++)
		gp_stat->mean += res[i].stime / (double)res[i].gp_ct / (0.0 + res_cnt);

/* deviation of iteration i from the mean (valid only inside the loop) */
#define IT_MEAN_DIFF (res[i].stime / (double)res[i].gp_ct - gp_stat->mean)
	/* sample variance needs at least two data points (n - 1 divisor) */
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++)
			gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
	}
	gp_stat->stddev = sqrt(gp_stat->stddev);
#undef IT_MEAN_DIFF
}
/* Final summary: mean and sample stddev of hit, drop, and total
 * operation rates (in M ops/s) across all collected iterations.
 */
void hits_drops_report_final(struct bench_res res[], int res_cnt)
{
	int i;
	double hits_mean = 0.0, drops_mean = 0.0, total_ops_mean = 0.0;
	double hits_stddev = 0.0, drops_stddev = 0.0, total_ops_stddev = 0.0;
	double total_ops;

	for (i = 0; i < res_cnt; i++) {
		hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
		drops_mean += res[i].drops / 1000000.0 / (0.0 + res_cnt);
	}
	total_ops_mean = hits_mean + drops_mean;

	/* sample stddev (n - 1 divisor) needs at least two iterations */
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++) {
			hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
				       (hits_mean - res[i].hits / 1000000.0) /
				       (res_cnt - 1.0);
			drops_stddev += (drops_mean - res[i].drops / 1000000.0) *
					(drops_mean - res[i].drops / 1000000.0) /
					(res_cnt - 1.0);
			total_ops = res[i].hits + res[i].drops;
			total_ops_stddev += (total_ops_mean - total_ops / 1000000.0) *
					    (total_ops_mean - total_ops / 1000000.0) /
					    (res_cnt - 1.0);
		}
		hits_stddev = sqrt(hits_stddev);
		drops_stddev = sqrt(drops_stddev);
		total_ops_stddev = sqrt(total_ops_stddev);
	}
	/* \u00B1 is the plus-minus sign */
	printf("Summary: hits %8.3lf \u00B1 %5.3lfM/s (%7.3lfM/prod), ",
	       hits_mean, hits_stddev, hits_mean / env.producer_cnt);
	printf("drops %8.3lf \u00B1 %5.3lfM/s, ",
	       drops_mean, drops_stddev);
	printf("total operations %8.3lf \u00B1 %5.3lfM/s\n",
	       total_ops_mean, total_ops_stddev);
}
/* Per-iteration progress line reporting only the hit rate. */
void ops_report_progress(int iter, struct bench_res *res, long delta_ns)
{
	double rate = res->hits / 1000000.0 / (delta_ns / 1000000000.0);
	double rate_per_prod = rate / env.producer_cnt;

	printf("Iter %3d (%7.3lfus): "
	       "hits %8.3lfM/s (%7.3lfM/prod)\n",
	       iter, (delta_ns - 1000000000) / 1000.0,
	       rate, rate_per_prod);
}
/* Final summary: mean/stddev throughput in M ops/s, plus the derived
 * per-operation latency.
 */
void ops_report_final(struct bench_res res[], int res_cnt)
{
	double hits_mean = 0.0, hits_stddev = 0.0;
	int i;

	for (i = 0; i < res_cnt; i++)
		hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);

	/* sample stddev (n - 1 divisor) needs at least two iterations */
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++)
			hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
				       (hits_mean - res[i].hits / 1000000.0) /
				       (res_cnt - 1.0);

		hits_stddev = sqrt(hits_stddev);
	}
	printf("Summary: throughput %8.3lf \u00B1 %5.3lf M ops/s (%7.3lfM ops/prod), ",
	       hits_mean, hits_stddev, hits_mean / env.producer_cnt);
	/* latency = 1 / per-producer throughput, in ns per op */
	printf("latency %8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt);
}
/* Per-iteration progress line for local-storage benchmarks, reporting
 * both overall and "important" hit rates.
 */
void local_storage_report_progress(int iter, struct bench_res *res,
				   long delta_ns)
{
	double secs = delta_ns / 1000000000.0;
	double all_rate = res->hits / 1000000.0 / secs;
	double important_rate = res->important_hits / 1000000.0 / secs;

	printf("Iter %3d (%7.3lfus): "
	       "hits %8.3lfM/s important_hits %8.3lfM/s\n",
	       iter, (delta_ns - 1000000000) / 1000.0,
	       all_rate, important_rate);
}
/* Final summary for local-storage benchmarks: mean/stddev of both hit
 * series, plus the derived per-hit latency.
 */
void local_storage_report_final(struct bench_res res[], int res_cnt)
{
	double important_hits_mean = 0.0, important_hits_stddev = 0.0;
	double hits_mean = 0.0, hits_stddev = 0.0;
	int i;

	for (i = 0; i < res_cnt; i++) {
		hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
		important_hits_mean += res[i].important_hits / 1000000.0 / (0.0 + res_cnt);
	}

	/* sample stddev (n - 1 divisor) needs at least two iterations */
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++) {
			hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
				       (hits_mean - res[i].hits / 1000000.0) /
				       (res_cnt - 1.0);
			important_hits_stddev +=
				(important_hits_mean - res[i].important_hits / 1000000.0) *
				(important_hits_mean - res[i].important_hits / 1000000.0) /
				(res_cnt - 1.0);
		}

		hits_stddev = sqrt(hits_stddev);
		important_hits_stddev = sqrt(important_hits_stddev);
	}
	printf("Summary: hits throughput %8.3lf \u00B1 %5.3lf M ops/s, ",
	       hits_mean, hits_stddev);
	printf("hits latency %8.3lf ns/op, ", 1000.0 / hits_mean);
	printf("important_hits throughput %8.3lf \u00B1 %5.3lf M ops/s\n",
	       important_hits_mean, important_hits_stddev);
}
/* argp boilerplate: version/bug-address strings and top-level help text. */
const char *argp_program_version = "benchmark";
const char *argp_program_bug_address = "<[email protected]>";
const char argp_program_doc[] =
"benchmark    Generic benchmarking framework.\n"
"\n"
"This tool runs benchmarks.\n"
"\n"
"USAGE: benchmark <bench-name>\n"
"\n"
"EXAMPLES:\n"
"    # run 'count-local' benchmark with 1 producer and 1 consumer\n"
"    benchmark count-local\n"
"    # run 'count-local' with 16 producer and 8 consumer thread, pinned to CPUs\n"
"    benchmark -p16 -c8 -a count-local\n";

/* Keys for long-only options (outside the printable-char range). */
enum {
	ARG_PROD_AFFINITY_SET = 1000,
	ARG_CONS_AFFINITY_SET = 1001,
};

/* Common options shared by all benchmarks; benchmark-specific options
 * come from the child parsers below.
 */
static const struct argp_option opts[] = {
	{ "list", 'l', NULL, 0, "List available benchmarks"},
	{ "duration", 'd', "SEC", 0, "Duration of benchmark, seconds"},
	{ "warmup", 'w', "SEC", 0, "Warm-up period, seconds"},
	{ "producers", 'p', "NUM", 0, "Number of producer threads"},
	{ "consumers", 'c', "NUM", 0, "Number of consumer threads"},
	{ "verbose", 'v', NULL, 0, "Verbose debug output"},
	{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
	{ "quiet", 'q', NULL, 0, "Be more quiet"},
	{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
	  "Set of CPUs for producer threads; implies --affinity"},
	{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
	  "Set of CPUs for consumer threads; implies --affinity"},
	{},
};
extern struct argp bench_ringbufs_argp;
extern struct argp bench_bloom_map_argp;
extern struct argp bench_bpf_loop_argp;
extern struct argp bench_local_storage_argp;
extern struct argp bench_local_storage_rcu_tasks_trace_argp;
extern struct argp bench_strncmp_argp;
extern struct argp bench_hashmap_lookup_argp;
extern struct argp bench_local_storage_create_argp;
extern struct argp bench_htab_mem_argp;
/* Child argp parsers: every benchmark family's extra options are merged
 * into the initial command-line parse via this table.
 */
static const struct argp_child bench_parsers[] = {
	{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
	{ &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 },
	{ &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 },
	{ &bench_local_storage_argp, 0, "local_storage benchmark", 0 },
	{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
	{ &bench_local_storage_rcu_tasks_trace_argp, 0,
		"local_storage RCU Tasks Trace slowdown benchmark", 0 },
	{ &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 },
	{ &bench_local_storage_create_argp, 0, "local-storage-create benchmark", 0 },
	{ &bench_htab_mem_argp, 0, "hash map memory benchmark", 0 },
	{},
};

/* Make pos_args global, so that we can run argp_parse twice, if necessary */
static int pos_args;
/* argp callback: validate each option and store it into the global env.
 * The single positional argument is the benchmark name.
 */
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case 'v':
		env.verbose = true;
		break;
	case 'l':
		env.list = true;
		break;
	case 'd':
		env.duration_sec = strtol(arg, NULL, 10);
		if (env.duration_sec <= 0) {
			fprintf(stderr, "Invalid duration: %s\n", arg);
			argp_usage(state);
		}
		break;
	case 'w':
		env.warmup_sec = strtol(arg, NULL, 10);
		if (env.warmup_sec <= 0) {
			fprintf(stderr, "Invalid warm-up duration: %s\n", arg);
			argp_usage(state);
		}
		break;
	case 'p':
		env.producer_cnt = strtol(arg, NULL, 10);
		if (env.producer_cnt <= 0) {
			fprintf(stderr, "Invalid producer count: %s\n", arg);
			argp_usage(state);
		}
		break;
	case 'c':
		env.consumer_cnt = strtol(arg, NULL, 10);
		if (env.consumer_cnt <= 0) {
			fprintf(stderr, "Invalid consumer count: %s\n", arg);
			argp_usage(state);
		}
		break;
	case 'a':
		env.affinity = true;
		break;
	case 'q':
		env.quiet = true;
		break;
	case ARG_PROD_AFFINITY_SET:
		/* explicit CPU set implies --affinity */
		env.affinity = true;
		if (parse_num_list(arg, &env.prod_cpus.cpus,
				   &env.prod_cpus.cpus_len)) {
			fprintf(stderr, "Invalid format of CPU set for producers.");
			argp_usage(state);
		}
		break;
	case ARG_CONS_AFFINITY_SET:
		/* explicit CPU set implies --affinity */
		env.affinity = true;
		if (parse_num_list(arg, &env.cons_cpus.cpus,
				   &env.cons_cpus.cpus_len)) {
			fprintf(stderr, "Invalid format of CPU set for consumers.");
			argp_usage(state);
		}
		break;
	case ARGP_KEY_ARG:
		/* exactly one positional argument: the benchmark name */
		if (pos_args++) {
			fprintf(stderr,
				"Unrecognized positional argument: %s\n", arg);
			argp_usage(state);
		}
		env.bench_name = strdup(arg);
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}
/* First command-line pass: parse with ALL benchmark child parsers so any
 * benchmark-specific option is accepted; this pass mainly discovers the
 * benchmark name.
 */
static void parse_cmdline_args_init(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
		.children = bench_parsers,
	};

	if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
		exit(1);
}
/* Second command-line pass: re-parse with only the selected benchmark's
 * own child parser attached, so its options are validated in isolation.
 * No-op if the benchmark defines no extra options.
 */
static void parse_cmdline_args_final(int argc, char **argv)
{
	struct argp_child bench_parsers[2] = {};
	const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
		.children = bench_parsers,
	};

	/* Parse arguments the second time with the correct set of parsers */
	if (bench->argp) {
		bench_parsers[0].argp = bench->argp;
		bench_parsers[0].header = bench->name;
		/* reset so the positional benchmark name is accepted again */
		pos_args = 0;
		if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
			exit(1);
	}
}
/* Defined near the end of the file; called once per timer tick. */
static void collect_measurements(long delta_ns);

/* Timestamp (ns) of the previous timer tick. */
static __u64 last_time_ns;

/* SIGALRM handler: fires once per second and snapshots the benchmark
 * counters for the elapsed interval.
 * NOTE(review): collect_measurements() prints via stdio, which is not
 * async-signal-safe in general — presumably acceptable for this harness;
 * confirm if hangs are ever observed.
 */
static void sigalarm_handler(int signo)
{
	long new_time_ns = get_time_ns();
	long delta_ns = new_time_ns - last_time_ns;

	collect_measurements(delta_ns);
	last_time_ns = new_time_ns;
}
/* Install the SIGALRM handler and arm a periodic 1-second interval timer
 * that drives per-iteration measurement collection.  Exits on failure.
 * (Declared (void): the old empty parameter list was a pre-C23
 * no-prototype declaration.)
 */
static void setup_timer(void)
{
	static struct sigaction sigalarm_action = {
		.sa_handler = sigalarm_handler,
	};
	struct itimerval timer_settings = {};
	int err;

	last_time_ns = get_time_ns();
	err = sigaction(SIGALRM, &sigalarm_action, NULL);
	if (err < 0) {
		fprintf(stderr, "failed to install SIGALRM handler: %d\n", -errno);
		exit(1);
	}
	/* first expiry after 1s, then every 1s */
	timer_settings.it_interval.tv_sec = 1;
	timer_settings.it_value.tv_sec = 1;
	err = setitimer(ITIMER_REAL, &timer_settings, NULL);
	if (err < 0) {
		fprintf(stderr, "failed to arm interval timer: %d\n", -errno);
		exit(1);
	}
}
/* Pin @thread to @cpu; exits the benchmark on failure. */
static void set_thread_affinity(pthread_t thread, int cpu)
{
	cpu_set_t mask;
	int err;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	err = pthread_setaffinity_np(thread, sizeof(mask), &mask);
	if (err) {
		fprintf(stderr, "setting affinity to CPU #%d failed: %d\n",
			cpu, -err);
		exit(1);
	}
}
/* Pick the next CPU to pin a thread to.  With an explicit CPU set, scan
 * forward for the next enabled CPU (fatal if the set is exhausted);
 * otherwise hand out CPUs round-robin over all available ones.
 */
static int next_cpu(struct cpu_set *cpu_set)
{
	int i;

	if (!cpu_set->cpus)
		return cpu_set->next_cpu++ % env.nr_cpus;

	/* find next available CPU */
	for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {
		if (!cpu_set->cpus[i])
			continue;
		cpu_set->next_cpu = i + 1;
		return i;
	}
	fprintf(stderr, "Not enough CPUs specified, need CPU #%d or higher.\n", i);
	exit(1);
}
/* Runtime state shared between main() and the SIGALRM-driven collector. */
static struct bench_state {
	int res_cnt;			/* number of filled entries in results */
	struct bench_res *results;	/* one entry per measured second */
	pthread_t *consumers;
	pthread_t *producers;
} state;

/* The benchmark selected on the command line; set by find_benchmark(). */
const struct bench *bench = NULL;
extern const struct bench bench_count_global;
extern const struct bench bench_count_local;
extern const struct bench bench_rename_base;
extern const struct bench bench_rename_kprobe;
extern const struct bench bench_rename_kretprobe;
extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
extern const struct bench bench_trig_base;
extern const struct bench bench_trig_tp;
extern const struct bench bench_trig_rawtp;
extern const struct bench bench_trig_kprobe;
extern const struct bench bench_trig_fentry;
extern const struct bench bench_trig_fentry_sleep;
extern const struct bench bench_trig_fmodret;
extern const struct bench bench_trig_uprobe_base;
extern const struct bench bench_trig_uprobe_with_nop;
extern const struct bench bench_trig_uretprobe_with_nop;
extern const struct bench bench_trig_uprobe_without_nop;
extern const struct bench bench_trig_uretprobe_without_nop;
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
extern const struct bench bench_pb_libbpf;
extern const struct bench bench_pb_custom;
extern const struct bench bench_bloom_lookup;
extern const struct bench bench_bloom_update;
extern const struct bench bench_bloom_false_positive;
extern const struct bench bench_hashmap_without_bloom;
extern const struct bench bench_hashmap_with_bloom;
extern const struct bench bench_bpf_loop;
extern const struct bench bench_strncmp_no_helper;
extern const struct bench bench_strncmp_helper;
extern const struct bench bench_bpf_hashmap_full_update;
extern const struct bench bench_local_storage_cache_seq_get;
extern const struct bench bench_local_storage_cache_interleaved_get;
extern const struct bench bench_local_storage_cache_hashmap_control;
extern const struct bench bench_local_storage_tasks_trace;
extern const struct bench bench_bpf_hashmap_lookup;
extern const struct bench bench_local_storage_create;
extern const struct bench bench_htab_mem;
/* Registry of all benchmarks selectable by name on the command line. */
static const struct bench *benchs[] = {
	&bench_count_global,
	&bench_count_local,
	&bench_rename_base,
	&bench_rename_kprobe,
	&bench_rename_kretprobe,
	&bench_rename_rawtp,
	&bench_rename_fentry,
	&bench_rename_fexit,
	&bench_trig_base,
	&bench_trig_tp,
	&bench_trig_rawtp,
	&bench_trig_kprobe,
	&bench_trig_fentry,
	&bench_trig_fentry_sleep,
	&bench_trig_fmodret,
	&bench_trig_uprobe_base,
	&bench_trig_uprobe_with_nop,
	&bench_trig_uretprobe_with_nop,
	&bench_trig_uprobe_without_nop,
	&bench_trig_uretprobe_without_nop,
	&bench_rb_libbpf,
	&bench_rb_custom,
	&bench_pb_libbpf,
	&bench_pb_custom,
	&bench_bloom_lookup,
	&bench_bloom_update,
	&bench_bloom_false_positive,
	&bench_hashmap_without_bloom,
	&bench_hashmap_with_bloom,
	&bench_bpf_loop,
	&bench_strncmp_no_helper,
	&bench_strncmp_helper,
	&bench_bpf_hashmap_full_update,
	&bench_local_storage_cache_seq_get,
	&bench_local_storage_cache_interleaved_get,
	&bench_local_storage_cache_hashmap_control,
	&bench_local_storage_tasks_trace,
	&bench_bpf_hashmap_lookup,
	&bench_local_storage_create,
	&bench_htab_mem,
};
/* Resolve env.bench_name against the registered benchmarks, setting the
 * global 'bench' pointer; exits if the name is missing or unknown.
 */
static void find_benchmark(void)
{
	int i;

	if (!env.bench_name) {
		fprintf(stderr, "benchmark name is not specified\n");
		exit(1);
	}
	for (i = 0; i < ARRAY_SIZE(benchs); i++) {
		if (strcmp(benchs[i]->name, env.bench_name) != 0)
			continue;
		bench = benchs[i];
		break;
	}
	if (!bench) {
		fprintf(stderr, "benchmark '%s' not found\n", env.bench_name);
		exit(1);
	}
}
/*
 * Allocate per-run state and start the selected benchmark: run its
 * optional validate() and setup() hooks, then spawn all consumer threads
 * followed by all producer threads, optionally pinning each thread to
 * the next CPU from the respective affinity set. Exits on any failure.
 */
static void setup_benchmark(void)
{
	int i, err;

	if (!env.quiet)
		printf("Setting up benchmark '%s'...\n", bench->name);

	state.producers = calloc(env.producer_cnt, sizeof(*state.producers));
	state.consumers = calloc(env.consumer_cnt, sizeof(*state.consumers));
	/* one result slot per second of warmup + measurement, plus headroom */
	state.results = calloc(env.duration_sec + env.warmup_sec + 2,
			       sizeof(*state.results));
	if (!state.producers || !state.consumers || !state.results)
		exit(1);

	if (bench->validate)
		bench->validate();
	if (bench->setup)
		bench->setup();

	/* consumers are created (and pinned) before producers */
	for (i = 0; i < env.consumer_cnt; i++) {
		err = pthread_create(&state.consumers[i], NULL,
				     bench->consumer_thread, (void *)(long)i);
		if (err) {
			fprintf(stderr, "failed to create consumer thread #%d: %d\n",
				i, -err);
			exit(1);
		}
		if (env.affinity)
			set_thread_affinity(state.consumers[i],
					    next_cpu(&env.cons_cpus));
	}

	/* unless explicit producer CPU list is specified, continue after
	 * last consumer CPU
	 */
	if (!env.prod_cpus.cpus)
		env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;

	for (i = 0; i < env.producer_cnt; i++) {
		err = pthread_create(&state.producers[i], NULL,
				     bench->producer_thread, (void *)(long)i);
		if (err) {
			fprintf(stderr, "failed to create producer thread #%d: %d\n",
				i, -err);
			exit(1);
		}
		if (env.affinity)
			set_thread_affinity(state.producers[i],
					    next_cpu(&env.prod_cpus));
	}

	if (!env.quiet)
		printf("Benchmark '%s' started.\n", bench->name);
}
/* Condition used by collect_measurements() to wake main() once the last
 * measurement interval has been recorded.
 */
static pthread_mutex_t bench_done_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t bench_done = PTHREAD_COND_INITIALIZER;

/* Periodic timer callback: snapshot one measurement interval into
 * state.results[], optionally report progress, and signal completion
 * after the final (warmup + duration) interval.
 */
static void collect_measurements(long delta_ns) {
	int iter = state.res_cnt++;
	struct bench_res *res = &state.results[iter];

	bench->measure(res);

	if (bench->report_progress)
		bench->report_progress(iter, res, delta_ns);

	/* last interval -> wake up main(), which waits on bench_done */
	if (iter == env.duration_sec + env.warmup_sec) {
		pthread_mutex_lock(&bench_done_mtx);
		pthread_cond_signal(&bench_done);
		pthread_mutex_unlock(&bench_done_mtx);
	}
}
/* Entry point: parse arguments, resolve the requested benchmark, run it,
 * block until collect_measurements() signals that all intervals were
 * recorded, then emit the final report (warmup samples excluded).
 */
int main(int argc, char **argv)
{
	env.nr_cpus = get_nprocs();
	parse_cmdline_args_init(argc, argv);

	/* --list mode: print the registry and exit without running anything */
	if (env.list) {
		int i;

		printf("Available benchmarks:\n");
		for (i = 0; i < ARRAY_SIZE(benchs); i++) {
			printf("- %s\n", benchs[i]->name);
		}
		return 0;
	}

	find_benchmark();
	parse_cmdline_args_final(argc, argv);
	setup_benchmark();
	setup_timer();

	/* block until the final measurement interval fires */
	pthread_mutex_lock(&bench_done_mtx);
	pthread_cond_wait(&bench_done, &bench_done_mtx);
	pthread_mutex_unlock(&bench_done_mtx);

	if (bench->report_final)
		/* skip the warmup samples at the head of the results array */
		bench->report_final(state.results + env.warmup_sec,
				    state.res_cnt - env.warmup_sec);

	return 0;
}
| linux-master | tools/testing/selftests/bpf/bench.c |
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
#include <arpa/inet.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/limits.h>
#include "bpf_util.h"
#include "network_helpers.h"
#include "test_progs.h"
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) ({ \
int __save = errno; \
fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
__FILE__, __LINE__, clean_errno(), \
##__VA_ARGS__); \
errno = __save; \
})
/* Template IPv4/TCP test frame: minimal 20-byte IP header (ihl = 5) and
 * 20-byte TCP header (doff = 5); urg_ptr is set to a non-zero value.
 */
struct ipv4_packet pkt_v4 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
	.iph.ihl = 5,		/* 20-byte header, no IP options */
	.iph.protocol = IPPROTO_TCP,
	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
	.tcp.doff = 5,		/* 20-byte TCP header, no TCP options */
};
/* IPv6 counterpart of pkt_v4 */
struct ipv6_packet pkt_v6 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
	.iph.nexthdr = IPPROTO_TCP,
	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
	.tcp.doff = 5,
};
/* Apply identical receive and send timeouts to @fd. A non-positive
 * @timeout_ms selects the default of 3 seconds. Returns 0 on success,
 * -1 (after log_err) when either setsockopt() fails.
 */
int settimeo(int fd, int timeout_ms)
{
	struct timeval tv = { .tv_sec = 3 };

	if (timeout_ms > 0) {
		tv.tv_sec = timeout_ms / 1000;
		tv.tv_usec = (timeout_ms % 1000) * 1000;
	}

	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))) {
		log_err("Failed to set SO_RCVTIMEO");
		return -1;
	}

	if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv))) {
		log_err("Failed to set SO_SNDTIMEO");
		return -1;
	}

	return 0;
}
/* close(fd) without clobbering the errno of a preceding failure */
#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
/* Create a socket of @type/@protocol, apply the test timeout, bind it to
 * @addr and, for stream sockets, start listening (backlog 1). With
 * @reuseport set, SO_REUSEPORT is enabled before binding. Returns the
 * bound/listening fd, or -1 on error with errno preserved.
 */
static int __start_server(int type, int protocol, const struct sockaddr *addr,
			  socklen_t addrlen, int timeout_ms, bool reuseport)
{
	int on = 1;
	int fd;

	fd = socket(addr->sa_family, type, protocol);
	if (fd < 0) {
		log_err("Failed to create server socket");
		return -1;
	}

	if (settimeo(fd, timeout_ms))
		goto error_close;

	if (reuseport &&
	    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) {
		log_err("Failed to set SO_REUSEPORT");
		goto error_close;
	}

	if (bind(fd, addr, addrlen) < 0) {
		log_err("Failed to bind socket");
		goto error_close;
	}

	if (type == SOCK_STREAM) {
		/* message fixed: original read "Failed to listed on socket" */
		if (listen(fd, 1) < 0) {
			log_err("Failed to listen on socket");
			goto error_close;
		}
	}

	return fd;

error_close:
	save_errno_close(fd);
	return -1;
}
/* Build a sockaddr for @family/@addr_str/@port and hand it to
 * __start_server() without SO_REUSEPORT. Returns the server fd or -1.
 */
static int start_server_proto(int family, int type, int protocol,
			      const char *addr_str, __u16 port, int timeout_ms)
{
	struct sockaddr_storage ss;
	socklen_t ss_len;

	if (make_sockaddr(family, addr_str, port, &ss, &ss_len))
		return -1;

	return __start_server(type, protocol, (struct sockaddr *)&ss,
			      ss_len, timeout_ms, false);
}
/* Start a server with the address family's default protocol (protocol 0).
 * See __start_server() for the semantics of the returned fd.
 */
int start_server(int family, int type, const char *addr_str, __u16 port,
		 int timeout_ms)
{
	return start_server_proto(family, type, 0, addr_str, port, timeout_ms);
}
/* Start a stream server using the MPTCP protocol (IPPROTO_MPTCP). */
int start_mptcp_server(int family, const char *addr_str, __u16 port,
		       int timeout_ms)
{
	return start_server_proto(family, SOCK_STREAM, IPPROTO_MPTCP, addr_str,
				  port, timeout_ms);
}
/* Open @nr_listens SO_REUSEPORT servers on the same address. The first
 * socket may bind to port 0; the effective address is then read back via
 * getsockname() so the remaining sockets share the same port. Returns a
 * malloc'd array of @nr_listens fds (release with free_fds()), or NULL
 * on any failure (already-opened fds are closed).
 */
int *start_reuseport_server(int family, int type, const char *addr_str,
			    __u16 port, int timeout_ms, unsigned int nr_listens)
{
	struct sockaddr_storage addr;
	unsigned int nr_fds = 0;
	socklen_t addrlen;
	int *fds;

	if (!nr_listens)
		return NULL;

	if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
		return NULL;

	fds = malloc(sizeof(*fds) * nr_listens);
	if (!fds)
		return NULL;

	fds[0] = __start_server(type, 0, (struct sockaddr *)&addr, addrlen,
				timeout_ms, true);
	if (fds[0] == -1)
		goto close_fds;
	nr_fds = 1;

	/* learn the kernel-chosen port so all listeners share it */
	if (getsockname(fds[0], (struct sockaddr *)&addr, &addrlen))
		goto close_fds;

	for (; nr_fds < nr_listens; nr_fds++) {
		fds[nr_fds] = __start_server(type, 0, (struct sockaddr *)&addr,
					     addrlen, timeout_ms, true);
		if (fds[nr_fds] == -1)
			goto close_fds;
	}

	return fds;

close_fds:
	free_fds(fds, nr_fds);
	return NULL;
}
/* Close the first @nr_close_fds descriptors of @fds (in reverse order)
 * and release the array. A NULL @fds is a no-op.
 */
void free_fds(int *fds, unsigned int nr_close_fds)
{
	unsigned int i;

	if (!fds)
		return;

	for (i = nr_close_fds; i > 0; i--)
		close(fds[i - 1]);
	free(fds);
}
/* Connect to @server_fd's bound address using TCP Fast Open: @data is
 * carried in the SYN via sendto(MSG_FASTOPEN). Returns the connected
 * client fd, or -1 on error. The sin_family field is read through the
 * IPv4 view of sockaddr_storage (it aliases ss_family, so this also
 * selects AF_INET6 correctly).
 */
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
		     int timeout_ms)
{
	struct sockaddr_storage addr;
	socklen_t addrlen = sizeof(addr);
	struct sockaddr_in *addr_in;
	int fd, ret;

	if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
		log_err("Failed to get server addr");
		return -1;
	}

	addr_in = (struct sockaddr_in *)&addr;
	fd = socket(addr_in->sin_family, SOCK_STREAM, 0);
	if (fd < 0) {
		log_err("Failed to create client socket");
		return -1;
	}

	if (settimeo(fd, timeout_ms))
		goto error_close;

	/* a short write is treated as an error */
	ret = sendto(fd, data, data_len, MSG_FASTOPEN, (struct sockaddr *)&addr,
		     addrlen);
	if (ret != data_len) {
		log_err("sendto(data, %u) != %d\n", data_len, ret);
		goto error_close;
	}

	return fd;

error_close:
	save_errno_close(fd);
	return -1;
}
/* connect() @fd to @addr. With @must_fail set, a successful connect, or
 * a failure with any errno other than EPERM, is reported as an error;
 * otherwise a plain failure is the error. Returns 0 on expected outcome,
 * -1 (after log_err) otherwise.
 */
static int connect_fd_to_addr(int fd,
			      const struct sockaddr_storage *addr,
			      socklen_t addrlen, const bool must_fail)
{
	int ret;

	errno = 0;
	ret = connect(fd, (const struct sockaddr *)addr, addrlen);

	if (!must_fail) {
		if (ret) {
			log_err("Failed to connect to server");
			return -1;
		}
		return 0;
	}

	if (!ret) {
		log_err("Unexpected success to connect to server");
		return -1;
	}
	if (errno != EPERM) {
		log_err("Unexpected error from connect to server");
		return -1;
	}
	return 0;
}
/* Zero-initialized fallback when the caller passes opts == NULL. */
static const struct network_helper_opts default_opts;

/* Create a client socket matching @server_fd (type/protocol taken from
 * @opts when set, otherwise queried from the server socket), apply the
 * timeout and optional TCP congestion control, and — unless
 * opts->noconnect — connect it to the server's bound address.
 * Returns the client fd, or -1 on error.
 */
int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
{
	struct sockaddr_storage addr;
	struct sockaddr_in *addr_in;
	socklen_t addrlen, optlen;
	int fd, type, protocol;

	if (!opts)
		opts = &default_opts;

	optlen = sizeof(type);

	if (opts->type) {
		type = opts->type;
	} else {
		/* message fixed: the option is SO_TYPE, not "SOL_TYPE" */
		if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
			log_err("getsockopt(SO_TYPE)");
			return -1;
		}
	}

	if (opts->proto) {
		protocol = opts->proto;
	} else {
		/* message fixed: the option is SO_PROTOCOL, not "SOL_PROTOCOL" */
		if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
			log_err("getsockopt(SO_PROTOCOL)");
			return -1;
		}
	}

	addrlen = sizeof(addr);
	if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
		log_err("Failed to get server addr");
		return -1;
	}

	addr_in = (struct sockaddr_in *)&addr;
	fd = socket(addr_in->sin_family, type, protocol);
	if (fd < 0) {
		log_err("Failed to create client socket");
		return -1;
	}

	if (settimeo(fd, opts->timeout_ms))
		goto error_close;

	if (opts->cc && opts->cc[0] &&
	    setsockopt(fd, SOL_TCP, TCP_CONGESTION, opts->cc,
		       strlen(opts->cc) + 1))
		goto error_close;

	if (!opts->noconnect)
		if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
			goto error_close;

	return fd;

error_close:
	save_errno_close(fd);
	return -1;
}
/* Convenience wrapper around connect_to_fd_opts() that only sets the
 * timeout; everything else is derived from @server_fd.
 */
int connect_to_fd(int server_fd, int timeout_ms)
{
	struct network_helper_opts opts = { .timeout_ms = timeout_ms };

	return connect_to_fd_opts(server_fd, &opts);
}
/* Connect an already-created @client_fd to the address @server_fd is
 * bound to, after applying @timeout_ms to the client. Returns 0 or -1.
 */
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
{
	struct sockaddr_storage server_addr;
	socklen_t addrlen = sizeof(server_addr);

	if (settimeo(client_fd, timeout_ms))
		return -1;

	if (getsockname(server_fd, (struct sockaddr *)&server_addr, &addrlen)) {
		log_err("Failed to get server addr");
		return -1;
	}

	return connect_fd_to_addr(client_fd, &server_addr, addrlen, false) ? -1 : 0;
}
/* Fill @addr (and optionally @len) with a sockaddr for
 * @family/@addr_str/@port. A NULL @addr_str leaves the wildcard
 * (all-zero) address. Returns 0, or -1 on an unsupported family or an
 * unparsable address string.
 */
int make_sockaddr(int family, const char *addr_str, __u16 port,
		  struct sockaddr_storage *addr, socklen_t *len)
{
	switch (family) {
	case AF_INET: {
		struct sockaddr_in *sin = (void *)addr;

		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_port = htons(port);
		if (addr_str &&
		    inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) {
			log_err("inet_pton(AF_INET, %s)", addr_str);
			return -1;
		}
		if (len)
			*len = sizeof(*sin);
		return 0;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sin6 = (void *)addr;

		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = htons(port);
		if (addr_str &&
		    inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) {
			log_err("inet_pton(AF_INET6, %s)", addr_str);
			return -1;
		}
		if (len)
			*len = sizeof(*sin6);
		return 0;
	}
	default:
		return -1;
	}
}
/* Pick a ping invocation suited to @family: plain "ping" for IPv4, and
 * for IPv6 either a dedicated "ping6" binary when available or
 * "ping -6" otherwise.
 */
char *ping_command(int family)
{
	if (family != AF_INET6)
		return "ping";

	/* On some systems 'ping' doesn't support IPv6, so use ping6 if it is present. */
	return system("which ping6 >/dev/null 2>&1") ? "ping -6" : "ping6";
}
/* Handle remembering the caller's original network namespace so
 * close_netns() can restore it.
 */
struct nstoken {
	int orig_netns_fd;	/* fd on /proc/self/ns/net at open_netns() time */
};

/* Enter the named network namespace (/var/run/netns/<name>).
 * Returns a token for close_netns(), or NULL on failure; the current
 * netns is left unchanged on every error path.
 */
struct nstoken *open_netns(const char *name)
{
	int nsfd;
	char nspath[PATH_MAX];
	int err;
	struct nstoken *token;

	token = calloc(1, sizeof(struct nstoken));
	if (!ASSERT_OK_PTR(token, "malloc token"))
		return NULL;

	token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
		goto fail;

	snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
	nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
	if (!ASSERT_GE(nsfd, 0, "open netns fd"))
		goto fail;

	err = setns(nsfd, CLONE_NEWNET);
	close(nsfd);
	if (!ASSERT_OK(err, "setns"))
		goto fail;

	return token;
fail:
	/* fix: the original leaked orig_netns_fd on the later error paths */
	if (token->orig_netns_fd >= 0)
		close(token->orig_netns_fd);
	free(token);
	return NULL;
}
/* Return to the network namespace saved by open_netns() and release
 * @token. A NULL token is a no-op.
 */
void close_netns(struct nstoken *token)
{
	if (!token)
		return;

	ASSERT_OK(setns(token->orig_netns_fd, CLONE_NEWNET), "setns");
	close(token->orig_netns_fd);
	free(token);
}
/* Return the local port @sock_fd is bound to, in network byte order,
 * or a negative value on getsockname() failure or an address family
 * other than AF_INET/AF_INET6.
 */
int get_socket_local_port(int sock_fd)
{
	struct sockaddr_storage ss;
	socklen_t ss_len = sizeof(ss);

	if (getsockname(sock_fd, (struct sockaddr *)&ss, &ss_len) < 0)
		return -1;

	switch (ss.ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)&ss)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *)&ss)->sin6_port;
	default:
		return -1;
	}
}
| linux-master | tools/testing/selftests/bpf/network_helpers.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "sdt.h"
/* Fire the "urand:read_without_sema" USDT probe with the iteration
 * number, total iteration count and read size as arguments.
 */
void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz)
{
	/* semaphore-less USDT */
	STAP_PROBE3(urand, read_without_sema, iter_num, iter_cnt, read_sz);
}
| linux-master | tools/testing/selftests/bpf/urandom_read_aux.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <bpf/bpf.h>
#include "cgroup_helpers.h"
#include <bpf/bpf_endian.h>
#include "bpf_util.h"
#define CG_PATH "/foo"
#define MAX_INSNS 512
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static bool verbose = false;
/* One BPF cgroup/sock test case: a raw BPF program, the socket operation
 * it is exercised against, and the expected outcome.
 */
struct sock_test {
	const char *descr;			/* human-readable test name */
	/* BPF prog properties */
	struct bpf_insn insns[MAX_INSNS];	/* zero-padded; see probe_prog_length() */
	enum bpf_attach_type expected_attach_type;	/* passed at load time */
	enum bpf_attach_type attach_type;		/* used at attach time */
	/* Socket properties */
	int domain;
	int type;
	/* Endpoint to bind() to */
	const char *ip;
	unsigned short port;
	unsigned short port_retry;	/* second port to try after an EPERM bind */
	/* Expected test result */
	enum {
		LOAD_REJECT,
		ATTACH_REJECT,
		BIND_REJECT,
		SUCCESS,
		RETRY_SUCCESS,
		RETRY_REJECT
	} result;
};
static struct sock_test tests[] = {
{
.descr = "bind4 load with invalid access: src_ip6",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip6[0])),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = LOAD_REJECT,
},
{
.descr = "bind4 load with invalid access: mark",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, mark)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = LOAD_REJECT,
},
{
.descr = "bind6 load with invalid access: src_ip4",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip4)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.result = LOAD_REJECT,
},
{
.descr = "sock_create load with invalid access: src_port",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.result = LOAD_REJECT,
},
{
.descr = "sock_create load w/o expected_attach_type (compat mode)",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = 0,
.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "127.0.0.1",
.port = 8097,
.result = SUCCESS,
},
{
.descr = "sock_create load w/ expected_attach_type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "127.0.0.1",
.port = 8097,
.result = SUCCESS,
},
{
.descr = "attach type mismatch bind4 vs bind6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.result = ATTACH_REJECT,
},
{
.descr = "attach type mismatch bind6 vs bind4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = ATTACH_REJECT,
},
{
.descr = "attach type mismatch default vs bind4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = 0,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = ATTACH_REJECT,
},
{
.descr = "attach type mismatch bind6 vs sock_create",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
.result = ATTACH_REJECT,
},
{
.descr = "bind4 reject all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "0.0.0.0",
.result = BIND_REJECT,
},
{
.descr = "bind6 reject all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.domain = AF_INET6,
.type = SOCK_STREAM,
.ip = "::",
.result = BIND_REJECT,
},
{
.descr = "bind6 deny specific IP & port",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip6[3])),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
__bpf_constant_ntohl(0x00000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.domain = AF_INET6,
.type = SOCK_STREAM,
.ip = "::1",
.port = 8193,
.result = BIND_REJECT,
},
{
.descr = "bind4 allow specific IP & port",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip4)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
__bpf_constant_ntohl(0x7F000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
/* return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_A(1),
/* else return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "127.0.0.1",
.port = 4098,
.result = SUCCESS,
},
{
.descr = "bind4 deny specific IP & port of TCP, and retry",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip4)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
__bpf_constant_ntohl(0x7F000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "127.0.0.1",
.port = 4098,
.port_retry = 5000,
.result = RETRY_SUCCESS,
},
{
.descr = "bind4 deny specific IP & port of UDP, and retry",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip4)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
__bpf_constant_ntohl(0x7F000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.domain = AF_INET,
.type = SOCK_DGRAM,
.ip = "127.0.0.1",
.port = 4098,
.port_retry = 5000,
.result = RETRY_SUCCESS,
},
{
.descr = "bind6 deny specific IP & port, and retry",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip6[3])),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
__bpf_constant_ntohl(0x00000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
/* return DENY; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
/* else return ALLOW; */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.domain = AF_INET6,
.type = SOCK_STREAM,
.ip = "::1",
.port = 8193,
.port_retry = 9000,
.result = RETRY_SUCCESS,
},
{
.descr = "bind4 allow all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.attach_type = BPF_CGROUP_INET4_POST_BIND,
.domain = AF_INET,
.type = SOCK_STREAM,
.ip = "0.0.0.0",
.result = SUCCESS,
},
{
.descr = "bind6 allow all",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
.attach_type = BPF_CGROUP_INET6_POST_BIND,
.domain = AF_INET6,
.type = SOCK_STREAM,
.ip = "::",
.result = SUCCESS,
},
};
/* Return the instruction count of @fp by trimming the all-zero padding
 * at the tail of the MAX_INSNS-sized array (minimum result is 1).
 */
static size_t probe_prog_length(const struct bpf_insn *fp)
{
	size_t pos = MAX_INSNS;

	while (pos > 1 && fp[pos - 1].code == 0 && fp[pos - 1].imm == 0)
		pos--;

	return pos;
}
/* Load @prog as a BPF_PROG_TYPE_CGROUP_SOCK program with the given
 * expected attach type. Returns the prog fd from bpf_prog_load(), or a
 * negative error; the verifier log is dumped to stderr when verbose.
 */
static int load_sock_prog(const struct bpf_insn *prog,
			  enum bpf_attach_type attach_type)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int ret, insn_cnt;

	insn_cnt = probe_prog_length(prog);

	opts.expected_attach_type = attach_type;
	opts.log_buf = bpf_log_buf;
	opts.log_size = BPF_LOG_BUF_SIZE;
	opts.log_level = 2;	/* request a verbose verifier log */

	ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insn_cnt, &opts);
	if (verbose && ret < 0)
		fprintf(stderr, "%s\n", bpf_log_buf);

	return ret;
}
/* Attach @progfd to cgroup @cgfd; BPF_F_ALLOW_OVERRIDE lets subsequent
 * test cases replace the program on the same cgroup.
 */
static int attach_sock_prog(int cgfd, int progfd,
			    enum bpf_attach_type attach_type)
{
	return bpf_prog_attach(progfd, cgfd, attach_type, BPF_F_ALLOW_OVERRIDE);
}
/* bind() a fresh socket of @domain/@type to @ip:@port and classify the
 * outcome: SUCCESS, BIND_REJECT (EPERM, i.e. the BPF program refused
 * it), or — when @port_retry is non-zero — retry once on @port_retry
 * and report RETRY_SUCCESS/RETRY_REJECT. Any unexpected failure
 * returns -1.
 */
static int bind_sock(int domain, int type, const char *ip,
		     unsigned short port, unsigned short port_retry)
{
	struct sockaddr_storage addr;
	struct sockaddr_in6 *addr6;
	struct sockaddr_in *addr4;
	int sockfd = -1;
	socklen_t len;
	int res = SUCCESS;

	sockfd = socket(domain, type, 0);
	if (sockfd < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));

	if (domain == AF_INET) {
		len = sizeof(struct sockaddr_in);
		addr4 = (struct sockaddr_in *)&addr;
		addr4->sin_family = domain;
		addr4->sin_port = htons(port);
		if (inet_pton(domain, ip, (void *)&addr4->sin_addr) != 1)
			goto err;
	} else if (domain == AF_INET6) {
		len = sizeof(struct sockaddr_in6);
		addr6 = (struct sockaddr_in6 *)&addr;
		addr6->sin6_family = domain;
		addr6->sin6_port = htons(port);
		if (inet_pton(domain, ip, (void *)&addr6->sin6_addr) != 1)
			goto err;
	} else {
		goto err;
	}

	if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
		/* sys_bind() may fail for different reasons, errno has to be
		 * checked to confirm that BPF program rejected it.
		 */
		if (errno != EPERM)
			goto err;
		if (port_retry)
			goto retry;
		res = BIND_REJECT;
		goto out;
	}

	goto out;
retry:
	/* second attempt on the alternate port, reusing the same socket */
	if (domain == AF_INET)
		addr4->sin_port = htons(port_retry);
	else
		addr6->sin6_port = htons(port_retry);

	if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
		if (errno != EPERM)
			goto err;
		res = RETRY_REJECT;
	} else {
		res = RETRY_SUCCESS;
	}
	goto out;
err:
	res = -1;
out:
	close(sockfd);
	return res;
}
/* Execute one test case against cgroup @cgfd: load the program, attach
 * it, perform the bind and compare the observed outcome with
 * test->result. Prints a PASS/FAIL verdict line and returns 0 on PASS,
 * -1 on FAIL.
 */
static int run_test_case(int cgfd, const struct sock_test *test)
{
	int progfd = -1;
	int err = 0;
	int res;

	printf("Test case: %s .. ", test->descr);

	progfd = load_sock_prog(test->insns, test->expected_attach_type);
	if (progfd < 0) {
		if (test->result == LOAD_REJECT)
			goto out;	/* rejection was the expected outcome */
		else
			goto err;
	}

	if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) {
		if (test->result == ATTACH_REJECT)
			goto out;
		else
			goto err;
	}

	res = bind_sock(test->domain, test->type, test->ip, test->port,
			test->port_retry);
	if (res > 0 && test->result == res)
		goto out;
err:
	err = -1;
out:
	/* Detaching w/o checking return code: best effort attempt. */
	if (progfd != -1)
		bpf_prog_detach(cgfd, test->attach_type);
	close(progfd);
	printf("[%s]\n", err ? "FAIL" : "PASS");
	return err;
}
/* Run every entry of tests[] against cgroup @cgfd, print a summary and
 * return 0 iff all of them passed.
 */
static int run_tests(int cgfd)
{
	int passes = 0;
	int fails = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (run_test_case(cgfd, &tests[i]) == 0)
			passes++;
		else
			fails++;
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
	return fails ? -1 : 0;
}
/* Set up the CG_PATH test cgroup, run the whole suite inside it and
 * clean up. Returns 0 when every case passed, -1 otherwise.
 */
int main(int argc, char **argv)
{
	int cgfd = -1;
	int err = 0;

	cgfd = cgroup_setup_and_join(CG_PATH);
	if (cgfd < 0)
		goto err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	if (run_tests(cgfd))
		goto err;

	goto out;
err:
	err = -1;
out:
	close(cgfd);
	cleanup_cgroup_environment();
	return err;
}
| linux-master | tools/testing/selftests/bpf/test_sock.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */
/*
* Some functions in this program are taken from
* Linux kernel samples/bpf/xdpsock* and modified
* for use.
*
* See test_xsk.sh for detailed information on test topology
* and prerequisite network setup.
*
* This test program contains two threads, each thread is single socket with
* a unique UMEM. It validates in-order packet delivery and packet content
* by sending packets to each other.
*
* Tests Information:
* ------------------
* These selftests test AF_XDP SKB and Native/DRV modes using veth
* Virtual Ethernet interfaces.
*
* For each mode, the following tests are run:
* a. nopoll - soft-irq processing in run-to-completion mode
* b. poll - using poll() syscall
* c. Socket Teardown
* Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
* both sockets, then repeat multiple times. Only nopoll mode is used
* d. Bi-directional sockets
* Configure sockets as bi-directional tx/rx sockets, sets up fill and
* completion rings on each socket, tx/rx in both directions. Only nopoll
* mode is used
* e. Statistics
* Trigger some error conditions and ensure that the appropriate statistics
* are incremented. Within this test, the following statistics are tested:
* i. rx dropped
* Increase the UMEM frame headroom to a value which results in
* insufficient space in the rx buffer for both the packet and the headroom.
* ii. tx invalid
* Set the 'len' field of tx descriptors to an invalid value (umem frame
* size + 1).
* iii. rx ring full
* Reduce the size of the RX ring to a fraction of the fill ring size.
* iv. fill queue empty
* Do not populate the fill queue and then try to receive pkts.
* f. bpf_link resource persistence
* Configure sockets at indexes 0 and 1, run a traffic on queue ids 0,
* then remove xsk sockets from queue 0 on both veth interfaces and
* finally run a traffic on queues ids 1
* g. unaligned mode
* h. tests for invalid and corner case Tx descriptors so that the correct ones
* are discarded and let through, respectively.
* i. 2K frame size tests
* j. If multi-buffer is supported, send 9k packets divided into 3 frames
* k. If multi-buffer and huge pages are supported, send 9k packets in a single frame
* using unaligned mode
* l. If multi-buffer is supported, try various nasty combinations of descriptors to
* check if they pass the validation or not
*
* Flow:
* -----
* - Single process spawns two threads: Tx and Rx
* - Each of these two threads attach to a veth interface
* - Each thread creates one AF_XDP socket connected to a unique umem for each
* veth interface
* - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy>
* - Rx thread verifies if all packets were received and delivered in-order,
* and have the right content
*
* Enable/disable packet dump mode:
* --------------------------
* To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
* parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
*/
#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <errno.h>
#include <getopt.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <linux/mman.h>
#include <linux/netdev.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include "xsk_xdp_progs.skel.h"
#include "xsk.h"
#include "xskxceiver.h"
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"
#include "xsk_xdp_metadata.h"
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
strerror(error));
ksft_exit_xfail();
}
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
/* Printable name of the test's XDP attach mode ("BOGUS" for anything
 * outside the known enum values).
 */
static char *mode_string(struct test_spec *test)
{
	if (test->mode == TEST_MODE_SKB)
		return "SKB";
	if (test->mode == TEST_MODE_DRV)
		return "DRV";
	if (test->mode == TEST_MODE_ZC)
		return "ZC";
	return "BOGUS";
}
/* Mark @test as failed and print a FAIL line with its mode and name —
 * only the first call per test reports; repeats are no-ops.
 */
static void report_failure(struct test_spec *test)
{
	if (test->fail)
		return;

	ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
			      test->name);
	test->fail = true;
}
/* Fill @size bytes at @dest with 32-bit words in network byte order,
 * each carrying the packet sequence number (@pkt_nb) in its upper 16
 * bits and the word's intra-packet sequence number in the lower 16 bits,
 * starting from byte offset @start. So the 3rd packet's 5th data word
 * holds (2 << 16) | 4, as both sequences are numbered from 0.
 */
static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
	u32 first_word = start / sizeof(u32);
	u32 nb_words = size / sizeof(u32);
	u32 *out = dest;
	u32 i;

	for (i = 0; i < nb_words; i++)
		out[i] = htonl(pkt_nb << 16 | (first_word + i));
}
/* Populate @eth_hdr with the ifobject's MAC pair and the loopback
 * ethertype used by these tests.
 */
static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
{
	eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
	memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
	memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
}
/* True when the ifobject's UMEM has been created. */
static bool is_umem_valid(struct ifobject *ifobj)
{
	return ifobj->umem->umem != NULL;
}
/* Map a test mode to its XDP attach flag; everything except SKB mode
 * (including zero-copy) attaches in driver mode.
 */
static u32 mode_to_xdp_flags(enum test_mode mode)
{
	if (mode == TEST_MODE_SKB)
		return XDP_FLAGS_SKB_MODE;

	return XDP_FLAGS_DRV_MODE;
}
/* Total size in bytes of the UMEM's frame area.
 * NOTE(review): the multiply happens in the operands' native width
 * before widening to u64 — confirm num_frames/frame_size cannot
 * overflow that width for the configured test sizes.
 */
static u64 umem_size(struct xsk_umem_info *umem)
{
	return umem->num_frames * umem->frame_size;
}
/* Register @buffer (@size bytes) as @umem's UMEM area, creating its fill
 * and completion rings. For the RX side of a shared UMEM, the linear
 * allocator is started one umem_size() past the base so TX and RX use
 * disjoint frames (assumes @size spans both halves — TODO confirm from
 * the caller). Returns 0, or the xsk_umem__create() error.
 */
static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
			      u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size,
			       &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	if (ifobj->shared_umem && ifobj->rx_on) {
		/* RX allocates from the second umem_size() chunk of the area */
		umem->base_addr = umem_size(umem);
		umem->next_buffer = umem_size(umem);
	}

	return 0;
}
/* Hand out the next frame address from the UMEM's linear allocator,
 * wrapping back to base_addr once the end of the area is reached.
 */
static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
{
	u64 ret = umem->next_buffer;

	umem->next_buffer += umem->frame_size;
	if (umem->next_buffer >= umem->base_addr + umem_size(umem))
		umem->next_buffer = umem->base_addr;

	return ret;
}
/* Rewind the UMEM's linear frame allocator to the start of the area. */
static void umem_reset_alloc(struct xsk_umem_info *umem)
{
	umem->next_buffer = 0;
}
/* Switch @xsk's socket to prefer-busy-poll mode with a 20 us busy-poll
 * interval and a BATCH_SIZE packet budget. Exits the process on any
 * setsockopt() failure.
 */
static void enable_busy_poll(struct xsk_socket_info *xsk)
{
	int sock_opt;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = 20;	/* SO_BUSY_POLL: microseconds to busy-wait */
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);

	sock_opt = BATCH_SIZE;	/* SO_BUSY_POLL_BUDGET: packets per poll cycle */
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		exit_with_error(errno);
}
/* Create an AF_XDP socket on ifobject's interface, bound to the given umem.
 * Rx/Tx rings are only requested when the ifobject actually receives/sends.
 * Returns the result of xsk_socket__create() (0 on success).
 */
static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
				  struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;
	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;
	/* Multi-buffer needed when the MTU exceeds a regular Ethernet frame */
	if (ifobject->pkt_stream && ifobject->mtu > MAX_ETH_PKT_SIZE)
		cfg.bind_flags |= XDP_USE_SG;
	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}
/* Probe whether the interface supports zero-copy mode by creating a throwaway
 * umem and socket bound with XDP_ZEROCOPY. Returns true iff the bind succeeds.
 * NOTE(review): this mutates ifobject->bind_flags and ->rx_on without
 * restoring them — presumably callers reinitialize these before real use;
 * confirm against the callers.
 */
static bool ifobj_zc_avail(struct ifobject *ifobject)
{
	size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	struct xsk_socket_info *xsk;
	struct xsk_umem_info *umem;
	bool zc_avail = false;
	void *bufs;
	int ret;
	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);
	umem = calloc(1, sizeof(struct xsk_umem_info));
	if (!umem) {
		munmap(bufs, umem_sz);
		exit_with_error(ENOMEM);
	}
	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);
	xsk = calloc(1, sizeof(struct xsk_socket_info));
	if (!xsk)
		goto out;
	ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
	ifobject->rx_on = true;
	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	ret = __xsk_configure_socket(xsk, umem, ifobject, false);
	if (!ret)
		zc_avail = true;
	/* xsk->xsk is NULL when create failed; delete handles that */
	xsk_socket__delete(xsk->xsk);
	free(xsk);
out:
	munmap(umem->buffer, umem_sz);
	xsk_umem__delete(umem->umem);
	free(umem);
	return zc_avail;
}
/* Command-line options recognized by this test; see usage() for help text. */
static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},	/* first -i is Tx, second is Rx */
	{"busy-poll", no_argument, 0, 'b'},
	{"verbose", no_argument, 0, 'v'},
	{0, 0, 0, 0}
};
/* Print a short usage summary for 'prog'. */
static void usage(const char *prog)
{
	ksft_print_msg(" Usage: %s [OPTIONS]\n"
		       " Options:\n"
		       " -i, --interface Use interface\n"
		       " -v, --verbose Verbose output\n"
		       " -b, --busy-poll Enable busy poll\n",
		       prog);
}
/* An interface is only valid once a non-empty name has been configured. */
static bool validate_interface(struct ifobject *ifobj)
{
	return ifobj->ifname[0] != '\0';
}
/* Parse argv into the two ifobjects. The first -i names the Tx interface,
 * the second the Rx interface; any further -i options are ignored.
 * Unknown options print usage and exit with XFAIL.
 */
static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
			       char **argv)
{
	struct ifobject *ifobj;
	u32 interface_nb = 0;
	int option_index, c;
	opterr = 0;
	for (;;) {
		c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
		if (c == -1)
			break;
		switch (c) {
		case 'i':
			if (interface_nb == 0)
				ifobj = ifobj_tx;
			else if (interface_nb == 1)
				ifobj = ifobj_rx;
			else
				break;
			/* copy at most MAX_INTERFACE_NAME_CHARS bytes of the name */
			memcpy(ifobj->ifname, optarg,
			       min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));
			ifobj->ifindex = if_nametoindex(ifobj->ifname);
			if (!ifobj->ifindex)
				exit_with_error(errno);
			interface_nb++;
			break;
		case 'v':
			opt_verbose = true;
			break;
		case 'b':
			ifobj_tx->busy_poll = true;
			ifobj_rx->busy_poll = true;
			break;
		default:
			usage(basename(argv[0]));
			ksft_exit_xfail();
		}
	}
}
/* Reset test and both ifobjects to their per-test defaults: index 0 is the
 * Tx side, index 1 the Rx side. Umem and all sockets are zeroed, packet
 * streams are reset to the defaults, and the XDP programs/maps are restored.
 */
static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;
	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;
		ifobj->use_metadata = false;
		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
			ifobj->pkt_stream = test->tx_pkt_stream_default;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
			ifobj->pkt_stream = test->rx_pkt_stream_default;
		}
		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		}
	}
	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
	test->mtu = MAX_ETH_PKT_SIZE;
	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
	test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}
/* Fully (re)initialize a test spec for the given mode. The default packet
 * streams survive the memset so they can be reused across tests; the bind
 * flags are derived from the mode (zero-copy vs copy).
 */
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			   struct ifobject *ifobj_rx, enum test_mode mode)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;
	/* preserve the default streams across the memset below */
	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;
	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}
	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}
/* Re-run the per-test defaults without changing mode or bind flags. */
static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}
/* Set the human-readable test name, truncating if necessary.
 * strncpy() does not NUL-terminate when the source is as long as the
 * destination, so terminate explicitly to keep test->name a valid string.
 */
static void test_spec_set_name(struct test_spec *test, const char *name)
{
	strncpy(test->name, name, MAX_TEST_NAME_SIZE - 1);
	test->name[MAX_TEST_NAME_SIZE - 1] = '\0';
}
/* Record the XDP programs and xsk maps to attach on each side of the test. */
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
				   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
				   struct bpf_map *xskmap_tx)
{
	test->xskmap_rx = xskmap_rx;
	test->xskmap_tx = xskmap_tx;
	test->xdp_prog_rx = xdp_prog_rx;
	test->xdp_prog_tx = xdp_prog_tx;
}
static int test_spec_set_mtu(struct test_spec *test, int mtu)
{
int err;
if (test->ifobj_rx->mtu != mtu) {
err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
if (err)
return err;
test->ifobj_rx->mtu = mtu;
}
if (test->ifobj_tx->mtu != mtu) {
err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
if (err)
return err;
test->ifobj_tx->mtu = mtu;
}
return 0;
}
/* Rewind the stream to its first packet; a NULL stream is a no-op. */
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (!pkt_stream)
		return;

	pkt_stream->current_pkt_nb = 0;
}
static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
return NULL;
return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}
/* Return the next packet expected on Rx, skipping invalid (never-received)
 * entries, or NULL at end of stream. *pkts_sent is incremented for every
 * entry examined — including skipped invalid ones — so the caller can
 * account for all packets the sender put on the wire.
 */
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
		pkt_stream->current_pkt_nb++;
	}
	return NULL;
}
/* Release a packet stream and its packet array. */
static void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
	struct pkt *pkts = pkt_stream->pkts;

	free(pkt_stream);
	free(pkts);
}
/* Put the default packet streams back on both interfaces, freeing any
 * test-specific streams that replaced them.
 */
static void pkt_stream_restore_default(struct test_spec *test)
{
	struct pkt_stream *def_tx = test->tx_pkt_stream_default;
	struct pkt_stream *def_rx = test->rx_pkt_stream_default;

	if (test->ifobj_tx->pkt_stream != def_tx) {
		pkt_stream_delete(test->ifobj_tx->pkt_stream);
		test->ifobj_tx->pkt_stream = def_tx;
	}

	if (test->ifobj_rx->pkt_stream != def_rx) {
		pkt_stream_delete(test->ifobj_rx->pkt_stream);
		test->ifobj_rx->pkt_stream = def_rx;
	}
}
/* Allocate a zeroed packet stream with room for nb_pkts packets.
 * Returns NULL on allocation failure.
 */
static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *stream = calloc(1, sizeof(*stream));

	if (!stream)
		return NULL;

	stream->pkts = calloc(nb_pkts, sizeof(*stream->pkts));
	if (!stream->pkts) {
		free(stream);
		return NULL;
	}

	stream->nb_pkts = nb_pkts;
	return stream;
}
/* True when the descriptor options mark this frag as not the last one. */
static bool pkt_continues(u32 options)
{
	return (options & XDP_PKT_CONTD) != 0;
}
/* Divide dividend by divisor, rounding up. */
static u32 ceil_u32(u32 dividend, u32 divisor)
{
	return (dividend + divisor - 1) / divisor;
}
/* Number of frags needed to carry 'pkt'. In normal mode this is simply the
 * packet length divided (rounding up) by the frame size. In verbatim mode
 * each stream entry already is one frag, so walk forward counting entries
 * until the XDP_PKT_CONTD chain ends or an invalid frag is hit.
 */
static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
{
	u32 nb_frags = 1, next_frag;
	if (!pkt)
		return 1;
	if (!pkt_stream->verbatim) {
		if (!pkt->valid || !pkt->len)
			return 1;
		return ceil_u32(pkt->len, frame_size);
	}
	/* Search for the end of the packet in verbatim mode */
	if (!pkt_continues(pkt->options))
		return nb_frags;
	next_frag = pkt_stream->current_pkt_nb;
	pkt++;
	while (next_frag++ < pkt_stream->nb_pkts) {
		nb_frags++;
		if (!pkt_continues(pkt->options) || !pkt->valid)
			break;
		pkt++;
	}
	return nb_frags;
}
/* Configure one packet's offset and length; anything larger than a jumbo
 * frame is marked invalid (it will be dropped, not received).
 */
static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
{
	pkt->offset = offset;
	pkt->len = len;
	pkt->valid = len <= MAX_ETH_JUMBO_SIZE;
}
/* Buffer space a packet of 'len' bytes consumes: whole frames only. */
static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
{
	u32 nb_frames = ceil_u32(len, umem->frame_size);

	return nb_frames * umem->frame_size;
}
/* Build a stream of nb_pkts identical packets of pkt_len bytes each,
 * numbered sequentially. Exits the test on allocation failure.
 */
static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
{
	struct pkt_stream *stream;
	u32 nb;

	stream = __pkt_stream_alloc(nb_pkts);
	if (!stream)
		exit_with_error(ENOMEM);

	stream->nb_pkts = nb_pkts;
	stream->max_pkt_len = pkt_len;
	for (nb = 0; nb < nb_pkts; nb++) {
		pkt_set(umem, &stream->pkts[nb], 0, pkt_len);
		stream->pkts[nb].pkt_nb = nb;
	}

	return stream;
}
static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
struct pkt_stream *pkt_stream)
{
return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}
/* Install fresh, identical packet streams on both the Tx and Rx side. */
static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	test->ifobj_tx->pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
}
/* Clone the interface's current stream, then override every second packet
 * (starting with index 1) with the given length and offset.
 */
static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				      int offset)
{
	struct pkt_stream *new_stream;
	u32 pkt_nb;

	new_stream = pkt_stream_clone(ifobj->umem, ifobj->pkt_stream);
	for (pkt_nb = 1; pkt_nb < ifobj->pkt_stream->nb_pkts; pkt_nb += 2)
		pkt_set(ifobj->umem, &new_stream->pkts[pkt_nb], offset, pkt_len);

	ifobj->pkt_stream = new_stream;
}
/* Apply the every-second-packet replacement on both ends of the connection. */
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}
/* Expect only half the transmitted packets on Rx: mirror the Tx stream and
 * mark every second packet as one that will never arrive.
 */
static void pkt_stream_receive_half(struct test_spec *test)
{
	struct pkt_stream *tx_stream = test->ifobj_tx->pkt_stream;
	struct pkt_stream *rx_stream;
	u32 pkt_nb;

	rx_stream = pkt_stream_generate(test->ifobj_rx->umem, tx_stream->nb_pkts,
					tx_stream->pkts[0].len);
	test->ifobj_rx->pkt_stream = rx_stream;
	for (pkt_nb = 1; pkt_nb < rx_stream->nb_pkts; pkt_nb += 2)
		rx_stream->pkts[pkt_nb].valid = false;
}
/* Address to place the packet at. Valid packets get a freshly allocated
 * umem buffer plus their offset; invalid ones just use the raw offset.
 */
static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
	if (pkt->valid)
		return umem_alloc_buffer(umem) + pkt->offset;

	return pkt->offset;
}
/* Undo the last pkt_stream_get_next_tx_pkt(), putting the packet back. */
static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
{
	pkt_stream->current_pkt_nb--;
}
/* Fill the buffer at 'addr' with packet data. The first frag of a packet
 * (bytes_written == 0) gets an Ethernet header followed by payload; later
 * frags contain payload only, with the sequence numbers continuing from
 * where the previous frag left off. Runt packets are left untouched.
 */
static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb,
			 u32 bytes_written)
{
	void *data = xsk_umem__get_data(ifobject->umem->buffer, addr);
	if (len < MIN_PKT_SIZE)
		return;
	if (!bytes_written) {
		gen_eth_hdr(ifobject, data);
		len -= PKT_HDR_SIZE;
		data += PKT_HDR_SIZE;
	} else {
		/* discount the header already written in the first frag */
		bytes_written -= PKT_HDR_SIZE;
	}
	write_payload(data, pkt_nb, bytes_written, len);
}
/* Build a stream from an explicit frame list. In verbatim mode every frame
 * becomes a stream entry as-is and pkt_nb counts whole packets (a frag chain
 * shares one number). Otherwise, frag chains are folded into single packets:
 * lengths of valid continued frags accumulate until the chain ends.
 */
static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
						       u32 nb_frames, bool verbatim)
{
	u32 i, len = 0, pkt_nb = 0, payload = 0;
	struct pkt_stream *pkt_stream;
	pkt_stream = __pkt_stream_alloc(nb_frames);
	if (!pkt_stream)
		exit_with_error(ENOMEM);
	for (i = 0; i < nb_frames; i++) {
		struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
		struct pkt *frame = &frames[i];
		pkt->offset = frame->offset;
		if (verbatim) {
			*pkt = *frame;
			pkt->pkt_nb = payload;
			/* next packet number once this chain (or invalid frame) ends */
			if (!frame->valid || !pkt_continues(frame->options))
				payload++;
		} else {
			if (frame->valid)
				len += frame->len;
			/* keep accumulating while the chain continues */
			if (frame->valid && pkt_continues(frame->options))
				continue;
			pkt->pkt_nb = pkt_nb;
			pkt->len = len;
			pkt->valid = frame->valid;
			pkt->options = 0;
			len = 0;
		}
		if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
			pkt_stream->max_pkt_len = pkt->len;
		pkt_nb++;
	}
	pkt_stream->nb_pkts = pkt_nb;
	pkt_stream->verbatim = verbatim;
	return pkt_stream;
}
/* Install a custom frame list on both sides: Tx keeps the frames verbatim,
 * Rx folds frag chains into whole packets for validation.
 */
static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	test->ifobj_tx->pkt_stream =
		__pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
	test->ifobj_rx->pkt_stream =
		__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
}
/* Dump cnt payload words as "pkt_nb:seqnum" pairs. Each 32-bit word holds
 * the packet number in the high 16 bits and the sequence number in the low.
 */
static void pkt_print_data(u32 *data, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++, data++) {
		u32 host_word = ntohl(*data);

		fprintf(stdout, "%u:%u ", host_word >> 16, host_word & 0xffff);
	}
}
/* Debug-print a packet: optional Ethernet src/dst MACs, then the first and
 * (when the packet is long enough) last PKT_DUMP_NB_TO_PRINT payload words.
 */
static void pkt_dump(void *pkt, u32 len, bool eth_header)
{
	struct ethhdr *ethhdr = pkt;
	u32 i, *data;
	if (eth_header) {
		/*extract L2 frame */
		fprintf(stdout, "DEBUG>> L2: dst mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			fprintf(stdout, "%02X", ethhdr->h_dest[i]);
		fprintf(stdout, "\nDEBUG>> L2: src mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			fprintf(stdout, "%02X", ethhdr->h_source[i]);
		data = pkt + PKT_HDR_SIZE;
	} else {
		data = pkt;
	}
	/*extract L5 frame */
	fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
	pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
	fprintf(stdout, "....");
	/* also show the tail when the payload is longer than the head dump */
	if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
		fprintf(stdout, "\n.... ");
		pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
			       PKT_DUMP_NB_TO_PRINT);
	}
	fprintf(stdout, "\n---------------------------------------\n");
}
/* Verify the packet landed at the expected offset within its frame.
 * In aligned mode the kernel places data after XDP_PACKET_HEADROOM plus the
 * configured frame headroom; in unaligned mode the packet's own offset is
 * honoured and no extra headroom applies.
 */
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset;
	int pkt_offset = pkt->valid ? pkt->offset : 0;
	if (!umem->unaligned_mode)
		pkt_offset = 0;
	expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
	if (offset == expected_offset)
		return true;
	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}
/* Check the XDP metadata area (stored immediately before the packet data)
 * carries the expected packet number. Returns false and logs on mismatch.
 */
static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct xdp_info *meta = data - sizeof(struct xdp_info);
	if (meta->count != pkt->pkt_nb) {
		ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n",
			       __func__, pkt->pkt_nb, meta->count);
		return false;
	}
	return true;
}
/* Validate one received frag: it must lie inside the umem, not straddle a
 * frame boundary (aligned mode), carry the expected packet number, and its
 * first and last payload words must hold the sequence numbers that follow
 * on from 'bytes_processed' bytes already seen of this packet.
 * Dumps the frag and returns false on any mismatch.
 */
static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
			  u32 bytes_processed)
{
	u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
	void *data = xsk_umem__get_data(umem->buffer, addr);
	/* make the address relative to this socket's half of the umem */
	addr -= umem->base_addr;
	if (addr >= umem->num_frames * umem->frame_size ||
	    addr + len > umem->num_frames * umem->frame_size) {
		ksft_print_msg("Frag invalid addr: %llx len: %u\n", addr, len);
		return false;
	}
	if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
		ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", addr, len);
		return false;
	}
	pkt_data = data;
	if (!bytes_processed) {
		/* first frag: skip the Ethernet header before the payload */
		pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
		len -= PKT_HDR_SIZE;
	} else {
		bytes_processed -= PKT_HDR_SIZE;
	}
	expected_seqnum = bytes_processed / sizeof(*pkt_data);
	seqnum = ntohl(*pkt_data) & 0xffff;
	pkt_nb = ntohl(*pkt_data) >> 16;
	if (expected_pkt_nb != pkt_nb) {
		ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
			       __func__, expected_pkt_nb, pkt_nb);
		goto error;
	}
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}
	/* spot-check the last word too, to catch truncated/corrupted frags */
	words_to_end = len / sizeof(*pkt_data) - 1;
	pkt_data += words_to_end;
	seqnum = ntohl(*pkt_data) & 0xffff;
	expected_seqnum += words_to_end;
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}
	return true;
error:
	pkt_dump(data, len, !bytes_processed);
	return false;
}
/* Check a fully reassembled packet: currently only the total length.
 * Dumps the packet and returns false on mismatch.
 * Both values are u32, so print with %u rather than %d.
 */
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	if (pkt->len != len) {
		ksft_print_msg("[%s] expected packet length [%u], got length [%u]\n",
			       __func__, pkt->len, len);
		pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
		return false;
	}
	return true;
}
/* Nudge the kernel to process the Tx ring via a zero-length sendto().
 * Transient errors (queue full, device busy/down) are tolerated with a
 * short sleep; anything else aborts the test.
 */
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;
	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0)
		return;
	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
		usleep(100);
		return;
	}
	exit_with_error(errno);
}
/* Nudge the kernel to process the Rx ring via a zero-length recvfrom();
 * aborts the test on error.
 */
static void kick_rx(struct xsk_socket_info *xsk)
{
	int ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);

	if (ret < 0)
		exit_with_error(errno);
}
/* Reap up to batch_size completions from the completion ring, decrementing
 * the socket's outstanding_tx counter. Fails the test if more completions
 * arrive than were outstanding. Returns TEST_PASS or TEST_FAILURE.
 */
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;
	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);
	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n", addr);
			return TEST_FAILURE;
		}
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}
	return TEST_PASS;
}
/* Receive and validate every packet in the Rx stream. Frags are validated
 * individually and accumulated until the packet completes; a partially
 * received packet at the end of a batch is cancelled back onto the rings so
 * the next batch restarts it from its first frag. Fill-ring buffers are
 * recycled as descriptors are consumed. The whole loop is bounded by
 * THREAD_TMOUT. Returns TEST_PASS or TEST_FAILURE.
 */
static int receive_pkts(struct test_spec *test, struct pollfd *fds)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
	u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pkt *pkt;
	int ret;
	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		exit_with_error(errno);
	timeradd(&tv_now, &tv_timeout, &tv_end);
	pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
	while (pkt) {
		u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
		u64 first_addr;
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			exit_with_error(errno);
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}
		kick_rx(xsk);
		if (ifobj->use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (ret < 0)
				exit_with_error(errno);
			if (!ret) {
				/* a dead Tx umem means the sender is done; not an error */
				if (!is_umem_valid(test->ifobj_tx))
					return TEST_PASS;
				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
				return TEST_FAILURE;
			}
			if (!(fds->revents & POLLIN))
				continue;
		}
		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
		if (!rcvd)
			continue;
		if (ifobj->use_fill_ring) {
			/* reserve replacement fill entries for everything received */
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			while (ret != rcvd) {
				if (ret < 0)
					exit_with_error(-ret);
				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
					ret = poll(fds, 1, POLL_TMOUT);
					if (ret < 0)
						exit_with_error(errno);
				}
				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			}
		}
		while (frags_processed < rcvd) {
			const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
			u64 addr = desc->addr, orig;
			orig = xsk_umem__extract_addr(addr);
			addr = xsk_umem__add_offset_to_addr(addr);
			if (!pkt) {
				ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
					       __func__, addr, desc->len);
				return TEST_FAILURE;
			}
			if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
			    !is_offset_correct(umem, pkt, addr) ||
			    (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
				return TEST_FAILURE;
			if (!nb_frags++)
				first_addr = addr;
			frags_processed++;
			pkt_len += desc->len;
			if (ifobj->use_fill_ring)
				*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
			if (pkt_continues(desc->options))
				continue;
			/* The complete packet has been received */
			if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
			    !is_offset_correct(umem, pkt, addr))
				return TEST_FAILURE;
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
			nb_frags = 0;
			pkt_len = 0;
		}
		if (nb_frags) {
			/* In the middle of a packet. Start over from beginning of packet. */
			idx_rx -= nb_frags;
			xsk_ring_cons__cancel(&xsk->rx, nb_frags);
			if (ifobj->use_fill_ring) {
				idx_fq -= nb_frags;
				xsk_ring_prod__cancel(&umem->fq, nb_frags);
			}
			frags_processed -= nb_frags;
		}
		if (ifobj->use_fill_ring)
			xsk_ring_prod__submit(&umem->fq, frags_processed);
		if (ifobj->release_rx)
			xsk_ring_cons__release(&xsk->rx, frags_processed);
		pthread_mutex_lock(&pacing_mutex);
		pkts_in_flight -= pkts_sent;
		pthread_mutex_unlock(&pacing_mutex);
		pkts_sent = 0;
	}
	return TEST_PASS;
}
/* Send up to one BATCH_SIZE worth of descriptors from the Tx stream.
 * Backs off (TEST_CONTINUE) when too many packets are already in flight to
 * avoid overrunning the receiver's buffer pool. Multi-frag packets take one
 * descriptor per frag; a packet that would not fit in the remaining batch
 * slots is cancelled and retried next call. 'timeout' selects lenient poll
 * handling used when the receiver's umem may already be gone.
 * Returns TEST_PASS, TEST_FAILURE or TEST_CONTINUE.
 */
static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
{
	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
	struct pkt_stream *pkt_stream = ifobject->pkt_stream;
	struct xsk_socket_info *xsk = ifobject->xsk;
	struct xsk_umem_info *umem = ifobject->umem;
	bool use_poll = ifobject->use_poll;
	int ret;
	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
		kick_tx(xsk);
		return TEST_CONTINUE;
	}
	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
		if (use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				/* poll timeout with no receiver alive: we are done */
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}
		complete_pkts(xsk, BATCH_SIZE);
	}
	for (i = 0; i < BATCH_SIZE; i++) {
		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
		u32 nb_frags_left, nb_frags, bytes_written = 0;
		if (!pkt)
			break;
		nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
		if (nb_frags > BATCH_SIZE - i) {
			/* packet does not fit in this batch; retry next call */
			pkt_stream_cancel(pkt_stream);
			xsk_ring_prod__cancel(&xsk->tx, BATCH_SIZE - i);
			break;
		}
		nb_frags_left = nb_frags;
		while (nb_frags_left--) {
			struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
			tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
			if (pkt_stream->verbatim) {
				tx_desc->len = pkt->len;
				tx_desc->options = pkt->options;
			} else if (nb_frags_left) {
				/* all but the last frag are full frames */
				tx_desc->len = umem->frame_size;
				tx_desc->options = XDP_PKT_CONTD;
			} else {
				tx_desc->len = pkt->len - bytes_written;
				tx_desc->options = 0;
			}
			if (pkt->valid)
				pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
					     bytes_written);
			bytes_written += tx_desc->len;
			if (nb_frags_left) {
				i++;
				/* verbatim streams store each frag as its own entry */
				if (pkt_stream->verbatim)
					pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
			}
		}
		if (pkt && pkt->valid) {
			valid_pkts++;
			valid_frags += nb_frags;
		}
	}
	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	pthread_mutex_unlock(&pacing_mutex);
	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_frags;
	if (use_poll) {
		ret = poll(fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;
			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}
	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;
		usleep(10);
		return TEST_PASS;
	}
	return TEST_CONTINUE;
}
/* Busy-wait until every submitted Tx frag has been completed.
 * NOTE(review): this loop has no timeout — if completions never arrive it
 * spins forever; the enclosing thread relies on the test-level timeout.
 */
static void wait_for_tx_completion(struct xsk_socket_info *xsk)
{
	while (xsk->outstanding_tx)
		complete_pkts(xsk, BATCH_SIZE);
}
/* Drive the Tx side: repeatedly call __send_pkts() until the whole stream
 * has been queued, then wait for all completions. With no valid receiver
 * umem ('timeout' mode), poll timeouts end the test successfully instead of
 * failing. Returns TEST_PASS or TEST_FAILURE.
 */
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	struct pkt_stream *pkt_stream = ifobject->pkt_stream;
	bool timeout = !is_umem_valid(test->ifobj_rx);
	struct pollfd fds = { };
	u32 ret;
	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLOUT;
	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
		ret = __send_pkts(ifobject, &fds, timeout);
		if (ret == TEST_CONTINUE && !test->fail)
			continue;
		if ((ret || test->fail) && !timeout)
			return TEST_FAILURE;
		if (ret == TEST_PASS && timeout)
			return ret;
	}
	wait_for_tx_completion(ifobject->xsk);
	return TEST_PASS;
}
/* Fetch XDP_STATISTICS for the socket into *stats and sanity-check the
 * returned option length. Returns TEST_PASS or TEST_FAILURE.
 */
static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
{
	int fd = xsk_socket__fd(xsk), err;
	socklen_t optlen, expected_len;
	optlen = sizeof(*stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
	if (err) {
		/* getsockopt() returns -1 and reports the error via errno,
		 * not as a negative error code in the return value.
		 */
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}
	expected_len = sizeof(struct xdp_statistics);
	if (optlen != expected_len) {
		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
			       __func__, expected_len, optlen);
		return TEST_FAILURE;
	}
	return TEST_PASS;
}
/* Verify the kernel dropped the expected number of packets: half the stream
 * is invalid, and the final (invalid) packet may or may not have been
 * counted yet when the receiver reads the statistics.
 */
static int validate_rx_dropped(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;
	kick_rx(ifobject->xsk);
	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;
	/* The receiver calls getsockopt after receiving the last (valid)
	 * packet which is not the final packet sent in this test (valid and
	 * invalid packets are sent in alternating fashion with the final
	 * packet being invalid). Since the last packet may or may not have
	 * been dropped already, both outcomes must be allowed.
	 */
	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
	    stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
		return TEST_PASS;
	return TEST_FAILURE;
}
/* Pass iff the kernel registered at least one Rx-ring-full event. The short
 * sleep gives the kernel time to account the last packets before reading.
 */
static int validate_rx_full(struct ifobject *ifobject)
{
	struct xdp_statistics stats;

	usleep(1000);
	kick_rx(ifobject->xsk);
	if (get_xsk_stats(ifobject->xsk->xsk, &stats))
		return TEST_FAILURE;

	return stats.rx_ring_full ? TEST_PASS : TEST_FAILURE;
}
/* Pass iff the kernel registered at least one empty-fill-ring event. The
 * short sleep gives the kernel time to account the last packets first.
 */
static int validate_fill_empty(struct ifobject *ifobject)
{
	struct xdp_statistics stats;

	usleep(1000);
	kick_rx(ifobject->xsk);
	if (get_xsk_stats(ifobject->xsk->xsk, &stats))
		return TEST_FAILURE;

	return stats.rx_fill_ring_empty_descs ? TEST_PASS : TEST_FAILURE;
}
/* Verify that exactly half the packets (the invalid ones) were rejected as
 * invalid Tx descriptors. Two fixes over the original: getsockopt() reports
 * its error in errno, not the return value; and the "expected" value in the
 * failure message now matches the value the check actually compares against
 * (nb_pkts / 2, not nb_pkts).
 */
static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	int fd = xsk_socket__fd(xsk);
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;
	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}
	if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
			       __func__, stats.tx_invalid_descs,
			       ifobject->pkt_stream->nb_pkts / 2);
		return TEST_FAILURE;
	}
	return TEST_PASS;
}
/* Create all sockets for the test on this interface, retrying each create
 * up to SOCK_RECONF_CTR times since socket teardown in the kernel is
 * asynchronous. Enables busy-poll on each socket when requested.
 * Exits the test if a socket still cannot be created after all retries.
 */
static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
				 struct xsk_umem_info *umem, bool tx)
{
	int i, ret;
	for (i = 0; i < test->nb_sockets; i++) {
		/* all but the first socket bind with XDP_SHARED_UMEM */
		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
		u32 ctr = 0;
		while (ctr++ < SOCK_RECONF_CTR) {
			ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
						     ifobject, shared);
			if (!ret)
				break;
			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				exit_with_error(-ret);
			usleep(USLEEP_MAX);
		}
		if (ifobject->busy_poll)
			enable_busy_poll(&ifobject->xsk_arr[i]);
	}
}
/* Tx-side setup for shared-umem tests: bind to the Rx side's umem and map,
 * then copy the umem bookkeeping so the Tx side allocates from the lower
 * half of the shared area (base_addr 0).
 */
static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xskmap = test->ifobj_rx->xskmap;
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
	ifobject->umem->base_addr = 0;
}
/* Pre-populate the fill ring with buffers for the packets in 'pkt_stream'.
 * One buffer is pushed per expected frag; with fill_up set, any remaining
 * ring slots are padded with sequential frame addresses. Unused reserved
 * slots are cancelled, and both the stream cursor and the umem allocator
 * are reset afterwards so the test proper starts from a clean state.
 */
static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
				   bool fill_up)
{
	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
	int ret;
	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
		buffers_to_fill = umem->num_frames;
	else
		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
	if (ret != buffers_to_fill)
		exit_with_error(ENOSPC);
	while (filled < buffers_to_fill) {
		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
		u64 addr;
		u32 i;
		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
			if (!pkt) {
				if (!fill_up)
					break;
				/* pad the ring with sequential frame addresses */
				addr = filled * umem->frame_size + umem->base_addr;
			} else if (pkt->offset >= 0) {
				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
			} else {
				/* negative offsets reach back into the previous frame */
				addr = pkt->offset + umem_alloc_buffer(umem);
			}
			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
			if (++filled >= buffers_to_fill)
				break;
		}
	}
	xsk_ring_prod__submit(&umem->fq, filled);
	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
	pkt_stream_reset(pkt_stream);
	umem_reset_alloc(umem);
}
/* Per-thread setup: mmap the umem area (doubled for shared umem, hugepages
 * for unaligned mode), create the umem and sockets, and — on the Rx side —
 * populate the fill ring and insert the socket into the xsk map.
 */
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	void *bufs;
	int ret;
	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
	if (ifobject->shared_umem)
		umem_sz *= 2;
	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);
	ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);
	xsk_configure_socket(test, ifobject, ifobject->umem, false);
	ifobject->xsk = &ifobject->xsk_arr[0];
	if (!ifobject->rx_on)
		return;
	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
	if (ret)
		exit_with_error(errno);
}
/* Tx thread entry point: perform one-time setup on the first step, send the
 * whole stream, then run the optional validation callback. Failures are
 * recorded on the test spec via report_failure().
 */
static void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;
	if (test->current_step == 1) {
		if (!ifobject->shared_umem)
			thread_common_ops(test, ifobject);
		else
			thread_common_ops_tx(test, ifobject);
	}
	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
		      ifobject->ifname);
	err = send_pkts(test, ifobject);
	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		report_failure(test);
	pthread_exit(NULL);
}
/* Rx thread entry point: full setup on the first step, otherwise just
 * refresh the xsk map entry. Signals readiness through the shared barrier
 * before receiving and validating the stream.
 */
static void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	struct pollfd fds = { };
	int err;
	if (test->current_step == 1) {
		thread_common_ops(test, ifobject);
	} else {
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
		if (err) {
			printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
			exit_with_error(-err);
		}
	}
	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLIN;
	/* let the main thread know Rx is ready before Tx starts */
	pthread_barrier_wait(&barr);
	err = receive_pkts(test, &fds);
	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		report_failure(test);
	pthread_exit(NULL);
}
/* Divide dividend by divisor, rounding up (64-bit variant). */
static u64 ceil_u64(u64 dividend, u64 divisor)
{
	return (dividend + divisor - 1) / divisor;
}
/* Tear down the umem and unmap its backing memory. The unmap length is
 * rounded up to a whole number of hugepages to match how unaligned-mode
 * mappings were created, and doubled for shared umems.
 */
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
	if (ifobj->shared_umem)
		umem_sz *= 2;
	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	xsk_umem__delete(ifobj->umem->umem);
	munmap(ifobj->umem->buffer, umem_sz);
}
/* SIGUSR1 handler: terminate the thread that received the signal. Used to
 * stop a lone worker thread that would otherwise block forever.
 */
static void handler(int signum)
{
	pthread_exit(NULL);
}
static bool xdp_prog_changed_rx(struct test_spec *test)
{
struct ifobject *ifobj = test->ifobj_rx;
return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
}
static bool xdp_prog_changed_tx(struct test_spec *test)
{
struct ifobject *ifobj = test->ifobj_tx;
return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
}
/* Detach the currently attached XDP program and attach 'xdp_prog' in the
 * requested mode, verifying that DRV/ZC modes really ended up in driver
 * mode. Updates the ifobject's program, map and mode on success; exits the
 * test otherwise.
 */
static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
			     struct bpf_map *xskmap, enum test_mode mode)
{
	int err;
	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
	if (err) {
		printf("Error attaching XDP program\n");
		exit_with_error(-err);
	}
	/* DRV and ZC must actually run in driver mode, not fall back to SKB */
	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
			exit_with_error(EINVAL);
		}
	ifobj->xdp_prog = xdp_prog;
	ifobj->xskmap = xskmap;
	ifobj->mode = mode;
}
/* (Re)attach the test's XDP programs where needed. With no Tx interface, or
 * a shared umem, only the Rx program is attached.
 */
static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
				 struct ifobject *ifobj_tx)
{
	if (xdp_prog_changed_rx(test))
		xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);

	if (ifobj_tx && !ifobj_tx->shared_umem && xdp_prog_changed_tx(test))
		xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
}
/* Core traffic runner: spawn the first worker thread (Rx) and, when @ifobj2
 * is non-NULL, a second (Tx) worker, then wait for them and report whether
 * the step failed.
 *
 * In the single-threaded case (@ifobj2 == NULL) the lone thread is stopped
 * with SIGUSR1 instead of being joined. Returns TEST_SKIP for unsupported
 * MTUs, otherwise 0 on success / non-zero on failure.
 */
static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
				      struct ifobject *ifobj2)
{
	pthread_t t0, t1;
	int err;

	/* Jumbo-sized traffic needs multi-buffer support on both sides. */
	if (test->mtu > MAX_ETH_PKT_SIZE) {
		if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
						   (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
			ksft_test_result_skip("Multi buffer for zero-copy not supported.\n");
			return TEST_SKIP;
		}
		if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
						   (ifobj2 && !ifobj2->multi_buff_supp))) {
			ksft_test_result_skip("Multi buffer not supported.\n");
			return TEST_SKIP;
		}
	}

	err = test_spec_set_mtu(test, test->mtu);
	if (err) {
		ksft_print_msg("Error, could not set mtu.\n");
		exit_with_error(err);
	}

	if (ifobj2) {
		/* The barrier synchronizes Rx socket setup with Tx start. */
		if (pthread_barrier_init(&barr, NULL, 2))
			exit_with_error(errno);
		pkt_stream_reset(ifobj2->pkt_stream);
	}

	test->current_step++;
	pkt_stream_reset(ifobj1->pkt_stream);
	pkts_in_flight = 0;

	/* SIGUSR1 terminates the lone thread in single-thread variants. */
	signal(SIGUSR1, handler);
	/*Spawn RX thread */
	pthread_create(&t0, NULL, ifobj1->func_ptr, test);

	if (ifobj2) {
		/* Do not start sending before the Rx side is ready. */
		pthread_barrier_wait(&barr);
		if (pthread_barrier_destroy(&barr))
			exit_with_error(errno);

		/*Spawn TX thread */
		pthread_create(&t1, NULL, ifobj2->func_ptr, test);

		pthread_join(t1, NULL);
	}

	if (!ifobj2)
		pthread_kill(t0, SIGUSR1);
	else
		pthread_join(t0, NULL);

	/* After the last step (or on failure), tear down sockets and umems.
	 * A shared umem is freed only once, via ifobj1.
	 */
	if (test->total_steps == test->current_step || test->fail) {
		if (ifobj2)
			xsk_socket__delete(ifobj2->xsk->xsk);
		xsk_socket__delete(ifobj1->xsk->xsk);
		testapp_clean_xsk_umem(ifobj1);
		if (ifobj2 && !ifobj2->shared_umem)
			testapp_clean_xsk_umem(ifobj2);
	}

	return !!test->fail;
}
/* Standard two-interface traffic test: skip when unaligned mode is requested
 * without huge page support, otherwise attach the XDP programs and run the
 * core traffic validator.
 */
static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *rx = test->ifobj_rx;
	struct ifobject *tx = test->ifobj_tx;
	bool missing_huge_pages;

	missing_huge_pages = (rx->umem->unaligned_mode && !rx->unaligned_supp) ||
			     (tx->umem->unaligned_mode && !tx->unaligned_supp);
	if (missing_huge_pages) {
		ksft_test_result_skip("No huge pages present.\n");
		return TEST_SKIP;
	}

	xsk_attach_xdp_progs(test, rx, tx);
	return __testapp_validate_traffic(test, rx, tx);
}
/* Run a test with a single worker thread; @ifobj acts alone (no peer). */
static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
{
	return __testapp_validate_traffic(test, ifobj, NULL);
}
/* TEARDOWN: repeatedly run a complete traffic pass and reset the test spec,
 * exercising socket/umem setup and teardown MAX_TEARDOWN_ITER times.
 */
static int testapp_teardown(struct test_spec *test)
{
	int iter = MAX_TEARDOWN_ITER;

	test_spec_set_name(test, "TEARDOWN");
	while (iter--) {
		if (testapp_validate_traffic(test))
			return TEST_FAILURE;
		test_spec_reset(test);
	}

	return TEST_PASS;
}
/* Exchange the two interface objects AND their worker functions, so each
 * object keeps the worker matching its new Tx/Rx role.
 */
static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
{
	struct ifobject *first = *ifobj1;
	struct ifobject *second = *ifobj2;
	thread_func_t fn = first->func_ptr;

	first->func_ptr = second->func_ptr;
	second->func_ptr = fn;

	*ifobj1 = second;
	*ifobj2 = first;
}
/* BIDIRECTIONAL: run traffic in the normal direction, then swap the Tx/Rx
 * vectors and run the reverse direction as step two, restoring the original
 * direction afterwards for subsequent tests.
 */
static int testapp_bidi(struct test_spec *test)
{
	int res;

	test_spec_set_name(test, "BIDIRECTIONAL");
	/* Both interfaces act as sender and receiver. */
	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	print_verbose("Switching Tx/Rx vectors\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	/* Use the __ variant directly, skipping the huge-page check and XDP
	 * prog re-attach that testapp_validate_traffic() performs.
	 */
	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	return res;
}
/* Switch both interfaces from their first socket to the second one in
 * xsk_arr and rebind the Rx xskmap to the new socket.
 *
 * Fix: on xsk_update_xskmap() failure, report the function's negative return
 * code rather than the global errno. The rest of this file (see the Rx
 * worker's xsk_update_xskmap error path) treats the return value as the
 * error source; errno is not guaranteed to reflect the map-update failure.
 */
static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
{
	int ret;

	xsk_socket__delete(ifobj_tx->xsk->xsk);
	xsk_socket__delete(ifobj_rx->xsk->xsk);
	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];

	ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
	if (ret)
		exit_with_error(-ret);
}
/* BPF_RES: two-step test that validates traffic on the first socket pair,
 * then swaps to the second pair (rebinding the xskmap) and validates again.
 */
static int testapp_bpf_res(struct test_spec *test)
{
	test_spec_set_name(test, "BPF_RES");
	test->total_steps = 2;
	test->nb_sockets = 2;

	/* Step one: traffic on the first socket of each interface. */
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	/* Step two: move over to the second sockets and re-run. */
	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);

	return testapp_validate_traffic(test);
}
/* UMEM_HEADROOM: verify traffic still flows when the Rx umem is configured
 * with a non-zero per-frame headroom.
 */
static int testapp_headroom(struct test_spec *test)
{
	test_spec_set_name(test, "UMEM_HEADROOM");
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	return testapp_validate_traffic(test);
}
/* STAT_RX_DROPPED: provoke Rx drops by shrinking the usable frame area with
 * a huge headroom, expect only half the stream, and check the drop counter
 * via validate_rx_dropped.
 */
static int testapp_stats_rx_dropped(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_DROPPED");
	if (test->mode == TEST_MODE_ZC) {
		ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
		return TEST_SKIP;
	}

	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	/* Leave room for only MIN_PKT_SIZE * 3 of payload, so the larger
	 * (MIN_PKT_SIZE * 4) packets in half the stream cannot fit.
	 */
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	return testapp_validate_traffic(test);
}
/* STAT_TX_INVALID: submit Tx descriptors with an illegal frame size for
 * half the stream and check the invalid-descriptor counter via
 * validate_tx_invalid_descs.
 */
static int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_TX_INVALID");
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	return testapp_validate_traffic(test);
}
/* STAT_RX_FULL: send 1.5x as many packets as the Rx side expects while the
 * Rx ring is sized to DEFAULT_UMEM_BUFFERS and buffers are never released,
 * then run the validate_rx_full check.
 */
static int testapp_stats_rx_full(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FULL");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);

	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	return testapp_validate_traffic(test);
}
/* STAT_RX_FILL_EMPTY: run with the fill ring unused so the Rx side starves
 * for buffers, then run the validate_fill_empty check.
 */
static int testapp_stats_fill_empty(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	return testapp_validate_traffic(test);
}
/* UNALIGNED_MODE: enable unaligned chunk mode on both umems and offset half
 * of the packets so they straddle a 4K buffer boundary.
 */
static int testapp_unaligned(struct test_spec *test)
{
	test_spec_set_name(test, "UNALIGNED_MODE");
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a 4K buffer boundary */
	pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
	return testapp_validate_traffic(test);
}
/* UNALIGNED_MODE_9K: unaligned chunk mode combined with jumbo (9K) packets,
 * exercising the multi-buffer path.
 */
static int testapp_unaligned_mb(struct test_spec *test)
{
	test_spec_set_name(test, "UNALIGNED_MODE_9K");
	test->mtu = MAX_ETH_JUMBO_SIZE;
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
	return testapp_validate_traffic(test);
}
/* Send and receive exactly one minimum-sized, valid packet. */
static int testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}
/* RUN_TO_COMPLETION_9K_PACKETS: run-to-completion traffic with jumbo (9K)
 * packets, each spanning multiple buffers.
 */
static int testapp_multi_buffer(struct test_spec *test)
{
	test_spec_set_name(test, "RUN_TO_COMPLETION_9K_PACKETS");
	test->mtu = MAX_ETH_JUMBO_SIZE;
	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);

	return testapp_validate_traffic(test);
}
/* Multi-buffer invalid-descriptor test: interleave valid multi-frag packets
 * with descriptors that must be rejected (zero length, out-of-umem address,
 * invalid length, invalid options, chunk-boundary crossing). Valid sync
 * packets bracket the stream so completion can be observed.
 */
static int testapp_invalid_desc_mb(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Valid packet for synch to start with */
		{0, MIN_PKT_SIZE, 0, true, 0},
		/* Three frags where the final frag has zero length - not legal */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, 0, 0, false, 0},
		/* Invalid address in the second frame */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid len in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid options in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
		/* Transmit 2 frags, receive 3 */
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
		/* Middle frame crosses chunk boundary with small length */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
		/* Valid packet for synch so that something is received */
		{0, MIN_PKT_SIZE, 0, true, 0}};

	if (umem->unaligned_mode) {
		/* Crossing a chunk boundary allowed: indices 12/13 are the
		 * boundary-crossing pair above.
		 */
		pkts[12].valid = true;
		pkts[13].valid = true;
	}

	test->mtu = MAX_ETH_JUMBO_SIZE;
	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}
/* Single-buffer invalid-descriptor test: exercise the descriptor validation
 * boundaries (umem start/end, frame size, 2K/4K boundaries). Expectations
 * are adjusted for unaligned mode, 2K frames and shared umems below.
 */
static int testapp_invalid_desc(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, MIN_PKT_SIZE, 0, true},
		/* Allowed packet */
		{0, MIN_PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, MIN_PKT_SIZE, 0, false},
		/* Packet too large */
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* Up to end of umem allowed */
		{umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
		/* After umem ends */
		{umem_size, MIN_PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 4K boundary */
		{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0, MIN_PKT_SIZE, 0, true}};

	if (umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[7].valid = true;
	}
	if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[8].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		/* The Tx half of a shared umem starts umem_size in. */
		pkts[4].offset += umem_size;
		pkts[5].offset += umem_size;
		pkts[6].offset += umem_size;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}
/* XDP_DROP_HALF: attach the xsk_xdp_drop program on both interfaces and
 * expect only half the stream to be received (pkt_stream_receive_half).
 */
static int testapp_xdp_drop(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_name(test, "XDP_DROP_HALF");
	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	pkt_stream_receive_half(test);
	return testapp_validate_traffic(test);
}
/* Attach the metadata-populating XDP program and validate the metadata
 * received with each packet. The program's counter lives in its .bss map
 * and is reset to zero before the run.
 */
static int testapp_xdp_metadata_count(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
	struct bpf_map *data_map;
	int count = 0;
	int key = 0;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
			       skel_tx->progs.xsk_xdp_populate_metadata,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);
	test->ifobj_rx->use_metadata = true;

	/* Locate the skeleton's internal .bss map holding the counter. */
	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map))
		exit_with_error(ENOMEM);

	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
		exit_with_error(errno);

	return testapp_validate_traffic(test);
}
/* POLL_TXQ_FULL: Tx-only single-thread test exercising poll() on a Tx queue
 * that never drains.
 */
static int testapp_poll_txq_tmout(struct test_spec *test)
{
	test_spec_set_name(test, "POLL_TXQ_FULL");

	test->ifobj_tx->use_poll = true;
	/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
	test->ifobj_tx->umem->frame_size = 2048;
	pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);

	return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
}
/* POLL_RXQ_EMPTY: Rx-only single-thread test exercising poll() on an Rx
 * queue with no incoming traffic.
 */
static int testapp_poll_rxq_tmout(struct test_spec *test)
{
	test_spec_set_name(test, "POLL_RXQ_EMPTY");
	test->ifobj_rx->use_poll = true;
	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}
/* TOO_MANY_FRAGS: one valid packet with the maximum allowed number of frags
 * followed by an invalid one that still signals continuation on its last
 * frag, bracketed by single-frag sync packets. The frag limit depends on
 * the mode: the driver's zero-copy limit or the generic SKB limit.
 */
static int testapp_too_many_frags(struct test_spec *test)
{
	struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
	u32 max_frags, i;

	test_spec_set_name(test, "TOO_MANY_FRAGS");
	if (test->mode == TEST_MODE_ZC)
		max_frags = test->ifobj_tx->xdp_zc_max_segs;
	else
		max_frags = XSK_DESC__MAX_SKB_FRAGS;

	test->mtu = MAX_ETH_JUMBO_SIZE;

	/* Valid packet for synch */
	pkts[0].len = MIN_PKT_SIZE;
	pkts[0].valid = true;

	/* One valid packet with the max amount of frags */
	for (i = 1; i < max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = true;
	}
	/* Terminate the packet: clear CONTD on its last frag. */
	pkts[max_frags].options = 0;

	/* An invalid packet with the max amount of frags but signals packet
	 * continues on the last frag
	 */
	for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = false;
	}

	/* Valid packet for synch */
	pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
	pkts[2 * max_frags + 1].valid = true;

	pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
	return testapp_validate_traffic(test);
}
/* Open and load the XDP test program skeleton for this interface.
 * Returns 0 on success or the libbpf error code on failure.
 */
static int xsk_load_xdp_programs(struct ifobject *ifobj)
{
	long err;

	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
	err = libbpf_get_error(ifobj->xdp_progs);

	return err ? (int)err : 0;
}
/* Destroy the XDP program skeleton loaded by xsk_load_xdp_programs(). */
static void xsk_unload_xdp_programs(struct ifobject *ifobj)
{
	xsk_xdp_progs__destroy(ifobj->xdp_progs);
}
/* Probe whether 2MB huge pages can be allocated: try to mmap a umem-sized
 * hugetlb region and unmap it again. Used to decide whether unaligned mode
 * (which needs huge pages) is supported.
 *
 * Fix: MAP_HUGE_2MB is a flags bit selecting the huge page size; it was
 * being passed as mmap's offset argument. The offset for an anonymous
 * mapping must be 0, so the probe could fail (EINVAL) even with huge pages
 * available.
 */
static bool hugepages_present(void)
{
	size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
	if (bufs == MAP_FAILED)
		return false;

	/* Unmap the same (huge-page rounded) length that was mapped. */
	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	munmap(bufs, mmap_sz);
	return true;
}
/* Initialize one interface object: record the MAC addresses and worker
 * function, load the XDP test programs, and probe capabilities (huge pages
 * for unaligned mode, multi-buffer, zero-copy multi-buffer).
 */
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       thread_func_t func_ptr)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
	int err;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	ifobj->func_ptr = func_ptr;

	err = xsk_load_xdp_programs(ifobj);
	if (err) {
		printf("Error loading XDP program\n");
		exit_with_error(err);
	}

	/* Unaligned mode requires huge pages for the umem. */
	if (hugepages_present())
		ifobj->unaligned_supp = true;

	err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
	if (err) {
		ksft_print_msg("Error querying XDP capabilities\n");
		exit_with_error(-err);
	}
	if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
		ifobj->multi_buff_supp = true;
	if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		/* Zero-copy multi-buffer needs more than one segment. */
		if (query_opts.xdp_zc_max_segs > 1) {
			ifobj->multi_buff_zc_supp = true;
			ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
		} else {
			ifobj->xdp_zc_max_segs = 0;
		}
	}
}
/* Dispatch a single test @type in the given @mode, report a pass through the
 * kselftest framework (skips/failures are reported by the tests themselves),
 * and restore the default packet streams afterwards. Unknown types leave
 * ret at TEST_SKIP.
 */
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	int ret = TEST_SKIP;

	switch (type) {
	case TEST_TYPE_STATS_RX_DROPPED:
		ret = testapp_stats_rx_dropped(test);
		break;
	case TEST_TYPE_STATS_TX_INVALID_DESCS:
		ret = testapp_stats_tx_invalid_descs(test);
		break;
	case TEST_TYPE_STATS_RX_FULL:
		ret = testapp_stats_rx_full(test);
		break;
	case TEST_TYPE_STATS_FILL_EMPTY:
		ret = testapp_stats_fill_empty(test);
		break;
	case TEST_TYPE_TEARDOWN:
		ret = testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		ret = testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		ret = testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		ret = testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_MB:
		ret = testapp_multi_buffer(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
		ret = testapp_single_pkt(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
		ret = testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RX_POLL:
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL_RX");
		ret = testapp_validate_traffic(test);
		break;
	case TEST_TYPE_TX_POLL:
		test->ifobj_tx->use_poll = true;
		test_spec_set_name(test, "POLL_TX");
		ret = testapp_validate_traffic(test);
		break;
	case TEST_TYPE_POLL_TXQ_TMOUT:
		ret = testapp_poll_txq_tmout(test);
		break;
	case TEST_TYPE_POLL_RXQ_TMOUT:
		ret = testapp_poll_rxq_tmout(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		ret = testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		ret = testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		ret = testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
		u64 page_size, umem_size;

		test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
		/* Odd frame size so the UMEM doesn't end near a page boundary. */
		test->ifobj_tx->umem->frame_size = 4001;
		test->ifobj_rx->umem->frame_size = 4001;
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		/* This test exists to test descriptors that straddle the end of
		 * the UMEM but not a page.
		 */
		page_size = sysconf(_SC_PAGESIZE);
		umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
		assert(umem_size % page_size > MIN_PKT_SIZE);
		assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
		ret = testapp_invalid_desc(test);
		break;
	}
	case TEST_TYPE_ALIGNED_INV_DESC_MB:
		test_spec_set_name(test, "ALIGNED_INV_DESC_MULTI_BUFF");
		ret = testapp_invalid_desc_mb(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC_MB:
		test_spec_set_name(test, "UNALIGNED_INV_DESC_MULTI_BUFF");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		ret = testapp_invalid_desc_mb(test);
		break;
	case TEST_TYPE_UNALIGNED:
		ret = testapp_unaligned(test);
		break;
	case TEST_TYPE_UNALIGNED_MB:
		ret = testapp_unaligned_mb(test);
		break;
	case TEST_TYPE_HEADROOM:
		ret = testapp_headroom(test);
		break;
	case TEST_TYPE_XDP_DROP_HALF:
		ret = testapp_xdp_drop(test);
		break;
	case TEST_TYPE_XDP_METADATA_COUNT:
		test_spec_set_name(test, "XDP_METADATA_COUNT");
		ret = testapp_xdp_metadata_count(test);
		break;
	case TEST_TYPE_XDP_METADATA_COUNT_MB:
		test_spec_set_name(test, "XDP_METADATA_COUNT_MULTI_BUFF");
		test->mtu = MAX_ETH_JUMBO_SIZE;
		ret = testapp_xdp_metadata_count(test);
		break;
	case TEST_TYPE_TOO_MANY_FRAGS:
		ret = testapp_too_many_frags(test);
		break;
	default:
		break;
	}

	/* Only passes are reported here; skips/failures were already logged. */
	if (ret == TEST_PASS)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);

	pkt_stream_restore_default(test);
}
/* Allocate and zero-initialize an ifobject together with its socket array
 * and umem info. Returns NULL (with everything released) on any allocation
 * failure.
 */
static struct ifobject *ifobject_create(void)
{
	struct ifobject *ifobj = calloc(1, sizeof(*ifobj));

	if (!ifobj)
		return NULL;

	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
	if (!ifobj->xsk_arr) {
		free(ifobj);
		return NULL;
	}

	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
	if (!ifobj->umem) {
		free(ifobj->xsk_arr);
		free(ifobj);
		return NULL;
	}

	return ifobj;
}
/* Free an ifobject and the members allocated by ifobject_create(). */
static void ifobject_delete(struct ifobject *ifobj)
{
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}
/* Probe native (driver mode) XDP support on @ifindex by loading a trivial
 * XDP_PASS program and attempting to attach it with XDP_FLAGS_DRV_MODE.
 * The program is detached and closed again before returning.
 */
static bool is_xdp_supported(int ifindex)
{
	int flags = XDP_FLAGS_DRV_MODE;

	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
	/* Minimal program: return XDP_PASS. */
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
	int err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return false;

	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
	if (err) {
		close(prog_fd);
		return false;
	}

	bpf_xdp_detach(ifindex, flags, NULL);
	close(prog_fd);

	return true;
}
/* Entry point: create the Tx/Rx interface objects, parse arguments, probe
 * which attach modes are available (SKB always, plus DRV and zero-copy when
 * supported), then run every test type in every mode and exit with the
 * kselftest pass/fail status.
 */
int main(int argc, char **argv)
{
	struct pkt_stream *rx_pkt_stream_default;
	struct pkt_stream *tx_pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	int modes = TEST_MODE_SKB + 1;
	u32 i, j, failed_tests = 0;
	struct test_spec test;
	bool shared_netdev;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	/* Tx and Rx on the same netdev implies a shared umem. */
	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
	ifobj_tx->shared_umem = shared_netdev;
	ifobj_rx->shared_umem = shared_netdev;

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	/* SKB mode always runs; add DRV and then ZC when supported. */
	if (is_xdp_supported(ifobj_tx->ifindex)) {
		modes++;
		if (ifobj_zc_avail(ifobj_tx))
			modes++;
	}

	init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
	init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);

	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
		exit_with_error(ENOMEM);
	test.tx_pkt_stream_default = tx_pkt_stream_default;
	test.rx_pkt_stream_default = rx_pkt_stream_default;

	ksft_set_plan(modes * TEST_TYPE_MAX);

	for (i = 0; i < modes; i++) {
		for (j = 0; j < TEST_TYPE_MAX; j++) {
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}
	}

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	xsk_unload_xdp_programs(ifobj_tx);
	xsk_unload_xdp_programs(ifobj_rx);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}
| linux-master | tools/testing/selftests/bpf/xskxceiver.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. */
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libgen.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include "bpf/bpf.h"
#include "bpf/libbpf.h"
#include "xdping.h"
#include "testing_helpers.h"
static int ifindex;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
/* Detach the XDP program from the interface. Doubles as the SIGINT/SIGTERM
 * handler (sig != 0 exits with failure) and as normal teardown (sig == 0).
 */
static void cleanup(int sig)
{
	bpf_xdp_detach(ifindex, xdp_flags, NULL);
	if (sig)
		exit(1);
}
/* Print the RTT samples the XDP program collected for @raddr and delete its
 * map entry. Returns 0 on success, 1 when the lookup fails or fewer than
 * @count samples were recorded.
 */
static int get_stats(int fd, __u16 count, __u32 raddr)
{
	struct pinginfo pinginfo = { 0 };
	char inaddrbuf[INET_ADDRSTRLEN];
	struct in_addr inaddr;
	__u16 i;

	inaddr.s_addr = raddr;

	printf("\nXDP RTT data:\n");

	if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) {
		perror("bpf_map_lookup elem");
		return 1;
	}

	for (i = 0; i < count; i++) {
		/* A zero timestamp marks the end of the recorded samples. */
		if (pinginfo.times[i] == 0)
			break;

		/* Sequence numbers continue after the `count` regular pings
		 * that preceded the XDP-generated requests (see main()).
		 */
		printf("64 bytes from %s: icmp_seq=%d ttl=64 time=%#.5f ms\n",
		       inet_ntop(AF_INET, &inaddr, inaddrbuf,
				 sizeof(inaddrbuf)),
		       count + i + 1,
		       (double)pinginfo.times[i]/1000000);
	}

	if (i < count) {
		fprintf(stderr, "Expected %d samples, got %d.\n", count, i);
		return 1;
	}

	bpf_map_delete_elem(fd, &raddr);

	return 0;
}
/* Print command-line usage for @prog to stderr. */
static void show_usage(const char *prog)
{
	fprintf(stderr,
		"usage: %s [OPTS] -I interface destination\n\n"
		"OPTS:\n"
		"    -c count		Stop after sending count requests\n"
		"			(default %d, max %d)\n"
		"    -I interface	interface name\n"
		"    -N			Run in driver mode\n"
		"    -s			Server mode\n"
		"    -S			Run in skb mode\n",
		prog, XDPING_DEFAULT_COUNT, XDPING_MAX_COUNT);
}
/* xdping entry point. Client mode: attach an XDP program that turns ICMP
 * replies into fresh requests so RTTs are measured entirely in XDP, run the
 * regular ping binary, then read the collected timestamps out of the BPF
 * map. Server mode: attach the reflecting server program and spin until
 * interrupted.
 */
int main(int argc, char **argv)
{
	__u32 mode_flags = XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE;
	struct addrinfo *a, hints = { .ai_family = AF_INET };
	__u16 count = XDPING_DEFAULT_COUNT;
	struct pinginfo pinginfo = { 0 };
	const char *optstr = "c:I:NsS";
	struct bpf_program *main_prog;
	int prog_fd = -1, map_fd = -1;
	struct sockaddr_in rin;
	struct bpf_object *obj;
	struct bpf_map *map;
	char *ifname = NULL;
	char filename[256];
	int opt, ret = 1;
	__u32 raddr = 0;
	int server = 0;
	char cmd[256];

	while ((opt = getopt(argc, argv, optstr)) != -1) {
		switch (opt) {
		case 'c':
			count = atoi(optarg);
			if (count < 1 || count > XDPING_MAX_COUNT) {
				fprintf(stderr,
					"min count is 1, max count is %d\n",
					XDPING_MAX_COUNT);
				return 1;
			}
			break;
		case 'I':
			ifname = optarg;
			ifindex = if_nametoindex(ifname);
			if (!ifindex) {
				fprintf(stderr, "Could not get interface %s\n",
					ifname);
				return 1;
			}
			break;
		case 'N':
			xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 's':
			/* use server program */
			server = 1;
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		default:
			show_usage(basename(argv[0]));
			return 1;
		}
	}

	if (!ifname) {
		show_usage(basename(argv[0]));
		return 1;
	}
	if (!server && optind == argc) {
		show_usage(basename(argv[0]));
		return 1;
	}

	/* -N and -S are mutually exclusive attach modes. */
	if ((xdp_flags & mode_flags) == mode_flags) {
		fprintf(stderr, "-N or -S can be specified, not both.\n");
		show_usage(basename(argv[0]));
		return 1;
	}

	if (!server) {
		/* Only supports IPv4; see hints initialization above. */
		if (getaddrinfo(argv[optind], NULL, &hints, &a) || !a) {
			fprintf(stderr, "Could not resolve %s\n", argv[optind]);
			return 1;
		}
		memcpy(&rin, a->ai_addr, sizeof(rin));
		raddr = rin.sin_addr.s_addr;
		freeaddrinfo(a);
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	snprintf(filename, sizeof(filename), "%s_kern.bpf.o", argv[0]);
	if (bpf_prog_test_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) {
		fprintf(stderr, "load of %s failed\n", filename);
		return 1;
	}

	/* The object holds both programs; pick the one matching the mode. */
	main_prog = bpf_object__find_program_by_name(obj,
						     server ? "xdping_server" : "xdping_client");
	if (main_prog)
		prog_fd = bpf_program__fd(main_prog);
	if (!main_prog || prog_fd < 0) {
		fprintf(stderr, "could not find xdping program");
		return 1;
	}

	map = bpf_object__next_map(obj, NULL);
	if (map)
		map_fd = bpf_map__fd(map);
	if (!map || map_fd < 0) {
		fprintf(stderr, "Could not find ping map");
		goto done;
	}

	/* Make sure the program gets detached again on Ctrl+C / kill. */
	signal(SIGINT, cleanup);
	signal(SIGTERM, cleanup);

	printf("Setting up XDP for %s, please wait...\n", ifname);

	printf("XDP setup disrupts network connectivity, hit Ctrl+C to quit\n");

	if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
		fprintf(stderr, "Link set xdp fd failed for %s\n", ifname);
		goto done;
	}

	if (server) {
		close(prog_fd);
		close(map_fd);
		printf("Running server on %s; press Ctrl+C to exit...\n",
		       ifname);
		/* NOTE(review): busy-waits at 100% CPU until a signal fires;
		 * pause() would be cheaper - confirm before changing.
		 */
		do { } while (1);
	}

	/* Start xdping-ing from last regular ping reply, e.g. for a count
	 * of 10 ICMP requests, we start xdping-ing using reply with seq number
	 * 10.  The reason the last "real" ping RTT is much higher is that
	 * the ping program sees the ICMP reply associated with the last
	 * XDP-generated packet, so ping doesn't get a reply until XDP is done.
	 */
	pinginfo.seq = htons(count);
	pinginfo.count = count;

	if (bpf_map_update_elem(map_fd, &raddr, &pinginfo, BPF_ANY)) {
		fprintf(stderr, "could not communicate with BPF map: %s\n",
			strerror(errno));
		cleanup(0);
		goto done;
	}

	/* We need to wait for XDP setup to complete. */
	sleep(10);

	snprintf(cmd, sizeof(cmd), "ping -c %d -I %s %s",
		 count, ifname, argv[optind]);
	printf("\nNormal ping RTT data\n");
	printf("[Ignore final RTT; it is distorted by XDP using the reply]\n");

	ret = system(cmd);

	if (!ret)
		ret = get_stats(map_fd, count, raddr);

	cleanup(0);

done:
	/* NOTE(review): the fds are initialized to -1 but 0 is a valid file
	 * descriptor; these checks should arguably be ">= 0" - confirm.
	 */
	if (prog_fd > 0)
		close(prog_fd);
	if (map_fd > 0)
		close(map_fd);

	return ret;
}
| linux-master | tools/testing/selftests/bpf/xdping.c |
Subsets and Splits