/* NOTE(review): removed non-C dataset/extraction artifact lines that
 * preceded the SPDX header; they were not part of the source file.
 */
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/tls.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include "../kselftest_harness.h"
/* Largest record payload exercised by these tests. */
#define TLS_PAYLOAD_MAX_LEN 16384
/* Socket option level for kTLS; defined locally for older libc headers. */
#define SOL_TLS 282
/* Non-zero when the kernel is in FIPS mode; never written in this chunk —
 * presumably initialized elsewhere (e.g. from /proc) — TODO confirm.
 */
static int fips_enabled;
/* Holder for one cipher's key material.  Every union member begins with a
 * struct tls_crypto_info header, so the generic crypto_info view aliases
 * the header of whichever cipher-specific struct is in use.  len is the
 * size of the active member, i.e. what setsockopt(SOL_TLS) expects.
 */
struct tls_crypto_info_keys {
	union {
		struct tls_crypto_info crypto_info;
		struct tls12_crypto_info_aes_gcm_128 aes128;
		struct tls12_crypto_info_chacha20_poly1305 chacha20;
		struct tls12_crypto_info_sm4_gcm sm4gcm;
		struct tls12_crypto_info_sm4_ccm sm4ccm;
		struct tls12_crypto_info_aes_ccm_128 aesccm128;
		struct tls12_crypto_info_aes_gcm_256 aesgcm256;
		struct tls12_crypto_info_aria_gcm_128 ariagcm128;
		struct tls12_crypto_info_aria_gcm_256 ariagcm256;
	};
	size_t len;	/* size of the active union member */
};
/* Zero-fill *tls12 and stamp it with the requested TLS version and
 * cipher type.  On an unrecognized cipher the struct is left zeroed
 * with len == 0 so a later setsockopt() fails cleanly.
 */
static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
				 struct tls_crypto_info_keys *tls12)
{
	memset(tls12, 0, sizeof(*tls12));

	/* All union members start with a struct tls_crypto_info, so the
	 * version/cipher header can be written once through crypto_info;
	 * only the payload size differs per cipher.
	 */
	switch (cipher_type) {
	case TLS_CIPHER_CHACHA20_POLY1305:
		tls12->len = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	case TLS_CIPHER_AES_GCM_128:
		tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_SM4_GCM:
		tls12->len = sizeof(struct tls12_crypto_info_sm4_gcm);
		break;
	case TLS_CIPHER_SM4_CCM:
		tls12->len = sizeof(struct tls12_crypto_info_sm4_ccm);
		break;
	case TLS_CIPHER_AES_CCM_128:
		tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_ARIA_GCM_128:
		tls12->len = sizeof(struct tls12_crypto_info_aria_gcm_128);
		break;
	case TLS_CIPHER_ARIA_GCM_256:
		tls12->len = sizeof(struct tls12_crypto_info_aria_gcm_256);
		break;
	default:
		/* Unknown cipher: leave everything zeroed. */
		return;
	}

	tls12->crypto_info.version = tls_version;
	tls12->crypto_info.cipher_type = cipher_type;
}
/* Fill the n-byte buffer at s with pseudo-random data from rand().
 *
 * Fix: the original stored rand() results through an int * derived from
 * an arbitrary char buffer, which is an unaligned (and strict-aliasing
 * suspect) store; copy each word with memcpy instead.  The byte stream
 * produced is unchanged on the usual 4-byte-int targets.
 */
static void memrnd(void *s, size_t n)
{
	char *byte = s;
	int word;

	/* Word-sized chunks first, then trailing bytes one at a time. */
	while (n >= sizeof(word)) {
		word = rand();
		memcpy(byte, &word, sizeof(word));
		byte += sizeof(word);
		n -= sizeof(word);
	}
	while (n--)
		*byte++ = rand();
}
/* Build a connected loopback TCP socket pair and attach the "tls" ULP to
 * both ends.  *fd is the connecting (TX) side, *cfd the accepted (RX)
 * side.  If the kernel rejects TCP_ULP with ENOENT (no TLS support),
 * *notls is set and the pair is left as plain TCP so tests can degrade
 * gracefully.  Kept byte-identical: ASSERT_* macros embed harness
 * control flow tied to _metadata.
 */
static void ulp_sock_pair(struct __test_metadata *_metadata,
			  int *fd, int *cfd, bool *notls)
{
	struct sockaddr_in addr;
	socklen_t len;
	int sfd, ret;

	*notls = false;
	len = sizeof(addr);

	/* Bind to an ephemeral port on any local address. */
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;

	*fd = socket(AF_INET, SOCK_STREAM, 0);
	sfd = socket(AF_INET, SOCK_STREAM, 0);

	ret = bind(sfd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	ret = listen(sfd, 10);
	ASSERT_EQ(ret, 0);
	/* Recover the kernel-chosen port before connecting. */
	ret = getsockname(sfd, &addr, &len);
	ASSERT_EQ(ret, 0);
	ret = connect(*fd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	*cfd = accept(sfd, &addr, &len);
	ASSERT_GE(*cfd, 0);
	/* Listener no longer needed once the pair is connected. */
	close(sfd);

	ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	if (ret != 0) {
		/* ENOENT == no TLS ULP in this kernel; fall back. */
		ASSERT_EQ(errno, ENOENT);
		*notls = true;
		printf("Failure setting TCP_ULP, testing without tls\n");
		return;
	}

	ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	ASSERT_EQ(ret, 0);
}
/* Produce a basic cmsg */
/* Send len bytes of data over fd as a TLS record of the given (possibly
 * non-handshake/non-data) record type, carried via a TLS_SET_RECORD_TYPE
 * control message.  Returns sendmsg()'s result.
 */
static int tls_send_cmsg(int fd, unsigned char record_type,
			 void *data, size_t len, int flags)
{
	char cbuf[CMSG_SPACE(sizeof(char))];
	struct iovec vec = {
		.iov_base = data,
		.iov_len = len,
	};
	struct msghdr msg = {
		.msg_iov = &vec,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* One-byte ancillary payload carrying the record type. */
	cmsg->cmsg_level = SOL_TLS;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(char));
	*CMSG_DATA(cmsg) = record_type;
	/* Shrink controllen to the single cmsg actually populated. */
	msg.msg_controllen = cmsg->cmsg_len;

	return sendmsg(fd, &msg, flags);
}
/* Receive into data/len and verify that a TLS_GET_RECORD_TYPE control
 * message arrived with the expected record type.  Returns recvmsg()'s
 * result.
 *
 * Fix: EXPECT_* macros record a failure but do not abort, so a missing
 * cmsg previously fell through to a NULL dereference and crashed the
 * whole test binary instead of failing one test; bail out early now.
 */
static int tls_recv_cmsg(struct __test_metadata *_metadata,
			 int fd, unsigned char record_type,
			 void *data, size_t len, int flags)
{
	char cbuf[CMSG_SPACE(sizeof(char))];
	struct cmsghdr *cmsg;
	unsigned char ctype;
	struct msghdr msg;
	struct iovec vec;
	int n;

	vec.iov_base = data;
	vec.iov_len = len;
	memset(&msg, 0, sizeof(struct msghdr));
	msg.msg_iov = &vec;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	n = recvmsg(fd, &msg, flags);

	cmsg = CMSG_FIRSTHDR(&msg);
	EXPECT_NE(cmsg, NULL);
	if (!cmsg)	/* EXPECT_NE does not abort; avoid NULL deref */
		return n;
	EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
	EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);

	ctype = *((unsigned char *)CMSG_DATA(cmsg));
	EXPECT_EQ(ctype, record_type);

	return n;
}
/* Fixture: a connected socket pair with the TLS ULP attached but no
 * keys installed (notls set if the kernel lacks TLS support).
 */
FIXTURE(tls_basic)
{
	int fd, cfd;	/* TX and RX ends of the pair */
	bool notls;	/* true when TCP_ULP "tls" is unavailable */
};
FIXTURE_SETUP(tls_basic)
{
	ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
}
FIXTURE_TEARDOWN(tls_basic)
{
	close(self->fd);
	close(self->cfd);
}
/* Send some data through with ULP but no keys: plain data must still
 * round-trip through a tls-ULP socket before any keys are set.
 * Fix: dropped the stray top-level ';' after the TEST_F body.
 */
TEST_F(tls_basic, base_base)
{
	char const *test_str = "test_read";
	int send_len = 10;
	char buf[10];

	ASSERT_EQ(strlen(test_str) + 1, send_len);
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
/* TLS_TX with a bogus version or cipher_type must be rejected (-1).
 * The numeric cipher values (50, 59, 10, 70) are simply invalid IDs
 * around the valid TLS_CIPHER_* range.
 */
TEST_F(tls_basic, bad_cipher)
{
	struct tls_crypto_info_keys tls12;

	/* invalid protocol version */
	tls12.crypto_info.version = 200;
	tls12.crypto_info.cipher_type = TLS_CIPHER_AES_GCM_128;
	EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);

	/* valid version, invalid cipher IDs */
	tls12.crypto_info.version = TLS_1_2_VERSION;
	tls12.crypto_info.cipher_type = 50;
	EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);

	tls12.crypto_info.version = TLS_1_2_VERSION;
	tls12.crypto_info.cipher_type = 59;
	EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);

	tls12.crypto_info.version = TLS_1_2_VERSION;
	tls12.crypto_info.cipher_type = 10;
	EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);

	tls12.crypto_info.version = TLS_1_2_VERSION;
	tls12.crypto_info.cipher_type = 70;
	EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
}
/* Fixture: socket pair with keys installed (TX on fd, RX on cfd),
 * parameterized over version/cipher via variants below.
 */
FIXTURE(tls)
{
	int fd, cfd;	/* TX and RX ends */
	bool notls;	/* kernel lacks TLS ULP */
};
FIXTURE_VARIANT(tls)
{
	uint16_t tls_version;	/* TLS_1_2_VERSION or TLS_1_3_VERSION */
	uint16_t cipher_type;	/* TLS_CIPHER_* */
	/* nopad: enable TLS_RX_EXPECT_NO_PAD; fips_non_compliant: skip
	 * the variant when the kernel runs in FIPS mode.
	 */
	bool nopad, fips_non_compliant;
};
/* Version/cipher matrix the tls tests run under.  SM4 and ChaCha20
 * variants are skipped in FIPS mode.
 */
FIXTURE_VARIANT_ADD(tls, 12_aes_gcm)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_AES_GCM_128,
};
FIXTURE_VARIANT_ADD(tls, 13_aes_gcm)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_AES_GCM_128,
};
FIXTURE_VARIANT_ADD(tls, 12_chacha)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_CHACHA20_POLY1305,
	.fips_non_compliant = true,
};
FIXTURE_VARIANT_ADD(tls, 13_chacha)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_CHACHA20_POLY1305,
	.fips_non_compliant = true,
};
FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_SM4_GCM,
	.fips_non_compliant = true,
};
FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_SM4_CCM,
	.fips_non_compliant = true,
};
FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_AES_CCM_128,
};
FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_AES_CCM_128,
};
FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_AES_GCM_256,
};
FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_AES_GCM_256,
};
/* Same as 13_aes_gcm but with TLS_RX_EXPECT_NO_PAD enabled. */
FIXTURE_VARIANT_ADD(tls, 13_nopad)
{
	.tls_version = TLS_1_3_VERSION,
	.cipher_type = TLS_CIPHER_AES_GCM_128,
	.nopad = true,
};
FIXTURE_VARIANT_ADD(tls, 12_aria_gcm)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_ARIA_GCM_128,
};
FIXTURE_VARIANT_ADD(tls, 12_aria_gcm_256)
{
	.tls_version = TLS_1_2_VERSION,
	.cipher_type = TLS_CIPHER_ARIA_GCM_256,
};
/* Set up the pair and install zeroed keys for the variant's cipher:
 * TX on self->fd, RX on self->cfd; optionally TLS_RX_EXPECT_NO_PAD.
 */
FIXTURE_SETUP(tls)
{
	struct tls_crypto_info_keys tls12;
	int one = 1;
	int ret;

	if (fips_enabled && variant->fips_non_compliant)
		SKIP(return, "Unsupported cipher in FIPS mode");

	/* All-zero key material is fine: both ends use the same keys. */
	tls_crypto_info_init(variant->tls_version, variant->cipher_type,
			     &tls12);

	ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);

	if (self->notls)
		return;

	ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);

	ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);

	if (variant->nopad) {
		ret = setsockopt(self->cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
				 (void *)&one, sizeof(one));
		ASSERT_EQ(ret, 0);
	}
}
/* Release both ends of the socket pair. */
FIXTURE_TEARDOWN(tls)
{
	close(self->fd);
	close(self->cfd);
}
/* sendfile() of a real file (our own binary) over a kTLS socket must
 * succeed.  Fix: close filefd — it was leaked on every run.
 */
TEST_F(tls, sendfile)
{
	int filefd = open("/proc/self/exe", O_RDONLY);
	struct stat st;

	EXPECT_GE(filefd, 0);
	fstat(filefd, &st);
	EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
	close(filefd);
}
/* A send() followed by a sendfile() on the same TLS socket must both
 * arrive intact.  Fixes: check the malloc result before use, and stop
 * leaking both the malloc'd buffer and filefd.
 */
TEST_F(tls, send_then_sendfile)
{
	int filefd = open("/proc/self/exe", O_RDONLY);
	char const *test_str = "test_send";
	int to_send = strlen(test_str) + 1;
	char recv_buf[10];
	struct stat st;
	char *buf;

	EXPECT_GE(filefd, 0);
	fstat(filefd, &st);
	buf = (char *)malloc(st.st_size);
	ASSERT_NE(buf, NULL);

	EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send);
	EXPECT_EQ(recv(self->cfd, recv_buf, to_send, MSG_WAITALL), to_send);
	EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0);

	EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
	EXPECT_EQ(recv(self->cfd, buf, st.st_size, MSG_WAITALL), st.st_size);

	free(buf);
	close(filefd);
}
/* Write chunk_size + extra_payload_size bytes of 0x01 to a temp file and
 * push it through sendfile() in chunk_size pieces, then verify the RX
 * side receives the full payload.  The temp file is unlinked immediately
 * so it vanishes when fd is closed.
 */
static void chunked_sendfile(struct __test_metadata *_metadata,
			     struct _test_data_tls *self,
			     uint16_t chunk_size,
			     uint16_t extra_payload_size)
{
	char buf[TLS_PAYLOAD_MAX_LEN];
	uint16_t test_payload_size;
	int size = 0;
	int ret;
	char filename[] = "/tmp/mytemp.XXXXXX";
	int fd = mkstemp(filename);
	off_t offset = 0;

	unlink(filename);
	ASSERT_GE(fd, 0);
	EXPECT_GE(chunk_size, 1);
	test_payload_size = chunk_size + extra_payload_size;
	/* total payload must fit the stack buffer */
	ASSERT_GE(TLS_PAYLOAD_MAX_LEN, test_payload_size);
	memset(buf, 1, test_payload_size);
	size = write(fd, buf, test_payload_size);
	EXPECT_EQ(size, test_payload_size);
	fsync(fd);

	/* sendfile advances offset; loop until all bytes are pushed.
	 * NOTE(review): a persistent sendfile() failure (ret <= 0) would
	 * loop forever here — relies on EXPECT_GE flagging it first.
	 */
	while (size > 0) {
		ret = sendfile(self->fd, fd, &offset, chunk_size);
		EXPECT_GE(ret, 0);
		size -= ret;
	}

	EXPECT_EQ(recv(self->cfd, buf, test_payload_size, MSG_WAITALL),
		  test_payload_size);

	close(fd);
}
/* Exercise chunked_sendfile() over a spread of chunk/remainder sizes,
 * covering aligned, off-by-one, and cross-record-boundary splits.
 */
TEST_F(tls, multi_chunk_sendfile)
{
	chunked_sendfile(_metadata, self, 4096, 4096);
	chunked_sendfile(_metadata, self, 4096, 0);
	chunked_sendfile(_metadata, self, 4096, 1);
	chunked_sendfile(_metadata, self, 4096, 2048);
	chunked_sendfile(_metadata, self, 8192, 2048);
	chunked_sendfile(_metadata, self, 4096, 8192);
	chunked_sendfile(_metadata, self, 8192, 4096);
	chunked_sendfile(_metadata, self, 12288, 1024);
	chunked_sendfile(_metadata, self, 12288, 2000);
	chunked_sendfile(_metadata, self, 15360, 100);
	chunked_sendfile(_metadata, self, 15360, 300);
	chunked_sendfile(_metadata, self, 1, 4096);
	chunked_sendfile(_metadata, self, 2048, 4096);
	chunked_sendfile(_metadata, self, 2048, 8192);
	chunked_sendfile(_metadata, self, 4096, 8192);
	chunked_sendfile(_metadata, self, 1024, 12288);
	chunked_sendfile(_metadata, self, 2000, 12288);
	chunked_sendfile(_metadata, self, 100, 15360);
	chunked_sendfile(_metadata, self, 300, 15360);
}
/* Round-trip a maximum-sized (16 KB) record of random data. */
TEST_F(tls, recv_max)
{
	unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
	char recv_mem[TLS_PAYLOAD_MAX_LEN];
	char buf[TLS_PAYLOAD_MAX_LEN];

	memrnd(buf, sizeof(buf));
	EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
	EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
	EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
}
/* Round-trip a small string through the TLS pair. */
TEST_F(tls, recv_small)
{
	char const *test_str = "test_read";
	char rx_buf[10];
	int send_len = strlen(test_str) + 1;	/* include the NUL */

	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	EXPECT_NE(recv(self->cfd, rx_buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(rx_buf, test_str, send_len), 0);
}
/* MSG_MORE holds the record open: the first half must not be readable
 * until a closing send() flushes it; then both halves arrive at once.
 */
TEST_F(tls, msg_more)
{
	char const *test_str = "test_read";
	int send_len = 10;
	char buf[10 * 2];

	EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
	/* record still open — nothing to read yet */
	EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_WAITALL),
		  send_len * 2);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
/* Data queued with MSG_MORE but never flushed must stay invisible to
 * the receiver.
 */
TEST_F(tls, msg_more_unsent)
{
	char const *test_str = "test_read";
	char rx_buf[10];
	int len = 10;

	EXPECT_EQ(send(self->fd, test_str, len, MSG_MORE), len);
	EXPECT_EQ(recv(self->cfd, rx_buf, len, MSG_DONTWAIT), -1);
}
/* MSG_EOR closes the record immediately; the payload must round-trip. */
TEST_F(tls, msg_eor)
{
	char const *test_str = "test_read";
	char rx_buf[10];
	int len = 10;

	EXPECT_EQ(send(self->fd, test_str, len, MSG_EOR), len);
	EXPECT_EQ(recv(self->cfd, rx_buf, len, MSG_WAITALL), len);
	EXPECT_EQ(memcmp(rx_buf, test_str, len), 0);
}
/* Basic sendmsg() with a single iovec must be received intact. */
TEST_F(tls, sendmsg_single)
{
	struct msghdr msg;
	char const *test_str = "test_sendmsg";
	size_t send_len = 13;	/* strlen + NUL */
	struct iovec vec;
	char buf[13];

	vec.iov_base = (char *)test_str;
	vec.iov_len = send_len;
	memset(&msg, 0, sizeof(struct msghdr));
	msg.msg_iov = &vec;
	msg.msg_iovlen = 1;
	EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
	EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
#define MAX_FRAGS	64
#define SEND_LEN	13
/* sendmsg() with 1..MAX_FRAGS identical iovec fragments; each payload
 * must arrive concatenated in order.
 */
TEST_F(tls, sendmsg_fragmented)
{
	char const *test_str = "test_sendmsg";
	char buf[SEND_LEN * MAX_FRAGS];
	struct iovec vec[MAX_FRAGS];
	struct msghdr msg;
	int i, frags;

	for (frags = 1; frags <= MAX_FRAGS; frags++) {
		for (i = 0; i < frags; i++) {
			vec[i].iov_base = (char *)test_str;
			vec[i].iov_len = SEND_LEN;
		}

		memset(&msg, 0, sizeof(struct msghdr));
		msg.msg_iov = vec;
		msg.msg_iovlen = frags;

		EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags);
		EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL),
			  SEND_LEN * frags);

		for (i = 0; i < frags; i++)
			EXPECT_EQ(memcmp(buf + SEND_LEN * i,
					 test_str, SEND_LEN), 0);
	}
}
#undef MAX_FRAGS
#undef SEND_LEN
/* Stream 128 * 16 KB records through sendmsg()/recv().
 * Fix: the malloc result was used unchecked; abort the test on OOM.
 * The buffer content is intentionally whatever malloc returns — only
 * byte counts are verified here.
 */
TEST_F(tls, sendmsg_large)
{
	void *mem = malloc(16384);
	size_t send_len = 16384;
	size_t sends = 128;
	struct msghdr msg;
	size_t recvs = 0;
	size_t sent = 0;

	ASSERT_NE(mem, NULL);
	memset(&msg, 0, sizeof(struct msghdr));
	while (sent++ < sends) {
		struct iovec vec = { (void *)mem, send_len };

		msg.msg_iov = &vec;
		msg.msg_iovlen = 1;
		EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
	}

	while (recvs++ < sends) {
		EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
	}

	free(mem);
}
/* sendmsg() with 5 heap-allocated string fragments; the receiver must
 * see all NUL-terminated strings back to back.
 */
TEST_F(tls, sendmsg_multiple)
{
	char const *test_str = "test_sendmsg_multiple";
	struct iovec vec[5];
	char *test_strs[5];
	struct msghdr msg;
	int total_len = 0;
	int len_cmp = 0;
	int iov_len = 5;
	char *buf;
	int i;

	memset(&msg, 0, sizeof(struct msghdr));
	for (i = 0; i < iov_len; i++) {
		/* each fragment is its own copy of test_str (+NUL) */
		test_strs[i] = (char *)malloc(strlen(test_str) + 1);
		snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
		vec[i].iov_base = (void *)test_strs[i];
		vec[i].iov_len = strlen(test_strs[i]) + 1;
		total_len += vec[i].iov_len;
	}
	msg.msg_iov = vec;
	msg.msg_iovlen = iov_len;

	EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
	buf = malloc(total_len);
	EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
	/* walk the received buffer string by string */
	for (i = 0; i < iov_len; i++) {
		EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
				 strlen(test_strs[i])),
			  0);
		len_cmp += strlen(buf + len_cmp) + 1;
	}
	for (i = 0; i < iov_len; i++)
		free(test_strs[i]);
	free(buf);
}
/* Stress sendmsg() with 1024 small fragments (16 KB total); only the
 * aggregate receive is checked, plus a walk over the NUL-separated
 * strings to confirm framing.
 */
TEST_F(tls, sendmsg_multiple_stress)
{
	char const *test_str = "abcdefghijklmno";
	struct iovec vec[1024];
	char *test_strs[1024];
	int iov_len = 1024;
	int total_len = 0;
	char buf[1 << 14];
	struct msghdr msg;
	int len_cmp = 0;
	int i;

	memset(&msg, 0, sizeof(struct msghdr));
	for (i = 0; i < iov_len; i++) {
		test_strs[i] = (char *)malloc(strlen(test_str) + 1);
		snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
		vec[i].iov_base = (void *)test_strs[i];
		vec[i].iov_len = strlen(test_strs[i]) + 1;
		total_len += vec[i].iov_len;
	}
	msg.msg_iov = vec;
	msg.msg_iovlen = iov_len;

	EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
	EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);

	/* advance through the buffer one string at a time */
	for (i = 0; i < iov_len; i++)
		len_cmp += strlen(buf + len_cmp) + 1;

	for (i = 0; i < iov_len; i++)
		free(test_strs[i]);
}
/* splice() from a pipe into the TLS socket; the receiver must get the
 * same bytes.  NOTE(review): mem_send is sent uninitialized — the check
 * only compares sent vs received bytes, so content doesn't matter.
 */
TEST_F(tls, splice_from_pipe)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char mem_send[TLS_PAYLOAD_MAX_LEN];
	char mem_recv[TLS_PAYLOAD_MAX_LEN];
	int p[2];

	ASSERT_GE(pipe(p), 0);
	EXPECT_GE(write(p[1], mem_send, send_len), 0);
	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
	EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* Two separate pipes each splice half the payload into the socket; the
 * receiver must see the concatenation in order.
 */
TEST_F(tls, splice_from_pipe2)
{
	int send_len = 16000;
	char mem_send[16000];
	char mem_recv[16000];
	int p2[2];
	int p[2];

	memrnd(mem_send, sizeof(mem_send));

	ASSERT_GE(pipe(p), 0);
	ASSERT_GE(pipe(p2), 0);
	EXPECT_EQ(write(p[1], mem_send, 8000), 8000);
	EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, 8000, 0), 8000);
	EXPECT_EQ(write(p2[1], mem_send + 8000, 8000), 8000);
	EXPECT_EQ(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 8000);
	EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* Mix a regular send() with a subsequent splice() on the same socket;
 * both payloads must arrive in order.  NOTE(review): mem_send is sent
 * uninitialized — only send/recv consistency is checked.
 */
TEST_F(tls, send_and_splice)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char mem_send[TLS_PAYLOAD_MAX_LEN];
	char mem_recv[TLS_PAYLOAD_MAX_LEN];
	char const *test_str = "test_read";
	int send_len2 = 10;
	char buf[10];
	int p[2];

	ASSERT_GE(pipe(p), 0);
	/* ordinary send first */
	EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
	EXPECT_EQ(recv(self->cfd, buf, send_len2, MSG_WAITALL), send_len2);
	EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);

	/* then a spliced payload on the same socket */
	EXPECT_GE(write(p[1], mem_send, send_len), send_len);
	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);

	EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* splice() decrypted data out of the RX socket into a pipe and verify
 * it against the sent random payload.
 */
TEST_F(tls, splice_to_pipe)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char mem_send[TLS_PAYLOAD_MAX_LEN];
	char mem_recv[TLS_PAYLOAD_MAX_LEN];
	int p[2];

	memrnd(mem_send, sizeof(mem_send));

	ASSERT_GE(pipe(p), 0);
	EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
	EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), send_len);
	EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* A non-DATA record (type 100) cannot be spliced (EINVAL) or read
 * without a cmsg buffer (EIO); it must still be retrievable via
 * recvmsg() with ancillary data.
 */
TEST_F(tls, splice_cmsg_to_pipe)
{
	char *test_str = "test_read";
	char record_type = 100;
	int send_len = 10;
	char buf[10];
	int p[2];

	if (self->notls)
		SKIP(return, "no TLS support");

	ASSERT_GE(pipe(p), 0);
	EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
	/* splice cannot deliver the record type out of band */
	EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
	EXPECT_EQ(errno, EINVAL);
	/* plain recv has nowhere to put the record type either */
	EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(errno, EIO);
	EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
				buf, sizeof(buf), MSG_WAITALL),
		  send_len);
	EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
/* Same as splice_cmsg_to_pipe but with the failing recv() attempted
 * before the splice — i.e. after the record was already decrypted.
 */
TEST_F(tls, splice_dec_cmsg_to_pipe)
{
	char *test_str = "test_read";
	char record_type = 100;
	int send_len = 10;
	char buf[10];
	int p[2];

	if (self->notls)
		SKIP(return, "no TLS support");

	ASSERT_GE(pipe(p), 0);
	EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
	EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(errno, EIO);
	EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
	EXPECT_EQ(errno, EINVAL);
	EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
				buf, sizeof(buf), MSG_WAITALL),
		  send_len);
	EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
/* Consume one record partly via recv() and partly via splice(); both
 * halves together must reproduce the sent payload.  NOTE(review):
 * mem_send is sent uninitialized — only consistency is verified.
 */
TEST_F(tls, recv_and_splice)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char mem_send[TLS_PAYLOAD_MAX_LEN];
	char mem_recv[TLS_PAYLOAD_MAX_LEN];
	int half = send_len / 2;
	int p[2];

	ASSERT_GE(pipe(p), 0);
	EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
	/* Recv half of the record, splice the other half */
	EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half);
	EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK),
		  half);
	EXPECT_EQ(read(p[0], &mem_recv[half], half), half);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* MSG_PEEK across record boundaries (2.5 of 4 records), then drain the
 * whole payload via splice() and verify nothing was consumed by the
 * peek.
 */
TEST_F(tls, peek_and_splice)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char mem_send[TLS_PAYLOAD_MAX_LEN];
	char mem_recv[TLS_PAYLOAD_MAX_LEN];
	int chunk = TLS_PAYLOAD_MAX_LEN / 4;
	int n, i, p[2];

	memrnd(mem_send, sizeof(mem_send));

	ASSERT_GE(pipe(p), 0);
	/* four separate records of one chunk each */
	for (i = 0; i < 4; i++)
		EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0),
			  chunk);

	/* peek past the first record boundary */
	EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2,
		       MSG_WAITALL | MSG_PEEK),
		  chunk * 5 / 2);
	EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0);

	/* splice may return short; loop until everything is drained */
	n = 0;
	while (n < send_len) {
		i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0);
		EXPECT_GT(i, 0);
		n += i;
	}
	EXPECT_EQ(n, send_len);
	EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
/* Basic recvmsg() with a single iovec. */
TEST_F(tls, recvmsg_single)
{
	char const *test_str = "test_recvmsg_single";
	int send_len = strlen(test_str) + 1;
	char buf[20];
	struct msghdr hdr;
	struct iovec vec;

	memset(&hdr, 0, sizeof(hdr));
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	vec.iov_base = (char *)buf;
	vec.iov_len = send_len;
	hdr.msg_iovlen = 1;
	hdr.msg_iov = &vec;
	EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
	EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
/* recvmsg() of one maximum-sized record into a single iovec.
 * Fix: hdr was passed to recvmsg() with msg_control/msg_controllen
 * (and msg_name/msg_flags) uninitialized — garbage ancillary pointers
 * can make recvmsg fail with EFAULT.  Zero the header first, matching
 * recvmsg_single above.
 */
TEST_F(tls, recvmsg_single_max)
{
	int send_len = TLS_PAYLOAD_MAX_LEN;
	char send_mem[TLS_PAYLOAD_MAX_LEN];
	char recv_mem[TLS_PAYLOAD_MAX_LEN];
	struct iovec vec;
	struct msghdr hdr;

	memrnd(send_mem, sizeof(send_mem));
	EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
	vec.iov_base = (char *)recv_mem;
	vec.iov_len = TLS_PAYLOAD_MAX_LEN;

	memset(&hdr, 0, sizeof(hdr));
	hdr.msg_iovlen = 1;
	hdr.msg_iov = &vec;
	EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
	EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
}
/* recvmsg() scattering one 16 KB send across 1024 tiny iovecs.
 * Fixes: zero the msghdr before recvmsg() (msg_control et al. were
 * uninitialized) and abort on malloc failure instead of handing
 * recvmsg a NULL iov_base.
 */
TEST_F(tls, recvmsg_multiple)
{
	unsigned int msg_iovlen = 1024;
	struct iovec vec[1024];
	char *iov_base[1024];
	unsigned int iov_len = 16;
	int send_len = 1 << 14;
	char buf[1 << 14];
	struct msghdr hdr;
	int i;

	memrnd(buf, sizeof(buf));
	EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);

	for (i = 0; i < msg_iovlen; i++) {
		iov_base[i] = (char *)malloc(iov_len);
		ASSERT_NE(iov_base[i], NULL);
		vec[i].iov_base = iov_base[i];
		vec[i].iov_len = iov_len;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.msg_iovlen = msg_iovlen;
	hdr.msg_iov = vec;
	EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);

	for (i = 0; i < msg_iovlen; i++)
		free(iov_base[i]);
}
/* One oversized send (2 * 16 KB) drained by two recv() calls. */
TEST_F(tls, single_send_multiple_recv)
{
	unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2;
	unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
	char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
	char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];

	memrnd(send_mem, sizeof(send_mem));

	EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
	memset(recv_mem, 0, total_len);

	EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
	EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1);
	EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
}
/* Two 10-byte sends collected by one MSG_WAITALL recv(). */
TEST_F(tls, multiple_send_single_recv)
{
	unsigned int total_len = 2 * 10;
	unsigned int send_len = 10;
	char recv_mem[2 * 10];
	char send_mem[10];

	memrnd(send_mem, sizeof(send_mem));

	EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
	EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
	memset(recv_mem, 0, total_len);
	EXPECT_EQ(recv(self->cfd, recv_mem, total_len, MSG_WAITALL), total_len);

	/* both halves must match the (identical) sent buffer */
	EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
	EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
}
/* A 15-byte record read as 10 + 5 bytes: the second recv() must return
 * exactly the 5-byte remainder.
 */
TEST_F(tls, single_send_multiple_recv_non_align)
{
	const unsigned int total_len = 15;
	const unsigned int recv_len = 10;
	char recv_mem[recv_len * 2];
	char send_mem[total_len];

	memrnd(send_mem, sizeof(send_mem));

	EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
	memset(recv_mem, 0, total_len);

	EXPECT_EQ(recv(self->cfd, recv_mem, recv_len, 0), recv_len);
	EXPECT_EQ(recv(self->cfd, recv_mem + recv_len, recv_len, 0), 5);
	EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
}
/* Read one record in two exact-length pieces and check each piece. */
TEST_F(tls, recv_partial)
{
	char const *test_str = "test_read_partial";
	char const *test_str_first = "test_read";
	char const *test_str_second = "_partial";
	int send_len = strlen(test_str) + 1;
	char recv_mem[18];

	memset(recv_mem, 0, sizeof(recv_mem));
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	/* first piece: "test_read" */
	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
		       MSG_WAITALL), -1);
	EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
	memset(recv_mem, 0, sizeof(recv_mem));
	/* second piece: "_partial" */
	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
		       MSG_WAITALL), -1);
	EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
		  0);
}
/* With nothing queued, a non-blocking recv() must fail immediately
 * with EAGAIN/EWOULDBLOCK.
 */
TEST_F(tls, recv_nonblock)
{
	char rx_buf[4096];

	EXPECT_EQ(recv(self->cfd, rx_buf, sizeof(rx_buf), MSG_DONTWAIT), -1);
	EXPECT_EQ(errno == EAGAIN || errno == EWOULDBLOCK, true);
}
/* MSG_PEEK returns the payload without consuming it: a subsequent
 * regular recv() must see the same bytes again.
 */
TEST_F(tls, recv_peek)
{
	char const *test_str = "test_read_peek";
	int send_len = strlen(test_str) + 1;
	char rx_buf[15];

	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);

	EXPECT_EQ(recv(self->cfd, rx_buf, send_len, MSG_PEEK), send_len);
	EXPECT_EQ(memcmp(test_str, rx_buf, send_len), 0);

	memset(rx_buf, 0, sizeof(rx_buf));
	EXPECT_EQ(recv(self->cfd, rx_buf, send_len, 0), send_len);
	EXPECT_EQ(memcmp(test_str, rx_buf, send_len), 0);
}
/* Repeated peeks (100x) must all return the same record, and a final
 * regular recv() must still consume it.
 */
TEST_F(tls, recv_peek_multiple)
{
	char const *test_str = "test_read_peek";
	int send_len = strlen(test_str) + 1;
	unsigned int num_peeks = 100;
	char buf[15];
	int i;

	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	for (i = 0; i < num_peeks; i++) {
		EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
		EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
		memset(buf, 0, sizeof(buf));
	}
	EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
/* Peeking stops at a record boundary unless MSG_MORE kept the record
 * open on the sender side.  The payload is split "test_read_peek" +
 * "_mult_recs" across two sends.
 */
TEST_F(tls, recv_peek_multiple_records)
{
	char const *test_str = "test_read_peek_mult_recs";
	char const *test_str_first = "test_read_peek";
	char const *test_str_second = "_mult_recs";
	int len;
	char buf[64];

	len = strlen(test_str_first);
	EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);

	len = strlen(test_str_second) + 1;
	EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);

	len = strlen(test_str_first);
	memset(buf, 0, len);
	EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);

	/* MSG_PEEK can only peek into the current record. */
	len = strlen(test_str_first);
	EXPECT_EQ(memcmp(test_str_first, buf, len), 0);

	len = strlen(test_str) + 1;
	memset(buf, 0, len);
	EXPECT_EQ(recv(self->cfd, buf, len, MSG_WAITALL), len);

	/* Non-MSG_PEEK will advance strparser (and therefore record)
	 * however.
	 */
	len = strlen(test_str) + 1;
	EXPECT_EQ(memcmp(test_str, buf, len), 0);

	/* MSG_MORE will hold current record open, so later MSG_PEEK
	 * will see everything.
	 */
	len = strlen(test_str_first);
	EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);

	len = strlen(test_str_second) + 1;
	EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);

	len = strlen(test_str) + 1;
	memset(buf, 0, len);
	EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);

	len = strlen(test_str) + 1;
	EXPECT_EQ(memcmp(test_str, buf, len), 0);
}
/* Peek with a buffer spanning two records; the peek may return fewer
 * bytes than requested (record boundary), so only failure is checked
 * before comparing the full expected string.
 */
TEST_F(tls, recv_peek_large_buf_mult_recs)
{
	char const *test_str = "test_read_peek_mult_recs";
	char const *test_str_first = "test_read_peek";
	char const *test_str_second = "_mult_recs";
	int len;
	char buf[64];

	len = strlen(test_str_first);
	EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);

	len = strlen(test_str_second) + 1;
	EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);

	len = strlen(test_str) + 1;
	memset(buf, 0, len);
	EXPECT_NE((len = recv(self->cfd, buf, len,
			      MSG_PEEK | MSG_WAITALL)), -1);
	len = strlen(test_str) + 1;
	EXPECT_EQ(memcmp(test_str, buf, len), 0);
}
/* SO_RCVLOWAT interaction: reads smaller than, equal to, and past the
 * low-water mark across two records (10 + 5 bytes sent).
 */
TEST_F(tls, recv_lowat)
{
	char send_mem[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	char recv_mem[20];
	int lowat = 8;

	EXPECT_EQ(send(self->fd, send_mem, 10, 0), 10);
	EXPECT_EQ(send(self->fd, send_mem, 5, 0), 5);

	memset(recv_mem, 0, 20);
	EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVLOWAT,
			     &lowat, sizeof(lowat)), 0);
	EXPECT_EQ(recv(self->cfd, recv_mem, 1, MSG_WAITALL), 1);
	EXPECT_EQ(recv(self->cfd, recv_mem + 1, 6, MSG_WAITALL), 6);
	/* 8 bytes remain buffered: 3 from record one + 5 from record two */
	EXPECT_EQ(recv(self->cfd, recv_mem + 7, 10, 0), 8);

	EXPECT_EQ(memcmp(send_mem, recv_mem, 10), 0);
	EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
}
/* Install keys for the opposite directions too (RX on fd, TX on cfd)
 * and verify traffic flows both ways.  Fix: dropped the stray
 * top-level ';' after the TEST_F body.
 */
TEST_F(tls, bidir)
{
	char const *test_str = "test_read";
	int send_len = 10;
	char buf[10];
	int ret;

	if (!self->notls) {
		struct tls_crypto_info_keys tls12;

		tls_crypto_info_init(variant->tls_version, variant->cipher_type,
				     &tls12);

		ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
				 tls12.len);
		ASSERT_EQ(ret, 0);

		ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
				 tls12.len);
		ASSERT_EQ(ret, 0);
	}

	ASSERT_EQ(strlen(test_str) + 1, send_len);

	/* fd -> cfd */
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);

	/* cfd -> fd */
	memset(buf, 0, sizeof(buf));

	EXPECT_EQ(send(self->cfd, test_str, send_len, 0), send_len);
	EXPECT_NE(recv(self->fd, buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
/* POLLIN must fire once a record is queued, and poll() must time out
 * once it has been drained.
 */
TEST_F(tls, pollin)
{
	char const *test_str = "test_poll";
	struct pollfd fd = { 0, 0, 0 };
	char buf[10];
	int send_len = 10;

	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	fd.fd = self->cfd;
	fd.events = POLLIN;

	EXPECT_EQ(poll(&fd, 1, 20), 1);
	EXPECT_EQ(fd.revents & POLLIN, 1);
	EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len);
	/* Test timing out */
	EXPECT_EQ(poll(&fd, 1, 20), 0);
}
/* poll() with an infinite timeout must wake when data arrives. */
TEST_F(tls, poll_wait)
{
	char const *test_str = "test_poll_wait";
	int send_len = strlen(test_str) + 1;
	struct pollfd fd = { 0, 0, 0 };
	char recv_mem[15];

	fd.fd = self->cfd;
	fd.events = POLLIN;
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	/* Set timeout to inf. secs */
	EXPECT_EQ(poll(&fd, 1, -1), 1);
	EXPECT_EQ(fd.revents & POLLIN, 1);
	EXPECT_EQ(recv(self->cfd, recv_mem, send_len, MSG_WAITALL), send_len);
}
/* A partially-consumed record must still report POLLIN for the
 * leftover bytes held inside the TLS ULP.
 */
TEST_F(tls, poll_wait_split)
{
	struct pollfd fd = { 0, 0, 0 };
	char send_mem[20] = {};
	char recv_mem[15];

	fd.fd = self->cfd;
	fd.events = POLLIN;
	/* Send 20 bytes */
	EXPECT_EQ(send(self->fd, send_mem, sizeof(send_mem), 0),
		  sizeof(send_mem));
	/* Poll with inf. timeout */
	EXPECT_EQ(poll(&fd, 1, -1), 1);
	EXPECT_EQ(fd.revents & POLLIN, 1);
	EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), MSG_WAITALL),
		  sizeof(recv_mem));
	/* Now the remaining 5 bytes of record data are in TLS ULP */
	fd.fd = self->cfd;
	fd.events = POLLIN;
	EXPECT_EQ(poll(&fd, 1, -1), 1);
	EXPECT_EQ(fd.revents & POLLIN, 1);
	EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0),
		  sizeof(send_mem) - sizeof(recv_mem));
}
/* Fork a reader child and stream 100 KB with blocking send/recv; the
 * parent sends, the child drains, and the parent checks the child's
 * exit status.  NOTE(review): buf is sent uninitialized — only byte
 * counts matter here.
 */
TEST_F(tls, blocking)
{
	size_t data = 100000;
	int res = fork();

	EXPECT_NE(res, -1);

	if (res) {
		/* parent */
		size_t left = data;
		char buf[16384];
		int status;
		int pid2;

		while (left) {
			int res = send(self->fd, buf,
				       left > 16384 ? 16384 : left, 0);

			EXPECT_GE(res, 0);
			left -= res;
		}

		pid2 = wait(&status);
		EXPECT_EQ(status, 0);
		EXPECT_EQ(res, pid2);
	} else {
		/* child */
		size_t left = data;
		char buf[16384];

		while (left) {
			int res = recv(self->cfd, buf,
				       left > 16384 ? 16384 : left, 0);

			EXPECT_GE(res, 0);
			left -= res;
		}
	}
}
/* Same producer/consumer pattern as "blocking" but with O_NONBLOCK on
 * both sockets and a tiny SO_SNDBUF so that EAGAIN is actually hit;
 * both sides must observe at least one EAGAIN and retry.
 */
TEST_F(tls, nonblocking)
{
	size_t data = 100000;
	int sendbuf = 100;
	int flags;
	int res;

	flags = fcntl(self->fd, F_GETFL, 0);
	fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
	fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK);

	/* Ensure nonblocking behavior by imposing a small send
	 * buffer.
	 */
	EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF,
			     &sendbuf, sizeof(sendbuf)), 0);

	res = fork();
	EXPECT_NE(res, -1);

	if (res) {
		/* parent */
		bool eagain = false;
		size_t left = data;
		char buf[16384];
		int status;
		int pid2;

		while (left) {
			int res = send(self->fd, buf,
				       left > 16384 ? 16384 : left, 0);

			if (res == -1 && errno == EAGAIN) {
				eagain = true;
				usleep(10000);
				continue;
			}
			EXPECT_GE(res, 0);
			left -= res;
		}

		EXPECT_TRUE(eagain);
		pid2 = wait(&status);

		EXPECT_EQ(status, 0);
		EXPECT_EQ(res, pid2);
	} else {
		/* child */
		bool eagain = false;
		size_t left = data;
		char buf[16384];

		while (left) {
			int res = recv(self->cfd, buf,
				       left > 16384 ? 16384 : left, 0);

			if (res == -1 && errno == EAGAIN) {
				eagain = true;
				usleep(10000);
				continue;
			}
			EXPECT_GE(res, 0);
			left -= res;
		}
		EXPECT_TRUE(eagain);
	}
}
/* Multi-process stress helper: fork n_readers + n_writers children and
 * move a fixed amount of data through the TLS socket pair concurrently.
 * Writers either send() a prepared buffer or sendfile() a tmpfile
 * (sendpg).  Reader/writer counts must be multiples of each other so
 * that the per-child biases keep total bytes written == bytes read.
 * Note: children return from here into the harness, which collects
 * their exit status in the parent's wait() loop.
 */
static void
test_mutliproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
	       bool sendpg, unsigned int n_readers, unsigned int n_writers)
{
	const unsigned int n_children = n_readers + n_writers;
	const size_t data = 6 * 1000 * 1000;
	const size_t file_sz = data / 100;
	size_t read_bias, write_bias;
	int i, fd, child_id;
	char buf[file_sz];
	pid_t pid;
	/* Only allow multiples for simplicity */
	ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
	read_bias = n_writers / n_readers ?: 1;
	write_bias = n_readers / n_writers ?: 1;
	/* prep a file to send */
	fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
	ASSERT_GE(fd, 0);
	memset(buf, 0xac, file_sz);
	ASSERT_EQ(write(fd, buf, file_sz), file_sz);
	/* spawn children; each child learns its index via child_id */
	for (child_id = 0; child_id < n_children; child_id++) {
		pid = fork();
		ASSERT_NE(pid, -1);
		if (!pid)
			break;
	}
	/* parent waits for all children */
	if (pid) {
		for (i = 0; i < n_children; i++) {
			int status;
			wait(&status);
			EXPECT_EQ(status, 0);
		}
		return;
	}
	/* Split threads for reading and writing */
	if (child_id < n_readers) {
		size_t left = data * read_bias;
		char rb[8001];
		while (left) {
			int res;
			res = recv(self->cfd, rb,
				   left > sizeof(rb) ? sizeof(rb) : left, 0);
			EXPECT_GE(res, 0);
			left -= res;
		}
	} else {
		size_t left = data * write_bias;
		while (left) {
			int res;
			/* rewind so sendfile() re-reads the same content */
			ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
			if (sendpg)
				res = sendfile(self->fd, fd, NULL,
					       left > file_sz ? file_sz : left);
			else
				res = send(self->fd, buf,
					   left > file_sz ? file_sz : left, 0);
			EXPECT_GE(res, 0);
			left -= res;
		}
	}
}
/* Multi-process stress variants: (readers, writers) combinations, with
 * send() or sendfile() (the "sendpage" variants) as the TX path.
 */
TEST_F(tls, mutliproc_even)
{
	test_mutliproc(_metadata, self, false, 6, 6);
}
TEST_F(tls, mutliproc_readers)
{
	test_mutliproc(_metadata, self, false, 4, 12);
}
TEST_F(tls, mutliproc_writers)
{
	test_mutliproc(_metadata, self, false, 10, 2);
}
TEST_F(tls, mutliproc_sendpage_even)
{
	test_mutliproc(_metadata, self, true, 6, 6);
}
TEST_F(tls, mutliproc_sendpage_readers)
{
	test_mutliproc(_metadata, self, true, 4, 12);
}
TEST_F(tls, mutliproc_sendpage_writers)
{
	test_mutliproc(_metadata, self, true, 10, 2);
}
/* Send a record with a non-DATA record type and verify it can only be
 * received through a control message (plain recv() must fail), both
 * with MSG_PEEK and with a normal consuming read.
 */
TEST_F(tls, control_msg)
{
	char *test_str = "test_read";
	char record_type = 100;
	int send_len = 10;
	char buf[10];
	if (self->notls)
		SKIP(return, "no TLS support");
	EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0),
		  send_len);
	/* Should fail because we didn't provide a control message */
	EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
				buf, sizeof(buf), MSG_WAITALL | MSG_PEEK),
		  send_len);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
	/* Recv the message again without MSG_PEEK */
	memset(buf, 0, sizeof(buf));
	EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
				buf, sizeof(buf), MSG_WAITALL),
		  send_len);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
/* shutdown() after a completed transfer must not break the TLS socket. */
TEST_F(tls, shutdown)
{
	char const *test_str = "test_read";
	int send_len = 10;
	char buf[10];
	ASSERT_EQ(strlen(test_str) + 1, send_len);
	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
	EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
	EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
	shutdown(self->fd, SHUT_RDWR);
	shutdown(self->cfd, SHUT_RDWR);
}
/* shutdown() with data still queued by MSG_MORE (record not yet pushed)
 * must not crash or hang.
 */
TEST_F(tls, shutdown_unsent)
{
	char const *test_str = "test_read";
	int send_len = 10;
	EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
	shutdown(self->fd, SHUT_RDWR);
	shutdown(self->cfd, SHUT_RDWR);
}
/* A shut-down TLS socket cannot be reused: bind() succeeds but listen()
 * and connect() must fail with EINVAL / EISCONN respectively.
 */
TEST_F(tls, shutdown_reuse)
{
	struct sockaddr_in addr;
	int ret;
	shutdown(self->fd, SHUT_RDWR);
	shutdown(self->cfd, SHUT_RDWR);
	close(self->cfd);
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;
	ret = bind(self->fd, &addr, sizeof(addr));
	EXPECT_EQ(ret, 0);
	ret = listen(self->fd, 10);
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, EINVAL);
	ret = connect(self->fd, &addr, sizeof(addr));
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, EISCONN);
}
/* getsockopt(TLS_TX) must support a short read of just the version and
 * cipher, a full read of the crypto_info, and reject any in-between or
 * undersized lengths with EINVAL.
 */
TEST_F(tls, getsockopt)
{
	struct tls_crypto_info_keys expect, get;
	socklen_t len;
	/* get only the version/cipher */
	len = sizeof(struct tls_crypto_info);
	memrnd(&get, sizeof(get));
	EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), 0);
	EXPECT_EQ(len, sizeof(struct tls_crypto_info));
	EXPECT_EQ(get.crypto_info.version, variant->tls_version);
	EXPECT_EQ(get.crypto_info.cipher_type, variant->cipher_type);
	/* get the full crypto_info */
	tls_crypto_info_init(variant->tls_version, variant->cipher_type, &expect);
	len = expect.len;
	memrnd(&get, sizeof(get));
	EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), 0);
	EXPECT_EQ(len, expect.len);
	EXPECT_EQ(get.crypto_info.version, variant->tls_version);
	EXPECT_EQ(get.crypto_info.cipher_type, variant->cipher_type);
	EXPECT_EQ(memcmp(&get, &expect, expect.len), 0);
	/* short get should fail */
	len = sizeof(struct tls_crypto_info) - 1;
	EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), -1);
	EXPECT_EQ(errno, EINVAL);
	/* partial get of the cipher data should fail */
	len = expect.len - 1;
	EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), -1);
	EXPECT_EQ(errno, EINVAL);
}
/* Fixture for error-injection tests.  Two socket pairs are created:
 * fd (TLS_TX only) -> cfd reads the raw ciphertext, which the test can
 * mangle and re-inject via fd2 -> cfd2 (TLS_RX only).  This lets tests
 * produce corrupted-but-well-formed records for the receive path.
 */
FIXTURE(tls_err)
{
	int fd, cfd;
	int fd2, cfd2;
	bool notls;
};
FIXTURE_VARIANT(tls_err)
{
	uint16_t tls_version;
};
FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm)
{
	.tls_version = TLS_1_2_VERSION,
};
FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm)
{
	.tls_version = TLS_1_3_VERSION,
};
FIXTURE_SETUP(tls_err)
{
	struct tls_crypto_info_keys tls12;
	int ret;
	tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128,
			     &tls12);
	ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
	ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls);
	if (self->notls)
		return;
	/* TX on the first pair, RX on the second, same keys */
	ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
	ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
}
FIXTURE_TEARDOWN(tls_err)
{
	close(self->fd);
	close(self->cfd);
	close(self->fd2);
	close(self->cfd2);
}
/* Garbage that is not a valid TLS record must fail with EMSGSIZE and
 * leave the socket returning EAGAIN, not looping or crashing.
 */
TEST_F(tls_err, bad_rec)
{
	char buf[64];
	if (self->notls)
		SKIP(return, "no TLS support");
	memset(buf, 0x55, sizeof(buf));
	EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EMSGSIZE);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1);
	EXPECT_EQ(errno, EAGAIN);
}
/* A record whose auth tag was corrupted must fail with EBADMSG, and
 * keep failing on subsequent reads.
 */
TEST_F(tls_err, bad_auth)
{
	char buf[128];
	int n;
	if (self->notls)
		SKIP(return, "no TLS support");
	memrnd(buf, sizeof(buf) / 2);
	EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2);
	n = recv(self->cfd, buf, sizeof(buf), 0);
	EXPECT_GT(n, sizeof(buf) / 2);
	/* flip the last ciphertext byte to break the auth tag */
	buf[n - 1]++;
	EXPECT_EQ(send(self->fd2, buf, n, 0), n);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
}
/* With three records queued and the third corrupted, a large read must
 * return the two good records first, then report EBADMSG.
 */
TEST_F(tls_err, bad_in_large_read)
{
	char txt[3][64];
	char cip[3][128];
	char buf[3 * 128];
	int i, n;
	if (self->notls)
		SKIP(return, "no TLS support");
	/* Put 3 records in the sockets */
	for (i = 0; i < 3; i++) {
		memrnd(txt[i], sizeof(txt[i]));
		EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0),
			  sizeof(txt[i]));
		n = recv(self->cfd, cip[i], sizeof(cip[i]), 0);
		EXPECT_GT(n, sizeof(txt[i]));
		/* Break the third message */
		if (i == 2)
			cip[2][n - 1]++;
		EXPECT_EQ(send(self->fd2, cip[i], n, 0), n);
	}
	/* We should be able to receive the first two messages */
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2);
	EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0);
	EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0);
	/* Third message is bad */
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
}
/* A corrupted control-message record after a good data record: the data
 * record is delivered, then the broken cmsg record reports EBADMSG.
 */
TEST_F(tls_err, bad_cmsg)
{
	char *test_str = "test_read";
	int send_len = 10;
	char cip[128];
	char buf[128];
	char txt[64];
	int n;
	if (self->notls)
		SKIP(return, "no TLS support");
	/* Queue up one data record */
	memrnd(txt, sizeof(txt));
	EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt));
	n = recv(self->cfd, cip, sizeof(cip), 0);
	EXPECT_GT(n, sizeof(txt));
	EXPECT_EQ(send(self->fd2, cip, n, 0), n);
	/* Then a record with type 100, captured and corrupted */
	EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
	n = recv(self->cfd, cip, sizeof(cip), 0);
	cip[n - 1]++; /* Break it */
	EXPECT_GT(n, send_len);
	EXPECT_EQ(send(self->fd2, cip, n, 0), n);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt));
	EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
	EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
	EXPECT_EQ(errno, EBADMSG);
}
/* SO_RCVTIMEO must be honored even when two processes are blocked on
 * the same TLS socket concurrently: all readers get EAGAIN.
 */
TEST_F(tls_err, timeo)
{
	struct timeval tv = { .tv_usec = 10000, };
	char buf[128];
	int ret;
	if (self->notls)
		SKIP(return, "no TLS support");
	ret = setsockopt(self->cfd2, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
	ASSERT_EQ(ret, 0);
	ret = fork();
	ASSERT_GE(ret, 0);
	if (ret) {
		usleep(1000); /* Give child a head start */
		EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
		EXPECT_EQ(errno, EAGAIN);
		EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
		EXPECT_EQ(errno, EAGAIN);
		wait(&ret);
	} else {
		EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
		EXPECT_EQ(errno, EAGAIN);
		exit(0);
	}
}
/* A partial TLS record (ciphertext cut short) must not signal POLLIN;
 * once the rest of the record arrives, poll() fires and the full
 * plaintext is readable.
 */
TEST_F(tls_err, poll_partial_rec)
{
	struct pollfd pfd = { };
	ssize_t rec_len;
	char rec[256];
	char buf[128];
	if (self->notls)
		SKIP(return, "no TLS support");
	pfd.fd = self->cfd2;
	pfd.events = POLLIN;
	EXPECT_EQ(poll(&pfd, 1, 1), 0);
	memrnd(buf, sizeof(buf));
	EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
	rec_len = recv(self->cfd, rec, sizeof(rec), 0);
	EXPECT_GT(rec_len, sizeof(buf));
	/* Write 100B, not the full record ... */
	EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
	/* ... no full record should mean no POLLIN */
	pfd.fd = self->cfd2;
	pfd.events = POLLIN;
	EXPECT_EQ(poll(&pfd, 1, 1), 0);
	/* Now write the rest, and it should all pop out of the other end. */
	EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0), rec_len - 100);
	pfd.fd = self->cfd2;
	pfd.events = POLLIN;
	EXPECT_EQ(poll(&pfd, 1, 1), 1);
	EXPECT_EQ(recv(self->cfd2, rec, sizeof(rec), 0), sizeof(buf));
	EXPECT_EQ(memcmp(buf, rec, sizeof(buf)), 0);
}
/* Same partial-record scenario, exercised via epoll instead of poll. */
TEST_F(tls_err, epoll_partial_rec)
{
	struct epoll_event ev, events[10];
	ssize_t rec_len;
	char rec[256];
	char buf[128];
	int epollfd;
	if (self->notls)
		SKIP(return, "no TLS support");
	epollfd = epoll_create1(0);
	ASSERT_GE(epollfd, 0);
	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = self->cfd2;
	ASSERT_GE(epoll_ctl(epollfd, EPOLL_CTL_ADD, self->cfd2, &ev), 0);
	EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 0);
	memrnd(buf, sizeof(buf));
	EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
	rec_len = recv(self->cfd, rec, sizeof(rec), 0);
	EXPECT_GT(rec_len, sizeof(buf));
	/* Write 100B, not the full record ... */
	EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
	/* ... no full record should mean no POLLIN */
	EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 0);
	/* Now write the rest, and it should all pop out of the other end. */
	EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0), rec_len - 100);
	EXPECT_EQ(epoll_wait(epollfd, events, 10, 0), 1);
	EXPECT_EQ(recv(self->cfd2, rec, sizeof(rec), 0), sizeof(buf));
	EXPECT_EQ(memcmp(buf, rec, sizeof(buf)), 0);
	close(epollfd);
}
/* Partial record with a reader already sleeping in poll(): the child
 * must not be woken by the partial write, only by the completion.
 * A pipe is used as a barrier between parent and child.
 */
TEST_F(tls_err, poll_partial_rec_async)
{
	struct pollfd pfd = { };
	ssize_t rec_len;
	char rec[256];
	char buf[128];
	char token;
	int p[2];
	int ret;
	if (self->notls)
		SKIP(return, "no TLS support");
	ASSERT_GE(pipe(p), 0);
	memrnd(buf, sizeof(buf));
	EXPECT_EQ(send(self->fd, buf, sizeof(buf), 0), sizeof(buf));
	rec_len = recv(self->cfd, rec, sizeof(rec), 0);
	EXPECT_GT(rec_len, sizeof(buf));
	ret = fork();
	ASSERT_GE(ret, 0);
	if (ret) {
		int status, pid2;
		close(p[1]);
		usleep(1000); /* Give child a head start */
		EXPECT_EQ(send(self->fd2, rec, 100, 0), 100);
		EXPECT_EQ(read(p[0], &token, 1), 1); /* Barrier #1 */
		EXPECT_EQ(send(self->fd2, rec + 100, rec_len - 100, 0),
			  rec_len - 100);
		pid2 = wait(&status);
		EXPECT_EQ(pid2, ret);
		EXPECT_EQ(status, 0);
	} else {
		close(p[0]);
		/* Child should sleep in poll(), never get a wake */
		pfd.fd = self->cfd2;
		pfd.events = POLLIN;
		EXPECT_EQ(poll(&pfd, 1, 5), 0);
		EXPECT_EQ(write(p[1], &token, 1), 1); /* Barrier #1 */
		pfd.fd = self->cfd2;
		pfd.events = POLLIN;
		EXPECT_EQ(poll(&pfd, 1, 5), 1);
		exit(!_metadata->passed);
	}
}
/* The TLS ULP can only be attached to an established TCP socket:
 * unconnected and listening sockets must fail with ENOTCONN, and
 * attaching twice to the same socket must fail with EEXIST.
 * Skips early (ENOENT) when the kernel has no TLS ULP support.
 */
TEST(non_established) {
	struct tls12_crypto_info_aes_gcm_256 tls12;
	struct sockaddr_in addr;
	int sfd, ret, fd;
	socklen_t len;
	len = sizeof(addr);
	memset(&tls12, 0, sizeof(tls12));
	tls12.info.version = TLS_1_2_VERSION;
	tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;
	fd = socket(AF_INET, SOCK_STREAM, 0);
	sfd = socket(AF_INET, SOCK_STREAM, 0);
	ret = bind(sfd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	ret = listen(sfd, 10);
	ASSERT_EQ(ret, 0);
	/* not yet connected -> must be rejected */
	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	EXPECT_EQ(ret, -1);
	/* TLS ULP not supported */
	if (errno == ENOENT) {
		/* don't leak the sockets on the skip path */
		close(fd);
		close(sfd);
		return;
	}
	EXPECT_EQ(errno, ENOTCONN);
	/* a listening socket is not established either */
	ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, ENOTCONN);
	ret = getsockname(sfd, &addr, &len);
	ASSERT_EQ(ret, 0);
	ret = connect(fd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	/* established now: first attach succeeds, second is EEXIST */
	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	ASSERT_EQ(ret, 0);
	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, EEXIST);
	close(fd);
	close(sfd);
}
/* AES-256-GCM keys (larger than the default AES-128 sizes) must be
 * accepted for both TX and RX.
 */
TEST(keysizes) {
	struct tls12_crypto_info_aes_gcm_256 tls12;
	int ret, fd, cfd;
	bool notls;
	memset(&tls12, 0, sizeof(tls12));
	tls12.info.version = TLS_1_2_VERSION;
	tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
	ulp_sock_pair(_metadata, &fd, &cfd, &notls);
	if (!notls) {
		ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
				 sizeof(tls12));
		EXPECT_EQ(ret, 0);
		ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
				 sizeof(tls12));
		EXPECT_EQ(ret, 0);
	}
	close(fd);
	close(cfd);
}
/* TLS_RX_EXPECT_NO_PAD (TLS 1.3) must be settable, gettable, and
 * togglable; getsockopt reflects the last value written.
 */
TEST(no_pad) {
	struct tls12_crypto_info_aes_gcm_256 tls12;
	int ret, fd, cfd, val;
	socklen_t len;
	bool notls;
	memset(&tls12, 0, sizeof(tls12));
	tls12.info.version = TLS_1_3_VERSION;
	tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
	ulp_sock_pair(_metadata, &fd, &cfd, &notls);
	if (notls)
		exit(KSFT_SKIP);
	ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, sizeof(tls12));
	EXPECT_EQ(ret, 0);
	ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, sizeof(tls12));
	EXPECT_EQ(ret, 0);
	/* enable, then read back */
	val = 1;
	ret = setsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
			 (void *)&val, sizeof(val));
	EXPECT_EQ(ret, 0);
	len = sizeof(val);
	val = 2;
	ret = getsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
			 (void *)&val, &len);
	EXPECT_EQ(ret, 0);
	EXPECT_EQ(val, 1);
	EXPECT_EQ(len, 4);
	/* disable again, then read back */
	val = 0;
	ret = setsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
			 (void *)&val, sizeof(val));
	EXPECT_EQ(ret, 0);
	len = sizeof(val);
	val = 2;
	ret = getsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
			 (void *)&val, &len);
	EXPECT_EQ(ret, 0);
	EXPECT_EQ(val, 0);
	EXPECT_EQ(len, 4);
	close(fd);
	close(cfd);
}
/* Installing TLS on an AF_INET6 socket must not corrupt the inet6 ops:
 * getsockname() before and after must return the same address.
 */
TEST(tls_v6ops) {
	struct tls_crypto_info_keys tls12;
	struct sockaddr_in6 addr, addr2;
	int sfd, ret, fd;
	socklen_t len, len2;
	tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12);
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = 0;
	fd = socket(AF_INET6, SOCK_STREAM, 0);
	sfd = socket(AF_INET6, SOCK_STREAM, 0);
	ret = bind(sfd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	ret = listen(sfd, 10);
	ASSERT_EQ(ret, 0);
	len = sizeof(addr);
	ret = getsockname(sfd, &addr, &len);
	ASSERT_EQ(ret, 0);
	ret = connect(fd, &addr, sizeof(addr));
	ASSERT_EQ(ret, 0);
	len = sizeof(addr);
	ret = getsockname(fd, &addr, &len);
	ASSERT_EQ(ret, 0);
	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	if (ret) {
		ASSERT_EQ(errno, ENOENT);
		SKIP(return, "no TLS support");
	}
	ASSERT_EQ(ret, 0);
	ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
	ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
	/* address must be unchanged by the ULP install */
	len2 = sizeof(addr2);
	ret = getsockname(fd, &addr2, &len2);
	ASSERT_EQ(ret, 0);
	EXPECT_EQ(len2, len);
	EXPECT_EQ(memcmp(&addr, &addr2, len), 0);
	close(fd);
	close(sfd);
}
/* Data sent before TLS_RX is installed on the receiver must still be
 * decrypted correctly once it is (the "prequeue" path).
 */
TEST(prequeue) {
	struct tls_crypto_info_keys tls12;
	char buf[20000], buf2[20000];
	struct sockaddr_in addr;
	int sfd, cfd, ret, fd;
	socklen_t len;
	len = sizeof(addr);
	memrnd(buf, sizeof(buf));
	tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls12);
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;
	fd = socket(AF_INET, SOCK_STREAM, 0);
	sfd = socket(AF_INET, SOCK_STREAM, 0);
	ASSERT_EQ(bind(sfd, &addr, sizeof(addr)), 0);
	ASSERT_EQ(listen(sfd, 10), 0);
	ASSERT_EQ(getsockname(sfd, &addr, &len), 0);
	ASSERT_EQ(connect(fd, &addr, sizeof(addr)), 0);
	ASSERT_GE(cfd = accept(sfd, &addr, &len), 0);
	close(sfd);
	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	if (ret) {
		ASSERT_EQ(errno, ENOENT);
		SKIP(return, "no TLS support");
	}
	/* send encrypted data before the receiver has TLS_RX set up */
	ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0);
	EXPECT_EQ(send(fd, buf, sizeof(buf), MSG_DONTWAIT), sizeof(buf));
	ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0);
	ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0);
	EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_WAITALL), sizeof(buf2));
	EXPECT_EQ(memcmp(buf, buf2, sizeof(buf)), 0);
	close(fd);
	close(cfd);
}
/* Constructor: read /proc/sys/crypto/fips_enabled into the global
 * fips_enabled flag before any test runs.  Missing file (no FIPS
 * support) leaves the flag at 0.
 */
static void __attribute__((constructor)) fips_check(void) {
	int res;
	FILE *f;
	f = fopen("/proc/sys/crypto/fips_enabled", "r");
	if (f) {
		res = fscanf(f, "%d", &fips_enabled);
		if (res != 1)
			ksft_print_msg("ERROR: Couldn't read /proc/sys/crypto/fips_enabled\n");
		fclose(f);
	}
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/net/tls.c |
// SPDX-License-Identifier: GPL-2.0
/*
* It is possible to use SO_REUSEPORT to open multiple sockets bound to
* equivalent local addresses using AF_INET and AF_INET6 at the same time. If
* the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
* receive a given incoming packet. However, when it is not set, incoming v4
* packets should prefer the AF_INET socket(s). This behavior was defined with
* the original SO_REUSEPORT implementation, but broke with
* e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
* This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
* AF_INET preference for v4 packets.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <linux/in.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
static const int PORT = 8888;
/* Create 'count' SO_REUSEPORT receive sockets of the given family and
 * protocol, all bound to PORT on the wildcard address, storing the fds
 * in rcv_fds.  For SOCK_STREAM the sockets are also put into listen().
 * Exits via error() on any failure.
 */
static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
{
	struct sockaddr_storage addr;
	struct sockaddr_in *addr4;
	struct sockaddr_in6 *addr6;
	int opt, i;
	/* zero the whole storage: bind() is passed sizeof(addr), so any
	 * bytes beyond the family-specific struct (and sin_zero) must
	 * not be uninitialized stack garbage.
	 */
	memset(&addr, 0, sizeof(addr));
	switch (family) {
	case AF_INET:
		addr4 = (struct sockaddr_in *)&addr;
		addr4->sin_family = AF_INET;
		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
		addr4->sin_port = htons(PORT);
		break;
	case AF_INET6:
		addr6 = (struct sockaddr_in6 *)&addr;
		addr6->sin6_family = AF_INET6;
		addr6->sin6_addr = in6addr_any;
		addr6->sin6_port = htons(PORT);
		break;
	default:
		error(1, 0, "Unsupported family %d", family);
	}
	for (i = 0; i < count; ++i) {
		rcv_fds[i] = socket(family, proto, 0);
		if (rcv_fds[i] < 0)
			error(1, errno, "failed to create receive socket");
		opt = 1;
		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
			       sizeof(opt)))
			error(1, errno, "failed to set SO_REUSEPORT");
		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
			error(1, errno, "failed to bind receive socket");
		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
			error(1, errno, "failed to listen on receive port");
	}
}
/* Send a single one-byte datagram/stream payload from an AF_INET socket
 * (bound to the wildcard address, ephemeral port) to loopback:PORT.
 * Exits via error() on any failure.
 */
static void send_from_v4(int proto)
{
	struct sockaddr_in src, dst;
	int fd;
	memset(&src, 0, sizeof(src));
	src.sin_family = AF_INET;
	src.sin_addr.s_addr = htonl(INADDR_ANY);
	src.sin_port = 0;	/* let the kernel pick a source port */
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	dst.sin_port = htons(PORT);
	fd = socket(AF_INET, proto, 0);
	if (fd < 0)
		error(1, errno, "failed to create send socket");
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)))
		error(1, errno, "failed to bind send socket");
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)))
		error(1, errno, "failed to connect send socket");
	if (send(fd, "a", 1, 0) < 0)
		error(1, errno, "failed to send message");
	close(fd);
}
/* Block on the epoll set until one receive socket is readable, consume
 * one message from it (accepting and closing the connection first for
 * SOCK_STREAM), and return the fd of the socket that became ready.
 * Exits via error() on any failure.
 */
static int receive_once(int epfd, int proto)
{
	struct epoll_event ev;
	char buf[8];
	int nread, conn;
	if (epoll_wait(epfd, &ev, 1, -1) < 0)
		error(1, errno, "epoll_wait failed");
	if (proto == SOCK_STREAM) {
		conn = accept(ev.data.fd, NULL, NULL);
		if (conn < 0)
			error(1, errno, "failed to accept");
		nread = recv(conn, buf, sizeof(buf), 0);
		close(conn);
	} else {
		nread = recv(ev.data.fd, buf, sizeof(buf), 0);
	}
	if (nread < 0)
		error(1, errno, "failed to recv");
	return ev.data.fd;
}
static void test(int *rcv_fds, int count, int proto)
{
struct epoll_event ev;
int epfd, i, test_fd;
int test_family;
socklen_t len;
epfd = epoll_create(1);
if (epfd < 0)
error(1, errno, "failed to create epoll");
ev.events = EPOLLIN;
for (i = 0; i < count; ++i) {
ev.data.fd = rcv_fds[i];
if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
error(1, errno, "failed to register sock epoll");
}
send_from_v4(proto);
test_fd = receive_once(epfd, proto);
len = sizeof(test_family);
if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
error(1, errno, "failed to read socket domain");
if (test_family != AF_INET)
error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
test_family);
close(epfd);
}
/* Run the v4-preference check for UDP and TCP, with both creation
 * orders (v4-first and v6-first), plus large UDP groups (>10 sockets)
 * which exercise a different kernel socket-lookup path.
 */
int main(void)
{
	int rcv_fds[32], i;
	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);
	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_DGRAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);
	/* NOTE: UDP socket lookups traverse a different code path when there
	 * are > 10 sockets in a group.
	 */
	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);
	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
	test(rcv_fds, 32, SOCK_DGRAM);
	for (i = 0; i < 32; ++i)
		close(rcv_fds[i]);
	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);
	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
	test(rcv_fds, 10, SOCK_STREAM);
	for (i = 0; i < 10; ++i)
		close(rcv_fds[i]);
	fprintf(stderr, "SUCCESS\n");
	return 0;
}
| linux-master | tools/testing/selftests/net/reuseport_dualstack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Red Hat, Inc.
* Author: Daniel Borkmann <[email protected]>
* Chetan Loke <[email protected]> (TPACKET_V3 usage example)
*
* A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior.
*
* Control:
* Test the setup of the TPACKET socket with different patterns that are
* known to fail (TODO) resp. succeed (OK).
*
* Datapath:
* Open a pair of packet sockets and send resp. receive an a priori known
* packet pattern accross the sockets and check if it was received resp.
* sent correctly. Fanout in combination with RX_RING is currently not
* tested here.
*
* The test currently runs for
* - TPACKET_V1: RX_RING, TX_RING
* - TPACKET_V2: RX_RING, TX_RING
* - TPACKET_V3: RX_RING
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>
#include <linux/filter.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <bits/wordsize.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <net/if.h>
#include <inttypes.h>
#include <poll.h>
#include "psock_lib.h"
#include "../kselftest.h"
#ifndef bug_on
# define bug_on(cond) assert(!(cond))
#endif
#ifndef __aligned_tpacket
# define __aligned_tpacket __attribute__((aligned(TPACKET_ALIGNMENT)))
#endif
#ifndef __align_tpacket
# define __align_tpacket(x) __attribute__((aligned(TPACKET_ALIGN(x))))
#endif
#define NUM_PACKETS 100
#define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1))
struct ring {
struct iovec *rd;
uint8_t *mm_space;
size_t mm_len, rd_len;
struct sockaddr_ll ll;
void (*walk)(int sock, struct ring *ring);
int type, rd_num, flen, version;
union {
struct tpacket_req req;
struct tpacket_req3 req3;
};
};
struct block_desc {
uint32_t version;
uint32_t offset_to_priv;
struct tpacket_hdr_v1 h1;
};
union frame_map {
struct {
struct tpacket_hdr tp_h __aligned_tpacket;
struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr));
} *v1;
struct {
struct tpacket2_hdr tp_h __aligned_tpacket;
struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr));
} *v2;
void *raw;
};
static unsigned int total_packets, total_bytes;
/* Open a raw PF_PACKET socket and select the requested TPACKET version
 * (TPACKET_V1/V2/V3).  Exits on failure; returns the socket fd.
 */
static int pfsocket(int ver)
{
	int sock;
	sock = socket(PF_PACKET, SOCK_RAW, 0);
	if (sock == -1) {
		perror("socket");
		exit(1);
	}
	if (setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver,
		       sizeof(ver)) == -1) {
		perror("setsockopt");
		exit(1);
	}
	return sock;
}
/* Emit a progress dot on stderr every 10th packet processed. */
static void status_bar_update(void)
{
	if (!(total_packets % 10)) {
		fputc('.', stderr);
		fflush(stderr);
	}
}
/* Sanity-check a captured frame: it must be at least an Ethernet header
 * long and carry an IPv4 ethertype.  Exits the test on mismatch.
 */
static void test_payload(void *pay, size_t len)
{
	const struct ethhdr *eth = pay;
	if (len < sizeof(*eth)) {
		fprintf(stderr, "test_payload: packet too "
			"small: %zu bytes!\n", len);
		exit(1);
	}
	if (eth->h_proto != htons(ETH_P_IP)) {
		fprintf(stderr, "test_payload: wrong ethernet "
			"type: 0x%x!\n", ntohs(eth->h_proto));
		exit(1);
	}
}
/* Build the frame used by the TX tests: broadcast Ethernet header, a
 * deliberately sloppy IPv4/UDP-ish header (random bytes, then a few
 * fields forced sane), and a DATA_LEN payload of DATA_CHAR bytes so the
 * BPF filter from psock_lib.h still matches.  *len receives the total
 * frame length.
 */
static void create_payload(void *pay, size_t *len)
{
	int i;
	struct ethhdr *eth = pay;
	struct iphdr *ip = pay + sizeof(*eth);
	/* Lets create some broken crap, that still passes
	 * our BPF filter.
	 */
	*len = DATA_LEN + 42;
	memset(pay, 0xff, ETH_ALEN * 2);
	eth->h_proto = htons(ETH_P_IP);
	/* randomize the IP header, then overwrite the fields we need */
	for (i = 0; i < sizeof(*ip); ++i)
		((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand();
	ip->ihl = 5;
	ip->version = 4;
	ip->protocol = 0x11;
	ip->frag_off = 0;
	ip->ttl = 64;
	ip->tot_len = htons((uint16_t) *len - sizeof(*eth));
	ip->saddr = htonl(INADDR_LOOPBACK);
	ip->daddr = htonl(INADDR_LOOPBACK);
	memset(pay + sizeof(*eth) + sizeof(*ip),
	       DATA_CHAR, DATA_LEN);
}
/* RX ring frame-status helpers: a frame is readable once the kernel has
 * set TP_STATUS_USER; handing it back means resetting the status to
 * TP_STATUS_KERNEL (with a full barrier so the write is visible before
 * the kernel reuses the slot).
 */
static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr)
{
	return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER);
}
static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr)
{
	hdr->tp_status = TP_STATUS_KERNEL;
	__sync_synchronize();
}
static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr)
{
	return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER);
}
static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr)
{
	hdr->tp_status = TP_STATUS_KERNEL;
	__sync_synchronize();
}
/* Version-dispatching wrappers over the helpers above. */
static inline int __v1_v2_rx_kernel_ready(void *base, int version)
{
	switch (version) {
	case TPACKET_V1:
		return __v1_rx_kernel_ready(base);
	case TPACKET_V2:
		return __v2_rx_kernel_ready(base);
	default:
		bug_on(1);
		return 0;
	}
}
static inline void __v1_v2_rx_user_ready(void *base, int version)
{
	switch (version) {
	case TPACKET_V1:
		__v1_rx_user_ready(base);
		break;
	case TPACKET_V2:
		__v2_rx_user_ready(base);
		break;
	}
}
/* RX-ring datapath test for TPACKET_V1/V2: send NUM_PACKETS UDP packets
 * over loopback and walk the mmap'ed ring, validating each captured
 * frame.  Loopback delivers each packet to the tap twice (outbound and
 * inbound), hence the 2 * NUM_PACKETS expectation.
 */
static void walk_v1_v2_rx(int sock, struct ring *ring)
{
	struct pollfd pfd;
	int udp_sock[2];
	union frame_map ppd;
	unsigned int frame_num = 0;
	bug_on(ring->type != PACKET_RX_RING);
	pair_udp_open(udp_sock, PORT_BASE);
	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN | POLLERR;
	pfd.revents = 0;
	pair_udp_send(udp_sock, NUM_PACKETS);
	while (total_packets < NUM_PACKETS * 2) {
		/* drain every frame the kernel has handed to user space */
		while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
					       ring->version)) {
			ppd.raw = ring->rd[frame_num].iov_base;
			switch (ring->version) {
			case TPACKET_V1:
				test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac,
					     ppd.v1->tp_h.tp_snaplen);
				total_bytes += ppd.v1->tp_h.tp_snaplen;
				break;
			case TPACKET_V2:
				test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac,
					     ppd.v2->tp_h.tp_snaplen);
				total_bytes += ppd.v2->tp_h.tp_snaplen;
				break;
			}
			status_bar_update();
			total_packets++;
			__v1_v2_rx_user_ready(ppd.raw, ring->version);
			frame_num = (frame_num + 1) % ring->rd_num;
		}
		poll(&pfd, 1, 1);
	}
	pair_udp_close(udp_sock);
	if (total_packets != 2 * NUM_PACKETS) {
		/* report the real expectation (each packet is seen twice) */
		fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n",
			ring->version, total_packets, 2 * NUM_PACKETS);
		exit(1);
	}
	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1);
}
/* TX ring frame-status helpers: a slot is free once neither
 * TP_STATUS_SEND_REQUEST nor TP_STATUS_SENDING is set; marking it
 * TP_STATUS_SEND_REQUEST (with a full barrier) hands it to the kernel.
 */
static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr)
{
	return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
}
static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr)
{
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
	__sync_synchronize();
}
static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr)
{
	return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
}
static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr)
{
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
	__sync_synchronize();
}
static inline int __v3_tx_kernel_ready(struct tpacket3_hdr *hdr)
{
	return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
}
static inline void __v3_tx_user_ready(struct tpacket3_hdr *hdr)
{
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
	__sync_synchronize();
}
/* Version-dispatching wrappers over the helpers above. */
static inline int __tx_kernel_ready(void *base, int version)
{
	switch (version) {
	case TPACKET_V1:
		return __v1_tx_kernel_ready(base);
	case TPACKET_V2:
		return __v2_tx_kernel_ready(base);
	case TPACKET_V3:
		return __v3_tx_kernel_ready(base);
	default:
		bug_on(1);
		return 0;
	}
}
static inline void __tx_user_ready(void *base, int version)
{
	switch (version) {
	case TPACKET_V1:
		__v1_tx_user_ready(base);
		break;
	case TPACKET_V2:
		__v2_tx_user_ready(base);
		break;
	case TPACKET_V3:
		__v3_tx_user_ready(base);
		break;
	}
}
/* Enable PACKET_LOSS so frames that fail to transmit are discarded
 * (status back to available) instead of wedging the TX ring.
 */
static void __v1_v2_set_packet_loss_discard(int sock)
{
	int ret, discard = 1;
	ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard,
			 sizeof(discard));
	if (ret == -1) {
		perror("setsockopt");
		exit(1);
	}
}
/* Return a pointer to frame n of the ring.  V1/V2 map one frame per
 * rd[] slot; V3 rd[] slots describe whole blocks, so frames are indexed
 * by tp_frame_size from the start of the mapping.
 */
static inline void *get_next_frame(struct ring *ring, int n)
{
	uint8_t *f0 = ring->rd[0].iov_base;
	switch (ring->version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return ring->rd[n].iov_base;
	case TPACKET_V3:
		return f0 + (n * ring->req3.tp_frame_size);
	default:
		bug_on(1);
		/* not reached; keeps every path of this non-void
		 * function returning a value
		 */
		return NULL;
	}
}
/* TX-ring datapath test: fill NUM_PACKETS ring slots with a generated
 * frame, kick the kernel with an empty sendto(), and verify every
 * packet comes back intact on a separate receiving raw socket bound to
 * loopback with the psock_lib BPF filter.
 */
static void walk_tx(int sock, struct ring *ring)
{
	struct pollfd pfd;
	int rcv_sock, ret;
	size_t packet_len;
	union frame_map ppd;
	char packet[1024];
	unsigned int frame_num = 0, got = 0;
	struct sockaddr_ll ll = {
		.sll_family = PF_PACKET,
		.sll_halen = ETH_ALEN,
	};
	int nframes;
	/* TPACKET_V{1,2} sets up the ring->rd* related variables based
	 * on frames (e.g., rd_num is tp_frame_nr) whereas V3 sets these
	 * up based on blocks (e.g, rd_num is tp_block_nr)
	 */
	if (ring->version <= TPACKET_V2)
		nframes = ring->rd_num;
	else
		nframes = ring->req3.tp_frame_nr;
	bug_on(ring->type != PACKET_TX_RING);
	bug_on(nframes < NUM_PACKETS);
	rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (rcv_sock == -1) {
		perror("socket");
		exit(1);
	}
	pair_udp_setfilter(rcv_sock);
	ll.sll_ifindex = if_nametoindex("lo");
	ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll));
	if (ret == -1) {
		perror("bind");
		exit(1);
	}
	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLOUT | POLLERR;
	pfd.revents = 0;
	total_packets = NUM_PACKETS;
	create_payload(packet, &packet_len);
	/* fill every free ring slot until all packets are queued */
	while (total_packets > 0) {
		void *next = get_next_frame(ring, frame_num);
		while (__tx_kernel_ready(next, ring->version) &&
		       total_packets > 0) {
			ppd.raw = next;
			switch (ring->version) {
			case TPACKET_V1:
				ppd.v1->tp_h.tp_snaplen = packet_len;
				ppd.v1->tp_h.tp_len = packet_len;
				memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN -
				       sizeof(struct sockaddr_ll), packet,
				       packet_len);
				total_bytes += ppd.v1->tp_h.tp_snaplen;
				break;
			case TPACKET_V2:
				ppd.v2->tp_h.tp_snaplen = packet_len;
				ppd.v2->tp_h.tp_len = packet_len;
				memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN -
				       sizeof(struct sockaddr_ll), packet,
				       packet_len);
				total_bytes += ppd.v2->tp_h.tp_snaplen;
				break;
			case TPACKET_V3: {
				struct tpacket3_hdr *tx = next;
				tx->tp_snaplen = packet_len;
				tx->tp_len = packet_len;
				tx->tp_next_offset = 0;
				memcpy((uint8_t *)tx + TPACKET3_HDRLEN -
				       sizeof(struct sockaddr_ll), packet,
				       packet_len);
				total_bytes += tx->tp_snaplen;
				break;
			}
			}
			status_bar_update();
			total_packets--;
			__tx_user_ready(next, ring->version);
			frame_num = (frame_num + 1) % nframes;
		}
		poll(&pfd, 1, 1);
	}
	bug_on(total_packets != 0);
	/* flush the whole ring in one go */
	ret = sendto(sock, NULL, 0, 0, NULL, 0);
	if (ret == -1) {
		perror("sendto");
		exit(1);
	}
	while ((ret = recvfrom(rcv_sock, packet, sizeof(packet),
			       0, NULL, NULL)) > 0 &&
	       total_packets < NUM_PACKETS) {
		got += ret;
		test_payload(packet, ret);
		status_bar_update();
		total_packets++;
	}
	close(rcv_sock);
	if (total_packets != NUM_PACKETS) {
		/* this is the TX walker, not the RX one */
		fprintf(stderr, "walk_v%d_tx: received %u out of %u pkts\n",
			ring->version, total_packets, NUM_PACKETS);
		exit(1);
	}
	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got);
}
/* Dispatch a TPACKET_V1/V2 ring to the RX or TX walker. */
static void walk_v1_v2(int sock, struct ring *ring)
{
	switch (ring->type) {
	case PACKET_RX_RING:
		walk_v1_v2_rx(sock, ring);
		break;
	default:
		walk_tx(sock, ring);
		break;
	}
}
/* Sequence number of the previously consumed block. */
static uint64_t __v3_prev_block_seq_num = 0;

/* Blocks must arrive strictly in order: seq_num == previous + 1. */
void __v3_test_block_seq_num(struct block_desc *pbd)
{
	uint64_t expected = __v3_prev_block_seq_num + 1;

	if (pbd->h1.seq_num != expected) {
		fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected "
			"seq:%"PRIu64" != actual seq:%"PRIu64"\n",
			__v3_prev_block_seq_num, expected,
			(uint64_t) pbd->h1.seq_num);
		exit(1);
	}

	__v3_prev_block_seq_num = pbd->h1.seq_num;
}
/* A non-empty block must report exactly the byte count we walked. */
static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
{
	if (!pbd->h1.num_pkts)
		return;
	if (bytes == pbd->h1.blk_len)
		return;

	fprintf(stderr, "\nblock:%u with %upackets, expected "
		"len:%u != actual len:%u\n", block_num,
		pbd->h1.num_pkts, bytes, pbd->h1.blk_len);
	exit(1);
}
/* The block must be owned by userspace before we may read it; then
 * verify its sequence number.
 */
static void __v3_test_block_header(struct block_desc *pbd, const int block_num)
{
	if (!(pbd->h1.block_status & TP_STATUS_USER)) {
		fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num);
		exit(1);
	}

	__v3_test_block_seq_num(pbd);
}
/* Validate and consume one TPACKET_V3 block: check ownership and
 * sequence number, walk each contained packet, verify payloads, and
 * cross-check the accumulated length against the block header.
 */
static void __v3_walk_block(struct block_desc *pbd, const int block_num)
{
	int num_pkts = pbd->h1.num_pkts, i;
	/* blk_len accounting starts after the 8-byte-aligned block header */
	unsigned long bytes = 0, bytes_with_padding = ALIGN_8(sizeof(*pbd));
	struct tpacket3_hdr *ppd;

	__v3_test_block_header(pbd, block_num);

	ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd +
				       pbd->h1.offset_to_first_pkt);

	for (i = 0; i < num_pkts; ++i) {
		bytes += ppd->tp_snaplen;

		/* the last packet in a block has tp_next_offset == 0 */
		if (ppd->tp_next_offset)
			bytes_with_padding += ppd->tp_next_offset;
		else
			bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac);

		test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen);

		status_bar_update();
		total_packets++;

		ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset);
		/* order reads of this packet before advancing to the next */
		__sync_synchronize();
	}

	__v3_test_block_len(pbd, bytes_with_padding, block_num);
	total_bytes += bytes;
}
/* Return the block to the kernel; the barrier publishes the status
 * write before any subsequent ring accesses.
 */
void __v3_flush_block(struct block_desc *pbd)
{
	pbd->h1.block_status = TP_STATUS_KERNEL;
	__sync_synchronize();
}
/* Receive 2 * NUM_PACKETS looped-back UDP test packets through a
 * TPACKET_V3 block ring (each packet is seen in both directions on
 * loopback) and verify payloads, headers and byte counts.
 */
static void walk_v3_rx(int sock, struct ring *ring)
{
	unsigned int block_num = 0;
	struct pollfd pfd;
	struct block_desc *pbd;
	int udp_sock[2];

	bug_on(ring->type != PACKET_RX_RING);

	pair_udp_open(udp_sock, PORT_BASE);

	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN | POLLERR;
	pfd.revents = 0;

	pair_udp_send(udp_sock, NUM_PACKETS);

	while (total_packets < NUM_PACKETS * 2) {
		pbd = (struct block_desc *) ring->rd[block_num].iov_base;

		/* spin-poll until the kernel hands us this block */
		while ((pbd->h1.block_status & TP_STATUS_USER) == 0)
			poll(&pfd, 1, 1);

		__v3_walk_block(pbd, block_num);
		__v3_flush_block(pbd);

		block_num = (block_num + 1) % ring->rd_num;
	}

	pair_udp_close(udp_sock);

	if (total_packets != 2 * NUM_PACKETS) {
		/* fix: report the real expected count (2 * NUM_PACKETS),
		 * matching the condition above
		 */
		fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n",
			total_packets, 2 * NUM_PACKETS);
		exit(1);
	}

	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1);
}
/* Dispatch a TPACKET_V3 ring to the RX or TX walker. */
static void walk_v3(int sock, struct ring *ring)
{
	switch (ring->type) {
	case PACKET_RX_RING:
		walk_v3_rx(sock, ring);
		break;
	default:
		walk_tx(sock, ring);
		break;
	}
}
/* Size a TPACKET_V1/V2 ring request: 4-page blocks of 2KiB frames,
 * with one descriptor per frame.
 */
static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
{
	ring->req.tp_block_size = getpagesize() << 2;
	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
	ring->req.tp_block_nr = blocks;

	ring->req.tp_frame_nr = ring->req.tp_block_size /
				ring->req.tp_frame_size *
				ring->req.tp_block_nr;

	ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
	ring->walk = walk_v1_v2;
	/* V1/V2 descriptors are per frame */
	ring->rd_num = ring->req.tp_frame_nr;
	ring->flen = ring->req.tp_frame_size;
}
/* Size a TPACKET_V3 ring request: 4-page blocks of 2KiB frames. RX
 * rings additionally get a 64ms block retire timeout and rxhash
 * filling enabled.
 */
static void __v3_fill(struct ring *ring, unsigned int blocks, int type)
{
	if (type == PACKET_RX_RING) {
		ring->req3.tp_retire_blk_tov = 64;
		ring->req3.tp_sizeof_priv = 0;
		ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
	}
	ring->req3.tp_block_size = getpagesize() << 2;
	ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
	ring->req3.tp_block_nr = blocks;

	ring->req3.tp_frame_nr = ring->req3.tp_block_size /
				 ring->req3.tp_frame_size *
				 ring->req3.tp_block_nr;

	ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr;
	ring->walk = walk_v3;
	/* V3 descriptors are per block, not per frame */
	ring->rd_num = ring->req3.tp_block_nr;
	ring->flen = ring->req3.tp_block_size;
}
/* Configure @sock's RX or TX ring for the given TPACKET version and
 * allocate the iovec descriptor table used to walk it. Also resets
 * the global per-test counters.
 */
static void setup_ring(int sock, struct ring *ring, int version, int type)
{
	int ret = 0;
	unsigned int blocks = 256;

	ring->type = type;
	ring->version = version;

	switch (version) {
	case TPACKET_V1:
	case TPACKET_V2:
		/* V1/V2 TX rings need PACKET_LOSS to not stall on errors */
		if (type == PACKET_TX_RING)
			__v1_v2_set_packet_loss_discard(sock);

		__v1_v2_fill(ring, blocks);
		ret = setsockopt(sock, SOL_PACKET, type, &ring->req,
				 sizeof(ring->req));
		break;

	case TPACKET_V3:
		__v3_fill(ring, blocks, type);
		ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
				 sizeof(ring->req3));
		break;
	}

	if (ret == -1) {
		perror("setsockopt");
		exit(1);
	}

	ring->rd_len = ring->rd_num * sizeof(*ring->rd);
	ring->rd = malloc(ring->rd_len);
	if (ring->rd == NULL) {
		perror("malloc");
		exit(1);
	}

	total_packets = 0;
	total_bytes = 0;
}
/* Map the ring into userspace and point each descriptor at its slot
 * (a frame for V1/V2, a block for V3) within the mapping.
 */
static void mmap_ring(int sock, struct ring *ring)
{
	int i;

	ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);
	if (ring->mm_space == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	memset(ring->rd, 0, ring->rd_len);
	for (i = 0; i < ring->rd_num; ++i) {
		ring->rd[i].iov_base = ring->mm_space + (i * ring->flen);
		ring->rd[i].iov_len = ring->flen;
	}
}
/* Attach the UDP test filter and bind the packet socket to loopback,
 * capturing all protocols.
 */
static void bind_ring(int sock, struct ring *ring)
{
	int ret;

	pair_udp_setfilter(sock);

	ring->ll.sll_family = PF_PACKET;
	ring->ll.sll_protocol = htons(ETH_P_ALL);
	ring->ll.sll_ifindex = if_nametoindex("lo");
	ring->ll.sll_hatype = 0;
	ring->ll.sll_pkttype = 0;
	ring->ll.sll_halen = 0;

	ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll));
	if (ret == -1) {
		perror("bind");
		exit(1);
	}
}
/* Run the version-specific walker installed by setup_ring(). */
static void walk_ring(int sock, struct ring *ring)
{
	void (*walk)(int, struct ring *) = ring->walk;

	walk(sock, ring);
}
/* Release the mmap'ed ring and the descriptor table; @sock is unused. */
static void unmap_ring(int sock, struct ring *ring)
{
	free(ring->rd);
	munmap(ring->mm_space, ring->mm_len);
}
/* Infer the kernel's bit width from /proc/kallsyms: the first field of
 * the first line is a hex address, so 8 hex digits => 32 bit and
 * 16 digits => 64 bit (digits * 4 bits each).
 */
static int test_kernel_bit_width(void)
{
	char in[512], *ptr;
	int num = 0, fd;
	ssize_t ret;

	fd = open("/proc/kallsyms", O_RDONLY);
	if (fd == -1) {
		perror("open");
		exit(1);
	}

	/* leave room for a terminating NUL */
	ret = read(fd, in, sizeof(in) - 1);
	if (ret <= 0) {
		perror("read");
		exit(1);
	}

	close(fd);

	/* fix: terminate and bound the scan by what was actually read,
	 * so a whitespace-free buffer cannot be overrun
	 */
	in[ret] = '\0';
	ptr = in;
	while (*ptr && !isspace(*ptr)) {
		num++;
		ptr++;
	}

	return num * 4;
}
/* Bit width of this userspace binary (e.g. 32 or 64). */
static int test_user_bit_width(void)
{
	return __WORDSIZE;
}
/* Human-readable names, indexed by TPACKET_V* constant. */
static const char *tpacket_str[] = {
	[TPACKET_V1] = "TPACKET_V1",
	[TPACKET_V2] = "TPACKET_V2",
	[TPACKET_V3] = "TPACKET_V3",
};

/* Human-readable names, indexed by PACKET_*_RING socket option. */
static const char *type_str[] = {
	[PACKET_RX_RING] = "PACKET_RX_RING",
	[PACKET_TX_RING] = "PACKET_TX_RING",
};
/* Run one full ring test for (version, type): create the socket, set
 * up / map / bind the ring, walk it, then tear everything down.
 * Returns 0 on success or KSFT_SKIP when the combination cannot run.
 */
static int test_tpacket(int version, int type)
{
	int sock;
	struct ring ring;

	fprintf(stderr, "test: %s with %s ", tpacket_str[version],
		type_str[type]);
	fflush(stderr);

	/* V1's ring layout differs between 32 and 64 bit; skip on mismatch */
	if (version == TPACKET_V1 &&
	    test_kernel_bit_width() != test_user_bit_width()) {
		fprintf(stderr, "test: skip %s %s since user and kernel "
			"space have different bit width\n",
			tpacket_str[version], type_str[type]);
		return KSFT_SKIP;
	}

	sock = pfsocket(version);
	memset(&ring, 0, sizeof(ring));
	setup_ring(sock, &ring, version, type);
	mmap_ring(sock, &ring);
	bind_ring(sock, &ring);
	walk_ring(sock, &ring);
	unmap_ring(sock, &ring);
	close(sock);

	fprintf(stderr, "\n");
	return 0;
}
int main(void)
{
int ret = 0;
ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING);
ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING);
ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING);
ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING);
ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING);
ret |= test_tpacket(TPACKET_V3, PACKET_TX_RING);
if (ret)
return 1;
printf("OK. All tests passed\n");
return 0;
}
| linux-master | tools/testing/selftests/net/psock_tpacket.c |
// SPDX-License-Identifier: GPL-2.0
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>
/* PID of the forked client; signalled from the SIGTERM handler. */
static int child_pid;

/* Microseconds elapsed from @s to @e; negative spans clamp to zero. */
static unsigned long timediff(struct timeval s, struct timeval e)
{
	unsigned long start_us = s.tv_sec * 1000000 + s.tv_usec;
	unsigned long end_us = e.tv_sec * 1000000 + e.tv_usec;

	return end_us > start_us ? end_us - start_us : 0;
}
/* Loop forever: connect to 127.0.0.1:@port with SO_LINGER(0) so that
 * close() aborts the connection, measure connect-to-read latency, and
 * print any iteration that took 100ms or more.
 */
static void client(int port)
{
	int sock = 0;
	struct sockaddr_in addr, laddr;
	socklen_t len = sizeof(laddr);
	struct linger sl;
	int flag = 1;
	int buffer;
	struct timeval start, end;
	unsigned long lat, sum_lat = 0, nr_lat = 0;

	while (1) {
		gettimeofday(&start, NULL);

		sock = socket(AF_INET, SOCK_STREAM, 0);
		if (sock < 0)
			error(-1, errno, "socket creation");

		/* l_linger = 0: close() aborts with RST instead of FIN */
		sl.l_onoff = 1;
		sl.l_linger = 0;
		if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)))
			error(-1, errno, "setsockopt(linger)");

		if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
			       &flag, sizeof(flag)))
			error(-1, errno, "setsockopt(nodelay)");

		addr.sin_family = AF_INET;
		addr.sin_port = htons(port);

		if (inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) <= 0)
			error(-1, errno, "inet_pton");

		if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			error(-1, errno, "connect");

		/* NOTE(review): buffer is sent uninitialized; the server
		 * only reads and discards it, so content is irrelevant
		 */
		send(sock, &buffer, sizeof(buffer), 0);
		/* blocks until the server closes the connection */
		if (read(sock, &buffer, sizeof(buffer)) == -1)
			error(-1, errno, "waiting read");

		gettimeofday(&end, NULL);
		lat = timediff(start, end);
		sum_lat += lat;
		nr_lat++;
		if (lat < 100000)
			goto close;

		if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
			error(-1, errno, "getsockname");
		printf("port: %d, lat: %lu, avg: %lu, nr: %lu\n",
		       ntohs(laddr.sin_port), lat,
		       sum_lat / nr_lat, nr_lat);

close:
		fflush(stdout);
		close(sock);
	}
}
/* Accept connections in a loop; read one message then close, which
 * produces the FIN/RST the client side measures.
 */
static void server(int sock, struct sockaddr_in address)
{
	int accepted;
	int addrlen = sizeof(address);
	int buffer;

	while (1) {
		accepted = accept(sock, (struct sockaddr *)&address,
				  (socklen_t *)&addrlen);
		if (accepted < 0)
			error(-1, errno, "accept");

		if (read(accepted, &buffer, sizeof(buffer)) == -1)
			error(-1, errno, "read");

		close(accepted);
	}
}
/* SIGTERM handler: forward termination to the forked client and exit.
 *
 * Fix: kill(2) takes (pid, sig); the arguments were swapped as
 * kill(SIGTERM, child_pid), which signals the wrong target (or fails).
 */
static void sig_handler(int signum)
{
	kill(child_pid, SIGTERM);
	exit(0);
}
/* Bind a TCP listener on a kernel-chosen loopback port, then fork:
 * the child runs the measuring client, the parent serves connections.
 */
int main(int argc, char const *argv[])
{
	int sock;
	int opt = 1;
	struct sockaddr_in address;
	struct sockaddr_in laddr;
	socklen_t len = sizeof(laddr);

	if (signal(SIGTERM, sig_handler) == SIG_ERR)
		error(-1, errno, "signal");

	sock = socket(AF_INET, SOCK_STREAM, 0);
	if (sock < 0)
		error(-1, errno, "socket");

	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
		       &opt, sizeof(opt)) == -1)
		error(-1, errno, "setsockopt");

	address.sin_family = AF_INET;
	address.sin_addr.s_addr = INADDR_ANY;
	/* dynamically allocate unused port */
	address.sin_port = 0;

	if (bind(sock, (struct sockaddr *)&address, sizeof(address)) < 0)
		error(-1, errno, "bind");

	if (listen(sock, 3) < 0)
		error(-1, errno, "listen");

	/* read back the port the kernel picked */
	if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
		error(-1, errno, "getsockname");

	fprintf(stderr, "server port: %d\n", ntohs(laddr.sin_port));

	child_pid = fork();
	/* fix: detect fork failure instead of running the server with
	 * no client and a bogus child_pid
	 */
	if (child_pid == -1)
		error(-1, errno, "fork");
	if (!child_pid)
		client(ntohs(laddr.sin_port));
	else
		server(sock, laddr);

	return 0;
}
| linux-master | tools/testing/selftests/net/fin_ack_lat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test the SO_TXTIME API
*
* Takes a stream of { payload, delivery time }[], to be sent across two
* processes. Start this program on two separate network namespaces or
* connected hosts, one instance in transmit mode and the other in receive
* mode using the '-r' option. Receiver will compare arrival timestamps to
* the expected stream. Sender will read transmit timestamps from the error
* queue. The streams can differ due to out-of-order delivery and drops.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <error.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <poll.h>
static int cfg_clockid = CLOCK_TAI;	/* clock used for SCM_TXTIME stamps (-c) */
static uint16_t cfg_port = 8000;	/* UDP port for both sides */
static int cfg_variance_us = 4000;	/* allowed arrival-time slack on rx */
static uint64_t cfg_start_time_ns;	/* optional absolute start time (-t) */
static int cfg_mark;			/* optional SO_MARK value (-m) */
static bool cfg_rx;			/* true: run in receive mode (-r) */

static uint64_t glob_tstart;		/* reference time all delays are relative to */
static uint64_t tdeliver_max;		/* latest scheduled delivery time seen */

/* encode one timed transmission (of a 1B payload) */
struct timed_send {
	char data;
	int64_t delay_us;	/* relative to glob_tstart; < 0 means no txtime cmsg */
};

#define MAX_NUM_PKT 8
static struct timed_send cfg_buf[MAX_NUM_PKT];
static int cfg_num_pkt;

/* error-queue cmsg level/type depend on the address family (-4/-6) */
static int cfg_errq_level;
static int cfg_errq_type;

static struct sockaddr_storage cfg_dst_addr;
static struct sockaddr_storage cfg_src_addr;
static socklen_t cfg_alen;
/* Read @clock and return nanoseconds since its epoch; dies on failure. */
static uint64_t gettime_ns(clockid_t clock)
{
	struct timespec now;

	if (clock_gettime(clock, &now))
		error(1, errno, "gettime");

	return now.tv_sec * 1000000000ULL + now.tv_nsec;
}
/* Send one 1-byte datagram described by @ts. If delay_us >= 0, attach
 * an SCM_TXTIME cmsg requesting delivery at glob_tstart + delay.
 */
static void do_send_one(int fdt, struct timed_send *ts)
{
	char control[CMSG_SPACE(sizeof(uint64_t))];
	struct msghdr msg = {0};
	struct iovec iov = {0};
	struct cmsghdr *cm;
	uint64_t tdeliver;
	int ret;

	iov.iov_base = &ts->data;
	iov.iov_len = 1;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_name = (struct sockaddr *)&cfg_dst_addr;
	msg.msg_namelen = cfg_alen;

	if (ts->delay_us >= 0) {
		memset(control, 0, sizeof(control));
		msg.msg_control = &control;
		msg.msg_controllen = sizeof(control);

		/* track the latest deadline for the errqueue drain loop */
		tdeliver = glob_tstart + ts->delay_us * 1000;
		tdeliver_max = tdeliver_max > tdeliver ?
			       tdeliver_max : tdeliver;

		cm = CMSG_FIRSTHDR(&msg);
		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SCM_TXTIME;
		cm->cmsg_len = CMSG_LEN(sizeof(tdeliver));
		memcpy(CMSG_DATA(cm), &tdeliver, sizeof(tdeliver));
	}

	ret = sendmsg(fdt, &msg, 0);
	if (ret == -1)
		error(1, errno, "write");
	if (ret == 0)
		error(1, 0, "write: 0B");
}
/* Receive one payload byte and verify both its content and its arrival
 * time against the expected entry @ts (within cfg_variance_us).
 */
static void do_recv_one(int fdr, struct timed_send *ts)
{
	int64_t tstop, texpect;
	char rbuf[2];
	int ret;

	ret = recv(fdr, rbuf, sizeof(rbuf), 0);
	if (ret == -1 && errno == EAGAIN)
		error(1, EAGAIN, "recv: timeout");
	if (ret == -1)
		error(1, errno, "read");
	if (ret != 1)
		error(1, 0, "read: %dB", ret);

	/* elapsed microseconds since the shared start timestamp */
	tstop = (gettime_ns(cfg_clockid) - glob_tstart) / 1000;
	texpect = ts->delay_us >= 0 ? ts->delay_us : 0;

	fprintf(stderr, "payload:%c delay:%lld expected:%lld (us)\n",
		rbuf[0], (long long)tstop, (long long)texpect);

	if (rbuf[0] != ts->data)
		error(1, 0, "payload mismatch. expected %c", ts->data);

	if (llabs(tstop - texpect) > cfg_variance_us)
		error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
}
/* After all expected packets were read, the queue must be empty:
 * expect EAGAIN from the non-blocking-by-timeout receive.
 */
static void do_recv_verify_empty(int fdr)
{
	char rbuf[1];
	int ret;

	ret = recv(fdr, rbuf, sizeof(rbuf), 0);
	if (ret == -1 && errno == EAGAIN)
		return;

	error(1, 0, "recv: not empty as expected (%d, %d)", ret, errno);
}
/* Drain the socket error queue. Each message carries one
 * SO_EE_ORIGIN_TXTIME report for a dropped packet (missed or invalid
 * txtime) plus its requested delivery timestamp.
 * Returns the number of reports consumed.
 */
static int do_recv_errqueue_timeout(int fdt)
{
	char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
		     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
	/* large enough for the returned headers plus the 1B payload */
	char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
		  sizeof(struct udphdr) + 1];
	struct sock_extended_err *err;
	int ret, num_tstamp = 0;
	struct msghdr msg = {0};
	struct iovec iov = {0};
	struct cmsghdr *cm;
	int64_t tstamp = 0;

	iov.iov_base = data;
	iov.iov_len = sizeof(data);

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	while (1) {
		const char *reason;

		ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
		if (ret == -1 && errno == EAGAIN)
			break;	/* queue drained */
		if (ret == -1)
			error(1, errno, "errqueue");
		if (msg.msg_flags != MSG_ERRQUEUE)
			error(1, 0, "errqueue: flags 0x%x\n", msg.msg_flags);

		cm = CMSG_FIRSTHDR(&msg);
		if (cm->cmsg_level != cfg_errq_level ||
		    cm->cmsg_type != cfg_errq_type)
			error(1, 0, "errqueue: type 0x%x.0x%x\n",
				    cm->cmsg_level, cm->cmsg_type);

		err = (struct sock_extended_err *)CMSG_DATA(cm);
		if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
			error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);

		switch (err->ee_errno) {
		case ECANCELED:
			if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
				error(1, 0, "errqueue: unknown ECANCELED %u\n",
				      err->ee_code);
			reason = "missed txtime";
			break;
		case EINVAL:
			if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
				error(1, 0, "errqueue: unknown EINVAL %u\n",
				      err->ee_code);
			reason = "invalid txtime";
			break;
		default:
			error(1, 0, "errqueue: errno %u code %u\n",
			      err->ee_errno, err->ee_code);
		}

		/* requested delivery time is split across ee_data/ee_info */
		tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
		tstamp -= (int64_t) glob_tstart;
		tstamp /= 1000 * 1000;
		fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
			data[ret - 1], tstamp, reason);

		msg.msg_flags = 0;
		msg.msg_controllen = sizeof(control);
		num_tstamp++;
	}

	return num_tstamp;
}
/* Poll the error queue until all cfg_num_pkt drop reports arrived or
 * the latest scheduled delivery time (tdeliver_max) has passed.
 */
static void recv_errqueue_msgs(int fdt)
{
	struct pollfd pfd = { .fd = fdt, .events = POLLERR };
	const int timeout_ms = 10;
	int ret, num_tstamp = 0;

	do {
		ret = poll(&pfd, 1, timeout_ms);
		if (ret == -1)
			error(1, errno, "poll");

		if (ret && (pfd.revents & POLLERR))
			num_tstamp += do_recv_errqueue_timeout(fdt);

		if (num_tstamp == cfg_num_pkt)
			break;

	} while (gettime_ns(cfg_clockid) < tdeliver_max);
}
/* If a shared absolute start time (-t, CLOCK_REALTIME) was configured
 * and is still in the future, sleep until it arrives.
 */
static void start_time_wait(void)
{
	uint64_t now;

	if (!cfg_start_time_ns)
		return;

	now = gettime_ns(CLOCK_REALTIME);
	if (now > cfg_start_time_ns)
		return;

	if (usleep((cfg_start_time_ns - now) / 1000))
		error(1, errno, "usleep");
}
/* Enable SO_TXTIME (with error reporting) on @fd, then read the option
 * back and verify the kernel stored exactly what was requested.
 */
static void setsockopt_txtime(int fd)
{
	struct sock_txtime so_txtime_val = { .clockid = cfg_clockid };
	struct sock_txtime so_txtime_val_read = { 0 };
	socklen_t vallen = sizeof(so_txtime_val);

	so_txtime_val.flags = SOF_TXTIME_REPORT_ERRORS;

	if (setsockopt(fd, SOL_SOCKET, SO_TXTIME,
		       &so_txtime_val, sizeof(so_txtime_val)))
		error(1, errno, "setsockopt txtime");

	if (getsockopt(fd, SOL_SOCKET, SO_TXTIME,
		       &so_txtime_val_read, &vallen))
		error(1, errno, "getsockopt txtime");

	if (vallen != sizeof(so_txtime_val) ||
	    memcmp(&so_txtime_val, &so_txtime_val_read, vallen))
		error(1, 0, "getsockopt txtime: mismatch");
}
/* Create a connected datagram sender with SO_TXTIME enabled and an
 * optional SO_MARK. Returns the socket fd.
 */
static int setup_tx(struct sockaddr *addr, socklen_t alen)
{
	int fd;

	fd = socket(addr->sa_family, SOCK_DGRAM, 0);
	if (fd == -1)
		error(1, errno, "socket t");

	if (connect(fd, addr, alen))
		error(1, errno, "connect");

	setsockopt_txtime(fd);

	if (cfg_mark &&
	    setsockopt(fd, SOL_SOCKET, SO_MARK, &cfg_mark, sizeof(cfg_mark)))
		error(1, errno, "setsockopt mark");

	return fd;
}
/* Create a bound datagram receiver with a 100ms receive timeout so
 * missing packets surface as EAGAIN. Returns the socket fd.
 */
static int setup_rx(struct sockaddr *addr, socklen_t alen)
{
	struct timeval rcv_timeout = { .tv_usec = 100 * 1000 };
	int fdr;

	fdr = socket(addr->sa_family, SOCK_DGRAM, 0);
	if (fdr == -1)
		error(1, errno, "socket r");

	if (bind(fdr, addr, alen))
		error(1, errno, "bind");

	if (setsockopt(fdr, SOL_SOCKET, SO_RCVTIMEO, &rcv_timeout,
		       sizeof(rcv_timeout)))
		error(1, errno, "setsockopt rcv timeout");

	return fdr;
}
/* Sender side: schedule all configured packets relative to a single
 * start timestamp, then collect drop reports from the error queue.
 */
static void do_test_tx(struct sockaddr *addr, socklen_t alen)
{
	int fdt, i;

	fprintf(stderr, "\nSO_TXTIME ipv%c clock %s\n",
		addr->sa_family == PF_INET ? '4' : '6',
		cfg_clockid == CLOCK_TAI ? "tai" : "monotonic");

	fdt = setup_tx(addr, alen);

	start_time_wait();
	glob_tstart = gettime_ns(cfg_clockid);

	for (i = 0; i < cfg_num_pkt; i++)
		do_send_one(fdt, &cfg_buf[i]);

	recv_errqueue_msgs(fdt);

	if (close(fdt))
		error(1, errno, "close t");
}
/* Receiver side: verify each expected packet's payload and timing,
 * then confirm nothing unexpected is left queued.
 */
static void do_test_rx(struct sockaddr *addr, socklen_t alen)
{
	int fdr, i;

	fdr = setup_rx(addr, alen);

	start_time_wait();
	glob_tstart = gettime_ns(cfg_clockid);

	for (i = 0; i < cfg_num_pkt; i++)
		do_recv_one(fdr, &cfg_buf[i]);

	do_recv_verify_empty(fdr);

	if (close(fdr))
		error(1, errno, "close r");
}
/* Zero *sockaddr and fill family, cfg_port and (when @str_addr is
 * non-NULL) the parsed address for the requested domain.
 */
static void setup_sockaddr(int domain, const char *str_addr,
			   struct sockaddr_storage *sockaddr)
{
	if (domain == PF_INET) {
		struct sockaddr_in *a4 = (void *) sockaddr;

		memset(a4, 0, sizeof(*a4));
		a4->sin_family = AF_INET;
		a4->sin_port = htons(cfg_port);
		if (str_addr &&
		    inet_pton(AF_INET, str_addr, &a4->sin_addr) != 1)
			error(1, 0, "ipv4 parse error: %s", str_addr);
	} else if (domain == PF_INET6) {
		struct sockaddr_in6 *a6 = (void *) sockaddr;

		memset(a6, 0, sizeof(*a6));
		a6->sin6_family = AF_INET6;
		a6->sin6_port = htons(cfg_port);
		if (str_addr &&
		    inet_pton(AF_INET6, str_addr, &a6->sin6_addr) != 1)
			error(1, 0, "ipv6 parse error: %s", str_addr);
	}
}
/* Parse "<char>,<delay_ms>[,<char>,<delay_ms>...]" into @array.
 * Returns the number of { payload, delay } pairs parsed.
 *
 * Fix: the old code reused its only pointer as strtok's continuation
 * argument (NULLing it after the first token), so the final free()
 * freed NULL and leaked the strdup'ed buffer.
 */
static int parse_io(const char *optarg, struct timed_send *array)
{
	char *buf, *cur, *tok;
	int aoff = 0;

	buf = strdup(optarg);
	if (!buf)
		error(1, errno, "strdup");

	cur = buf;
	while ((tok = strtok(cur, ","))) {
		cur = NULL;	/* only pass non-NULL on first call */

		if (aoff / 2 == MAX_NUM_PKT)
			error(1, 0, "exceeds max pkt count (%d)", MAX_NUM_PKT);

		if (aoff & 1) {	/* parse delay */
			array->delay_us = strtol(tok, NULL, 0) * 1000;
			array++;
		} else {	/* parse character */
			array->data = tok[0];
		}
		aoff++;
	}

	free(buf);

	return aoff / 2;
}
/* Print usage help to stderr and exit with failure. */
static void usage(const char *progname)
{
	fprintf(stderr, "\nUsage: %s [options] <payload>\n"
			"Options:\n"
			" -4 only IPv4\n"
			" -6 only IPv6\n"
			" -c <clock> monotonic or tai (default)\n"
			" -D <addr> destination IP address (server)\n"
			" -S <addr> source IP address (client)\n"
			" -r run rx mode\n"
			" -t <nsec> start time (UTC nanoseconds)\n"
			" -m <mark> socket mark\n"
			"\n",
			progname);
	exit(1);
}
/* Parse command line options. Exactly one of -4/-6 plus -D (and -S
 * unless in rx mode) is required; the single positional argument is
 * the payload/delay stream handed to parse_io().
 */
static void parse_opts(int argc, char **argv)
{
	char *daddr = NULL, *saddr = NULL;
	int domain = PF_UNSPEC;
	int c;

	while ((c = getopt(argc, argv, "46c:S:D:rt:m:")) != -1) {
		switch (c) {
		case '4':
			if (domain != PF_UNSPEC)
				error(1, 0, "Pass one of -4 or -6");
			domain = PF_INET;
			cfg_alen = sizeof(struct sockaddr_in);
			/* IPv4 flavour of the errqueue cmsg */
			cfg_errq_level = SOL_IP;
			cfg_errq_type = IP_RECVERR;
			break;
		case '6':
			if (domain != PF_UNSPEC)
				error(1, 0, "Pass one of -4 or -6");
			domain = PF_INET6;
			cfg_alen = sizeof(struct sockaddr_in6);
			cfg_errq_level = SOL_IPV6;
			cfg_errq_type = IPV6_RECVERR;
			break;
		case 'c':
			if (!strcmp(optarg, "tai"))
				cfg_clockid = CLOCK_TAI;
			else if (!strcmp(optarg, "monotonic") ||
				 !strcmp(optarg, "mono"))
				cfg_clockid = CLOCK_MONOTONIC;
			else
				error(1, 0, "unknown clock id %s", optarg);
			break;
		case 'S':
			saddr = optarg;
			break;
		case 'D':
			daddr = optarg;
			break;
		case 'r':
			cfg_rx = true;
			break;
		case 't':
			cfg_start_time_ns = strtoll(optarg, NULL, 0);
			break;
		case 'm':
			cfg_mark = strtol(optarg, NULL, 0);
			break;
		default:
			usage(argv[0]);
		}
	}

	if (argc - optind != 1)
		usage(argv[0]);

	if (domain == PF_UNSPEC)
		error(1, 0, "Pass one of -4 or -6");
	if (!daddr)
		error(1, 0, "-D <server addr> required\n");
	if (!cfg_rx && !saddr)
		error(1, 0, "-S <client addr> required\n");

	/* saddr may be NULL in rx mode; setup_sockaddr handles that */
	setup_sockaddr(domain, daddr, &cfg_dst_addr);
	setup_sockaddr(domain, saddr, &cfg_src_addr);

	cfg_num_pkt = parse_io(argv[optind], cfg_buf);
}
/* Entry point: receivers bind the destination address, senders bind
 * the source address and connect to the destination.
 */
int main(int argc, char **argv)
{
	parse_opts(argc, argv);

	if (cfg_rx) {
		do_test_rx((void *)&cfg_dst_addr, cfg_alen);
		return 0;
	}

	do_test_tx((void *)&cfg_src_addr, cfg_alen);
	return 0;
}
| linux-master | tools/testing/selftests/net/so_txtime.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Check if we can fully utilize 4-tuples for connect().
*
* Rules to bind sockets to the same port when all ephemeral ports are
* exhausted.
*
* 1. if there are TCP_LISTEN sockets on the port, fail to bind.
* 2. if there are sockets without SO_REUSEADDR, fail to bind.
* 3. if SO_REUSEADDR is disabled, fail to bind.
* 4. if SO_REUSEADDR is enabled and SO_REUSEPORT is disabled,
* succeed to bind.
* 5. if SO_REUSEADDR and SO_REUSEPORT are enabled and
* there is no socket having the both options and the same EUID,
* succeed to bind.
* 6. fail to bind.
*
* Author: Kuniyuki Iwashima <[email protected]>
*/
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include "../kselftest_harness.h"
/* Option pairs per test case: index 0 is the first bound socket,
 * index 1 the second socket that attempts to reuse the same port.
 */
struct reuse_opts {
	int reuseaddr[2];
	int reuseport[2];
};

/* Combinations where the second bind() must fail (rules 2 and 3). */
struct reuse_opts unreusable_opts[12] = {
	{{0, 0}, {0, 0}},
	{{0, 0}, {0, 1}},
	{{0, 0}, {1, 0}},
	{{0, 0}, {1, 1}},
	{{0, 1}, {0, 0}},
	{{0, 1}, {0, 1}},
	{{0, 1}, {1, 0}},
	{{0, 1}, {1, 1}},
	{{1, 0}, {0, 0}},
	{{1, 0}, {0, 1}},
	{{1, 0}, {1, 0}},
	{{1, 0}, {1, 1}},
};

/* Both sockets set SO_REUSEADDR; outcome depends on SO_REUSEPORT. */
struct reuse_opts reusable_opts[4] = {
	{{1, 1}, {0, 0}},
	{{1, 1}, {0, 1}},
	{{1, 1}, {1, 0}},
	{{1, 1}, {1, 1}},
};
/* Create a TCP socket with the given SO_REUSEADDR/SO_REUSEPORT values
 * and bind it to port 0 (kernel-chosen) on 127.0.0.1.
 * Returns the fd, or -1 when bind() failed.
 *
 * NOTE(review): the "ports exhausted" scenarios rely on the companion
 * test script shrinking the ephemeral port range so that both binds
 * contend for the same port — confirm against the runner script.
 */
int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
{
	struct sockaddr_in local_addr;
	int len = sizeof(local_addr);
	int fd, ret;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	ASSERT_NE(-1, fd) TH_LOG("failed to open socket.");

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(int));
	ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEADDR.");

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(int));
	ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEPORT.");

	local_addr.sin_family = AF_INET;
	local_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
	local_addr.sin_port = 0;

	if (bind(fd, (struct sockaddr *)&local_addr, len) == -1) {
		close(fd);
		return -1;
	}

	return fd;
}
/* For every option combination lacking SO_REUSEADDR on either side,
 * the second bind to the same port must fail.
 */
TEST(reuseaddr_ports_exhausted_unreusable)
{
	struct reuse_opts *opts;
	int i, j, fd[2];

	for (i = 0; i < 12; i++) {
		opts = &unreusable_opts[i];

		for (j = 0; j < 2; j++)
			fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);

		ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
		EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind.");

		for (j = 0; j < 2; j++)
			if (fd[j] != -1)
				close(fd[j]);
	}
}
/* With SO_REUSEADDR on both sockets and the same EUID: the second bind
 * succeeds unless both sockets also set SO_REUSEPORT (rule 5 vs 6).
 */
TEST(reuseaddr_ports_exhausted_reusable_same_euid)
{
	struct reuse_opts *opts;
	int i, j, fd[2];

	for (i = 0; i < 4; i++) {
		opts = &reusable_opts[i];

		for (j = 0; j < 2; j++)
			fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);

		ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");

		if (opts->reuseport[0] && opts->reuseport[1]) {
			EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind because both sockets succeed to be listened.");
		} else {
			EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind to connect to different destinations.");
		}

		for (j = 0; j < 2; j++)
			if (fd[j] != -1)
				close(fd[j]);
	}
}
/* With SO_REUSEADDR+SO_REUSEPORT under different EUIDs, both binds
 * succeed, but only one of the sockets may then enter TCP_LISTEN.
 */
TEST(reuseaddr_ports_exhausted_reusable_different_euid)
{
	struct reuse_opts *opts;
	int i, j, ret, fd[2];
	uid_t euid[2] = {10, 20};

	for (i = 0; i < 4; i++) {
		opts = &reusable_opts[i];

		for (j = 0; j < 2; j++) {
			/* bind each socket under a distinct effective uid */
			ret = seteuid(euid[j]);
			ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: %d.", euid[j]);

			fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);

			ret = seteuid(0);
			ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: 0.");
		}

		ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
		EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind because one socket can be bound in each euid.");

		if (fd[1] != -1) {
			ret = listen(fd[0], 5);
			ASSERT_EQ(0, ret) TH_LOG("failed to listen.");

			ret = listen(fd[1], 5);
			EXPECT_EQ(-1, ret) TH_LOG("should fail to listen because only one uid reserves the port in TCP_LISTEN.");
		}

		for (j = 0; j < 2; j++)
			if (fd[j] != -1)
				close(fd[j]);
	}
}

TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/net/reuseaddr_ports_exhausted.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
/* Fill *ss from textual @ip and @port; ss->ss_family selects the v4 or
 * v6 layout. *len receives the size of the filled structure.
 */
static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len)
{
	int portnum = atoi(port);

	if (ss->ss_family == AF_INET) {
		struct sockaddr_in *v4 = (struct sockaddr_in *)ss;

		v4->sin_addr.s_addr = inet_addr(ip);
		v4->sin_port = htons(portnum);
		*len = sizeof(*v4);
		return;
	}

	{
		struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;

		v6->sin6_family = AF_INET6;
		inet_pton(AF_INET6, ip, &v6->sin6_addr);
		v6->sin6_port = htons(portnum);
		*len = sizeof(*v6);
	}
}
/* SCTP client: optionally bind to a local address (argv[5..6]),
 * connect to argv[3..4] and send a single "hello" message.
 * Returns 0 on success, -1 on error.
 */
static int do_client(int argc, char *argv[])
{
	struct sockaddr_storage ss;
	char buf[] = "hello";
	int csk, ret, len;

	if (argc < 5) {
		printf("%s client -4|6 IP PORT [IP PORT]\n", argv[0]);
		return -1;
	}

	bzero((void *)&ss, sizeof(ss));
	ss.ss_family = !strcmp(argv[2], "-4") ? AF_INET : AF_INET6;

	csk = socket(ss.ss_family, SOCK_STREAM, IPPROTO_SCTP);
	if (csk < 0) {
		printf("failed to create socket\n");
		return -1;
	}

	if (argc >= 7) {
		set_addr(&ss, argv[5], argv[6], &len);
		ret = bind(csk, (struct sockaddr *)&ss, len);
		if (ret < 0) {
			printf("failed to bind to address\n");
			close(csk);	/* fix: don't leak the socket */
			return -1;
		}
	}

	set_addr(&ss, argv[3], argv[4], &len);
	ret = connect(csk, (struct sockaddr *)&ss, len);
	if (ret < 0) {
		printf("failed to connect to peer\n");
		close(csk);	/* fix: don't leak the socket */
		return -1;
	}

	ret = send(csk, buf, strlen(buf) + 1, 0);
	if (ret < 0) {
		printf("failed to send msg %d\n", ret);
		close(csk);	/* fix: don't leak the socket */
		return -1;
	}
	close(csk);

	return 0;
}
/* SCTP server (or client via do_client): bind/listen on argv[3..4],
 * optionally on device argv[5], accept one client and read one message.
 */
int main(int argc, char *argv[])
{
	/* NOTE(review): ss is not zeroed here; on the v4 path only
	 * family/addr/port are written, leaving padding (sin_zero)
	 * indeterminate before bind — confirm this is acceptable.
	 */
	struct sockaddr_storage ss;
	int lsk, csk, ret, len;
	char buf[20];

	if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
		printf("%s server|client ...\n", argv[0]);
		return -1;
	}

	if (!strcmp(argv[1], "client"))
		return do_client(argc, argv);

	if (argc < 5) {
		printf("%s server -4|6 IP PORT [IFACE]\n", argv[0]);
		return -1;
	}

	ss.ss_family = !strcmp(argv[2], "-4") ? AF_INET : AF_INET6;

	lsk = socket(ss.ss_family, SOCK_STREAM, IPPROTO_SCTP);
	if (lsk < 0) {
		printf("failed to create lsk\n");
		return -1;
	}

	/* optional: restrict the listener to one network device */
	if (argc >= 6) {
		ret = setsockopt(lsk, SOL_SOCKET, SO_BINDTODEVICE,
				 argv[5], strlen(argv[5]) + 1);
		if (ret < 0) {
			printf("failed to bind to device\n");
			return -1;
		}
	}

	set_addr(&ss, argv[3], argv[4], &len);
	ret = bind(lsk, (struct sockaddr *)&ss, len);
	if (ret < 0) {
		printf("failed to bind to address\n");
		return -1;
	}

	ret = listen(lsk, 5);
	if (ret < 0) {
		printf("failed to listen on port\n");
		return -1;
	}

	csk = accept(lsk, (struct sockaddr *)NULL, (socklen_t *)NULL);
	if (csk < 0) {
		printf("failed to accept new client\n");
		return -1;
	}

	ret = recv(csk, buf, sizeof(buf), 0);
	if (ret <= 0) {
		printf("failed to recv msg %d\n", ret);
		return -1;
	}
	close(csk);
	close(lsk);

	return 0;
}
| linux-master | tools/testing/selftests/net/sctp_hello.c |
// SPDX-License-Identifier: GPL-2.0
/* Toeplitz test
*
* 1. Read packets and their rx_hash using PF_PACKET/TPACKET_V3
* 2. Compute the rx_hash in software based on the packet contents
* 3. Compare the two
*
* Optionally, either '-C $rx_irq_cpu_list' or '-r $rps_bitmap' may be given.
*
* If '-C $rx_irq_cpu_list' is given, also
*
* 4. Identify the cpu on which the packet arrived with PACKET_FANOUT_CPU
* 5. Compute the rxqueue that RSS would select based on this rx_hash
* 6. Using the $rx_irq_cpu_list map, identify the arriving cpu based on rxq irq
* 7. Compare the cpus from 4 and 6
*
* Else if '-r $rps_bitmap' is given, also
*
* 4. Identify the cpu on which the packet arrived with PACKET_FANOUT_CPU
* 5. Compute the cpu that RPS should select based on rx_hash and $rps_bitmap
* 6. Compare the cpus from 4 and 5
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <getopt.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netdb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <poll.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include "../kselftest.h"
#define TOEPLITZ_KEY_MIN_LEN 40
#define TOEPLITZ_KEY_MAX_LEN 60
#define TOEPLITZ_STR_LEN(K) (((K) * 3) - 1) /* hex encoded: AA:BB:CC:...:ZZ */
#define TOEPLITZ_STR_MIN_LEN TOEPLITZ_STR_LEN(TOEPLITZ_KEY_MIN_LEN)
#define TOEPLITZ_STR_MAX_LEN TOEPLITZ_STR_LEN(TOEPLITZ_KEY_MAX_LEN)
#define FOUR_TUPLE_MAX_LEN ((sizeof(struct in6_addr) * 2) + (sizeof(uint16_t) * 2))
#define RSS_MAX_CPUS (1 << 16) /* real constraint is PACKET_FANOUT_MAX */
#define RPS_MAX_CPUS 16UL /* must be a power of 2 */
/* configuration options (cmdline arguments) */
static uint16_t cfg_dport = 8000;
static int cfg_family = AF_INET6;
static char *cfg_ifname = "eth0";
static int cfg_num_queues;
static int cfg_num_rps_cpus;
static bool cfg_sink;
static int cfg_type = SOCK_STREAM;
static int cfg_timeout_msec = 1000;
static bool cfg_verbose;
/* global vars */
static int num_cpus;
static int ring_block_nr;
static int ring_block_sz;
/* stats */
static int frames_received;
static int frames_nohash;
static int frames_error;
#define log_verbose(args...) do { if (cfg_verbose) fprintf(stderr, args); } while (0)
/* tpacket ring: per-cpu PF_PACKET rx ring state */
struct ring_state {
	int fd;		/* PF_PACKET socket that owns the ring */
	char *mmap;	/* base of the mmap'ed TPACKET_V3 block ring */
	int idx;	/* next block index to check for TP_STATUS_USER */
	int cpu;	/* cpu this fanout member receives on */
};
static unsigned int rx_irq_cpus[RSS_MAX_CPUS]; /* map from rxq to cpu */
static int rps_silo_to_cpu[RPS_MAX_CPUS];	/* silo index -> cpu, from '-r' */
static unsigned char toeplitz_key[TOEPLITZ_KEY_MAX_LEN];	/* from '-k' */
static struct ring_state rings[RSS_MAX_CPUS];	/* one ring per cpu */
/* Toeplitz hash over the four-tuple.
 *
 * A 32-bit window slides bit-by-bit over the key bytes; whenever the
 * current input bit is set, the window is XOR-ed into the hash.
 */
static inline uint32_t toeplitz(const unsigned char *four_tuple,
				const unsigned char *key)
{
	uint32_t hash = 0, window;
	int idx, b;

	window = ntohl(*((uint32_t *)key));
	key += 4;

	for (idx = 0; idx < FOUR_TUPLE_MAX_LEN; idx++, key++) {
		for (b = 7; b >= 0; b--) {
			if (four_tuple[idx] & (1 << b))
				hash ^= window;
			/* shift in the next key bit from key[0] */
			window = (window << 1) | !!(key[0] & (1 << b));
		}
	}

	return hash;
}
/* Compare computed cpu with arrival cpu from packet_fanout_cpu:
 * RSS picks rxq = hash % num_queues; the '-C' map gives that rxq's cpu.
 */
static void verify_rss(uint32_t rx_hash, int cpu)
{
	int rxq = rx_hash % cfg_num_queues;
	unsigned int expected = rx_irq_cpus[rxq];

	log_verbose(" rxq %d (cpu %d)", rxq, expected);
	if (expected != cpu) {
		log_verbose(". error: rss cpu mismatch (%d)", cpu);
		frames_error++;
	}
}
/* Compare computed cpu with arrival cpu: RPS scales the 32-bit hash
 * into one of cfg_num_rps_cpus silos ((hash * n) >> 32).
 */
static void verify_rps(uint64_t rx_hash, int cpu)
{
	int silo = (rx_hash * cfg_num_rps_cpus) >> 32;
	int expected = rps_silo_to_cpu[silo];

	log_verbose(" silo %d (cpu %d)", silo, expected);
	if (cpu != expected) {
		log_verbose(". error: rps cpu mismatch (%d)", cpu);
		frames_error++;
	}
}
/* Pretty-print one frame: arrival cpu, reported rx_hash and the four-tuple.
 * 'addrs' points at the source address in the raw packet; the destination
 * address follows at addrs + addr_len and the transport ports follow the
 * two addresses (assumes no IP options / extension headers in between —
 * see the memcpy in verify_rxhash which makes the same assumption).
 */
static void log_rxhash(int cpu, uint32_t rx_hash,
		       const char *addrs, int addr_len)
{
	char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
	uint16_t *ports;
	if (!inet_ntop(cfg_family, addrs, saddr, sizeof(saddr)) ||
	    !inet_ntop(cfg_family, addrs + addr_len, daddr, sizeof(daddr)))
		error(1, 0, "address parse error");
	/* ports sit directly after the two addresses, in network order */
	ports = (void *)addrs + (addr_len * 2);
	log_verbose("cpu %d: rx_hash 0x%08x [saddr %s daddr %s sport %02hu dport %02hu]",
		    cpu, rx_hash, saddr, daddr,
		    ntohs(ports[0]), ntohs(ports[1]));
}
/* Compare computed rxhash with rxhash received from tpacket_v3.
 * 'pkt' points at the network header. Builds the Toeplitz input
 * (saddr, daddr, sport, dport) by copying the bytes starting at the
 * source address field; this relies on ports directly following the
 * fixed IP header (no IPv4 options / IPv6 extension headers).
 * On hash match, optionally also verifies RSS ('-C') or RPS ('-r').
 */
static void verify_rxhash(const char *pkt, uint32_t rx_hash, int cpu)
{
	unsigned char four_tuple[FOUR_TUPLE_MAX_LEN] = {0};
	uint32_t rx_hash_sw;
	const char *addrs;
	int addr_len;
	if (cfg_family == AF_INET) {
		addr_len = sizeof(struct in_addr);
		addrs = pkt + offsetof(struct iphdr, saddr);
	} else {
		addr_len = sizeof(struct in6_addr);
		addrs = pkt + offsetof(struct ip6_hdr, ip6_src);
	}
	/* copy both addresses plus the two 16-bit ports that follow them */
	memcpy(four_tuple, addrs, (addr_len * 2) + (sizeof(uint16_t) * 2));
	rx_hash_sw = toeplitz(four_tuple, toeplitz_key);
	if (cfg_verbose)
		log_rxhash(cpu, rx_hash, addrs, addr_len);
	if (rx_hash != rx_hash_sw) {
		log_verbose(" != expected 0x%x\n", rx_hash_sw);
		frames_error++;
		return;
	}
	log_verbose(" OK");
	if (cfg_num_queues)
		verify_rss(rx_hash, cpu);
	else if (cfg_num_rps_cpus)
		verify_rps(rx_hash, cpu);
	log_verbose("\n");
}
/* Process one frame in a TPACKET_V3 block: verify its rx_hash if the
 * kernel filled one in, else count it as unhashed.
 * Returns a pointer to the next frame in the block (tp_next_offset).
 */
static char *recv_frame(const struct ring_state *ring, char *frame)
{
	struct tpacket3_hdr *hdr = (void *)frame;
	if (hdr->hv1.tp_rxhash)
		verify_rxhash(frame + hdr->tp_net, hdr->hv1.tp_rxhash,
			      ring->cpu);
	else
		frames_nohash++;
	return frame + hdr->tp_next_offset;
}
/* A single TPACKET_V3 block can hold multiple frames.
 * Returns false if the current block is still owned by the kernel.
 * Otherwise processes every frame in it, hands the block back to the
 * kernel (TP_STATUS_KERNEL) and advances to the next block slot.
 */
static bool recv_block(struct ring_state *ring)
{
	struct tpacket_block_desc *block;
	char *frame;
	int i;
	block = (void *)(ring->mmap + ring->idx * ring_block_sz);
	if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
		return false;
	frame = (char *)block;
	frame += block->hdr.bh1.offset_to_first_pkt;
	for (i = 0; i < block->hdr.bh1.num_pkts; i++) {
		frame = recv_frame(ring, frame);
		frames_received++;
	}
	/* release the block back to the kernel only after all frames read */
	block->hdr.bh1.block_status = TP_STATUS_KERNEL;
	ring->idx = (ring->idx + 1) % ring_block_nr;
	return true;
}
/* simple test: sleep once unconditionally and then process all rings */
static void process_rings(void)
{
	int cpu;

	usleep(1000 * cfg_timeout_msec);

	/* drain each per-cpu ring until it has no more ready blocks */
	for (cpu = 0; cpu < num_cpus; cpu++)
		while (recv_block(&rings[cpu]))
			;

	fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
		frames_received - frames_nohash - frames_error,
		frames_nohash, frames_error);
}
/* Attach a TPACKET_V3 rx ring to the packet socket and mmap it.
 * TP_FT_REQ_FILL_RXHASH asks the kernel to store the rx hash in each
 * frame header. Records block geometry in the ring_block_* globals.
 * Returns the mapped ring base; exits on any failure.
 */
static char *setup_ring(int fd)
{
	struct tpacket_req3 req3 = {0};
	void *ring;
	/* retire blocks well within the test timeout so none linger */
	req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
	req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
	req3.tp_frame_size = 2048;
	req3.tp_frame_nr = 1 << 10;
	req3.tp_block_nr = 16;
	req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
	req3.tp_block_size /= req3.tp_block_nr;
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req3, sizeof(req3)))
		error(1, errno, "setsockopt PACKET_RX_RING");
	ring_block_sz = req3.tp_block_size;
	ring_block_nr = req3.tp_block_nr;
	ring = mmap(0, req3.tp_block_size * req3.tp_block_nr,
		    PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_LOCKED | MAP_POPULATE, fd, 0);
	if (ring == MAP_FAILED)
		error(1, 0, "mmap failed");
	return ring;
}
/* Attach a classic BPF filter that accepts only inbound (PACKET_HOST)
 * packets whose byte at off_proto equals 'proto' and whose 16-bit field
 * at off_dport equals cfg_dport; everything else is dropped (ret 0).
 */
static void __set_filter(int fd, int off_proto, uint8_t proto, int off_dport)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD + BPF_B + BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, PACKET_HOST, 0, 4),
		BPF_STMT(BPF_LD + BPF_B + BPF_ABS, off_proto),
		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, proto, 0, 2),
		BPF_STMT(BPF_LD + BPF_H + BPF_ABS, off_dport),
		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, cfg_dport, 1, 0),
		BPF_STMT(BPF_RET + BPF_K, 0),		/* drop */
		BPF_STMT(BPF_RET + BPF_K, 0xFFFF),	/* accept */
	};
	struct sock_fprog prog = {};
	prog.filter = filter;
	prog.len = ARRAY_SIZE(filter);
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
		error(1, errno, "setsockopt filter");
}
/* filter on transport protocol and destination port */
static void set_filter(int fd)
{
	const int off_dport = offsetof(struct tcphdr, dest); /* same for udp */
	const uint8_t proto = cfg_type == SOCK_STREAM ? IPPROTO_TCP :
							IPPROTO_UDP;
	int off_proto, off_port;

	if (cfg_family == AF_INET) {
		off_proto = offsetof(struct iphdr, protocol);
		off_port = sizeof(struct iphdr) + off_dport;
	} else {
		off_proto = offsetof(struct ip6_hdr, ip6_nxt);
		off_port = sizeof(struct ip6_hdr) + off_dport;
	}

	__set_filter(fd, off_proto, proto, off_port);
}
/* drop everything: used temporarily during setup */
static void set_filter_null(int fd)
{
	struct sock_filter drop_all[] = {
		BPF_STMT(BPF_RET + BPF_K, 0),
	};
	struct sock_fprog prog = {
		.filter = drop_all,
		.len = ARRAY_SIZE(drop_all),
	};

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
		error(1, errno, "setsockopt filter");
}
/* Create one PF_PACKET socket with a mapped TPACKET_V3 ring and add it
 * to the PACKET_FANOUT_CPU group. Order matters: version before ring,
 * null filter before bind (so no traffic is misclassified during setup),
 * fanout after bind. Returns the socket fd; *ring receives the mmap base.
 */
static int create_ring(char **ring)
{
	struct fanout_args args = {
		.id = 1,
		.type_flags = PACKET_FANOUT_CPU,
		.max_num_members = RSS_MAX_CPUS
	};
	struct sockaddr_ll ll = { 0 };
	int fd, val;
	fd = socket(PF_PACKET, SOCK_DGRAM, 0);
	if (fd == -1)
		error(1, errno, "socket creation failed");
	val = TPACKET_V3;
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &val, sizeof(val)))
		error(1, errno, "setsockopt PACKET_VERSION");
	*ring = setup_ring(fd);
	/* block packets until all rings are added to the fanout group:
	 * else packets can arrive during setup and get misclassified
	 */
	set_filter_null(fd);
	ll.sll_family = AF_PACKET;
	ll.sll_ifindex = if_nametoindex(cfg_ifname);
	ll.sll_protocol = cfg_family == AF_INET ? htons(ETH_P_IP) :
						  htons(ETH_P_IPV6);
	if (bind(fd, (void *)&ll, sizeof(ll)))
		error(1, errno, "bind");
	/* must come after bind: verifies all programs in group match */
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &args, sizeof(args))) {
		/* on failure, retry using old API if that is sufficient:
		 * it has a hard limit of 256 sockets, so only try if
		 * (a) only testing rxhash, not RSS or (b) <= 256 cpus.
		 * in this API, the third argument is left implicit.
		 */
		if (cfg_num_queues || num_cpus > 256 ||
		    setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			       &args, sizeof(uint32_t)))
			error(1, errno, "setsockopt PACKET_FANOUT cpu");
	}
	return fd;
}
/* setup inet(6) socket to blackhole the test traffic, if arg '-s' */
static int setup_sink(void)
{
	int val = 1 << 20;
	int fd;

	fd = socket(cfg_family, cfg_type, 0);
	if (fd == -1)
		error(1, errno, "socket %d.%d", cfg_family, cfg_type);

	/* force a large receive buffer on the sink */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUFFORCE, &val, sizeof(val)))
		error(1, errno, "setsockopt rcvbuf");

	return fd;
}
/* Create one fanout ring per cpu, then open the filters only after the
 * whole group exists so no packet is delivered to a partial group.
 */
static void setup_rings(void)
{
	int cpu;

	for (cpu = 0; cpu < num_cpus; cpu++) {
		rings[cpu].cpu = cpu;
		rings[cpu].fd = create_ring(&rings[cpu].mmap);
	}

	/* accept packets once all rings in the fanout group are up */
	for (cpu = 0; cpu < num_cpus; cpu++)
		set_filter(rings[cpu].fd);
}
/* Unmap and close every per-cpu ring created by setup_rings() */
static void cleanup_rings(void)
{
	const size_t ring_len = ring_block_nr * ring_block_sz;
	int cpu;

	for (cpu = 0; cpu < num_cpus; cpu++) {
		if (munmap(rings[cpu].mmap, ring_len))
			error(1, errno, "munmap");
		if (close(rings[cpu].fd))
			error(1, errno, "close");
	}
}
static void parse_cpulist(const char *arg)
{
do {
rx_irq_cpus[cfg_num_queues++] = strtol(arg, NULL, 10);
arg = strchr(arg, ',');
if (!arg)
break;
arg++; // skip ','
} while (1);
}
/* Dump the rxq -> cpu mapping supplied with '-C' */
static void show_cpulist(void)
{
	int rxq;

	for (rxq = 0; rxq < cfg_num_queues; rxq++)
		fprintf(stderr, "rxq %d: cpu %d\n", rxq, rx_irq_cpus[rxq]);
}
/* Dump the silo -> cpu mapping derived from the '-r' bitmap */
static void show_silos(void)
{
	int silo;

	for (silo = 0; silo < cfg_num_rps_cpus; silo++)
		fprintf(stderr, "silo %d: cpu %d\n", silo, rps_silo_to_cpu[silo]);
}
/* Parse a colon-separated hex key string ("AA:BB:...") into raw bytes.
 * The upper length bound allows one extra character (e.g. a trailing
 * newline).
 */
static void parse_toeplitz_key(const char *str, int slen, unsigned char *key)
{
	int byte, off;

	if (slen < TOEPLITZ_STR_MIN_LEN || slen > TOEPLITZ_STR_MAX_LEN + 1)
		error(1, 0, "invalid toeplitz key");

	for (byte = 0, off = 0; off < slen; byte++, off += 3) {
		if (sscanf(str + off, "%hhx", &key[byte]) != 1)
			error(1, 0, "key parse error at %d off %d len %d",
			      byte, off, slen);
	}
}
/* Convert the '-r' cpu bitmap into the silo -> cpu table, in ascending
 * cpu order. Rejects bits beyond RPS_MAX_CPUS.
 */
static void parse_rps_bitmap(const char *arg)
{
	unsigned long bitmap = strtoul(arg, NULL, 0);
	int cpu;

	if (bitmap & ~(RPS_MAX_CPUS - 1))
		error(1, 0, "rps bitmap 0x%lx out of bounds 0..%lu",
		      bitmap, RPS_MAX_CPUS - 1);

	for (cpu = 0; cpu < RPS_MAX_CPUS; cpu++)
		if (bitmap & 1UL << cpu)
			rps_silo_to_cpu[cfg_num_rps_cpus++] = cpu;
}
/* Parse command line options into the cfg_* globals, then validate the
 * combination: the rss key ('-k') is mandatory, '-C' and '-r' are
 * mutually exclusive, and the cpu count must fit the rings[] array.
 */
static void parse_opts(int argc, char **argv)
{
	static struct option long_options[] = {
		{"dport", required_argument, 0, 'd'},
		{"cpus", required_argument, 0, 'C'},
		{"key", required_argument, 0, 'k'},
		{"iface", required_argument, 0, 'i'},
		{"ipv4", no_argument, 0, '4'},
		{"ipv6", no_argument, 0, '6'},
		{"sink", no_argument, 0, 's'},
		{"tcp", no_argument, 0, 't'},
		{"timeout", required_argument, 0, 'T'},
		{"udp", no_argument, 0, 'u'},
		{"verbose", no_argument, 0, 'v'},
		{"rps", required_argument, 0, 'r'},
		{0, 0, 0, 0}
	};
	bool have_toeplitz = false;
	int index, c;
	while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) {
		switch (c) {
		case '4':
			cfg_family = AF_INET;
			break;
		case '6':
			cfg_family = AF_INET6;
			break;
		case 'C':
			parse_cpulist(optarg);
			break;
		case 'd':
			cfg_dport = strtol(optarg, NULL, 0);
			break;
		case 'i':
			cfg_ifname = optarg;
			break;
		case 'k':
			parse_toeplitz_key(optarg, strlen(optarg),
					   toeplitz_key);
			have_toeplitz = true;
			break;
		case 'r':
			parse_rps_bitmap(optarg);
			break;
		case 's':
			cfg_sink = true;
			break;
		case 't':
			cfg_type = SOCK_STREAM;
			break;
		case 'T':
			cfg_timeout_msec = strtol(optarg, NULL, 0);
			break;
		case 'u':
			cfg_type = SOCK_DGRAM;
			break;
		case 'v':
			cfg_verbose = true;
			break;
		default:
			error(1, 0, "unknown option %c", optopt);
			break;
		}
	}
	if (!have_toeplitz)
		error(1, 0, "Must supply rss key ('-k')");
	num_cpus = get_nprocs();
	if (num_cpus > RSS_MAX_CPUS)
		error(1, 0, "increase RSS_MAX_CPUS");
	if (cfg_num_queues && cfg_num_rps_cpus)
		error(1, 0,
		      "Can't supply both RSS cpus ('-C') and RPS map ('-r')");
	if (cfg_verbose) {
		show_cpulist();
		show_silos();
	}
}
/* Run the capture-verify cycle once. Exit code is the number of
 * verification failures (0 on full success); exits with an error if
 * fewer than min_tests hashed frames arrived, since that proves nothing.
 */
int main(int argc, char **argv)
{
	const int min_tests = 10;
	int fd_sink = -1;
	parse_opts(argc, argv);
	/* optionally open a local socket so the test traffic is consumed */
	if (cfg_sink)
		fd_sink = setup_sink();
	setup_rings();
	process_rings();
	cleanup_rings();
	if (cfg_sink && close(fd_sink))
		error(1, errno, "close sink");
	if (frames_received - frames_nohash < min_tests)
		error(1, 0, "too few frames for verification");
	return frames_error;
}
| linux-master | tools/testing/selftests/net/toeplitz.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <poll.h>
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <unistd.h>
#include "../kselftest.h"
#ifndef ETH_MAX_MTU
#define ETH_MAX_MTU 0xFFFFU
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103
#endif
#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef SO_EE_ORIGIN_ZEROCOPY
#define SO_EE_ORIGIN_ZEROCOPY 5
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
#define NUM_PKT 100
/* configuration (set from command line options) */
static bool cfg_cache_trash;		/* '-c': rotate send buffers */
static int cfg_cpu = -1;		/* '-C': cpu to pin to */
static int cfg_connected = true;	/* cleared by '-u' */
static int cfg_family = PF_UNSPEC;	/* '-4' / '-6' */
static uint16_t cfg_mss;		/* derived: ETH_DATA_LEN - hdrlen */
static int cfg_payload_len = (1472 * 42);	/* '-s' */
static int cfg_port = 8000;		/* '-p' */
static int cfg_runtime_ms = -1;		/* '-l', in ms */
static bool cfg_poll;			/* '-P' */
static int cfg_poll_loop_timeout_ms = 2000;	/* '-L', in ms */
static bool cfg_segment;		/* '-S': UDP GSO */
static bool cfg_sendmmsg;		/* '-m' */
static bool cfg_tcp;			/* '-t' */
static uint32_t cfg_tx_ts = SOF_TIMESTAMPING_TX_SOFTWARE;	/* '-H' -> hw */
static bool cfg_tx_tstamp;		/* '-T' / '-H' */
static bool cfg_audit;			/* '-a' */
static bool cfg_verbose;		/* '-v' */
static bool cfg_zerocopy;		/* '-z' */
static int cfg_msg_nr;			/* '-M': stop after N messages */
static uint16_t cfg_gso_size;		/* '-S' argument, defaults to mss */
/* counters and timestamps for reporting */
static unsigned long total_num_msgs;
static unsigned long total_num_sends;
static unsigned long stat_tx_ts;
static unsigned long stat_tx_ts_errors;
static unsigned long tstart;
static unsigned long tend;
static unsigned long stat_zcopies;
static socklen_t cfg_alen;
static struct sockaddr_storage cfg_dst_addr;
static bool interrupted;		/* set by the SIGINT handler */
static char buf[NUM_PKT][ETH_MAX_MTU];	/* payload source buffers */
/* SIGINT handler: request a clean exit from the main send loop */
static void sigint_handler(int signum)
{
	if (signum == SIGINT)
		interrupted = true;
}
/* wall-clock time in milliseconds */
static unsigned long gettimeofday_ms(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);

	return (now.tv_sec * 1000) + (now.tv_usec / 1000);
}
/* Pin the calling thread to the given cpu; exits on failure.
 * Fix: pass errno to error() (was 0), so the failure reason
 * (e.g. EINVAL for a nonexistent cpu vs EPERM) is actually reported.
 */
static int set_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask))
		error(1, errno, "setaffinity %d", cpu);

	return 0;
}
/* Fill *sockaddr from a textual address for the given domain, using the
 * global cfg_port as the port. Exits on an unparseable address or an
 * unsupported domain (anything other than PF_INET/PF_INET6).
 */
static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
{
	struct sockaddr_in6 *addr6 = (void *) sockaddr;
	struct sockaddr_in *addr4 = (void *) sockaddr;
	switch (domain) {
	case PF_INET:
		addr4->sin_family = AF_INET;
		addr4->sin_port = htons(cfg_port);
		if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
			error(1, 0, "ipv4 parse error: %s", str_addr);
		break;
	case PF_INET6:
		addr6->sin6_family = AF_INET6;
		addr6->sin6_port = htons(cfg_port);
		if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
			error(1, 0, "ipv6 parse error: %s", str_addr);
		break;
	default:
		error(1, 0, "illegal domain");
	}
}
/* Account one control message read from the error queue:
 * - SO_TIMESTAMPING payloads are checked for an all-zero (missing) stamp;
 *   index 2 of ts[] holds the hardware stamp, index 0 the software one.
 * - IP(V6)_RECVERR payloads are dispatched on origin: timestamp and
 *   zerocopy completions update the stat_* counters, ICMP/local origins
 *   are only logged when verbose.
 * Unknown levels/types are reported but not fatal (error() with status 0).
 */
static void flush_cmsg(struct cmsghdr *cmsg)
{
	struct sock_extended_err *err;
	struct scm_timestamping *tss;
	__u32 lo;
	__u32 hi;
	int i;
	switch (cmsg->cmsg_level) {
	case SOL_SOCKET:
		if (cmsg->cmsg_type == SO_TIMESTAMPING) {
			i = (cfg_tx_ts == SOF_TIMESTAMPING_TX_HARDWARE) ? 2 : 0;
			tss = (struct scm_timestamping *)CMSG_DATA(cmsg);
			if (tss->ts[i].tv_sec == 0)
				stat_tx_ts_errors++;
		} else {
			error(1, 0, "unknown SOL_SOCKET cmsg type=%u\n",
			      cmsg->cmsg_type);
		}
		break;
	case SOL_IP:
	case SOL_IPV6:
		switch (cmsg->cmsg_type) {
		case IP_RECVERR:
		case IPV6_RECVERR:
		{
			err = (struct sock_extended_err *)CMSG_DATA(cmsg);
			switch (err->ee_origin) {
			case SO_EE_ORIGIN_TIMESTAMPING:
				/* Got a TX timestamp from error queue */
				stat_tx_ts++;
				break;
			case SO_EE_ORIGIN_ICMP:
			case SO_EE_ORIGIN_ICMP6:
				if (cfg_verbose)
					fprintf(stderr,
						"received ICMP error: type=%u, code=%u\n",
						err->ee_type, err->ee_code);
				break;
			case SO_EE_ORIGIN_ZEROCOPY:
			{
				lo = err->ee_info;
				hi = err->ee_data;
				/* range of IDs acknowledged */
				stat_zcopies += hi - lo + 1;
				break;
			}
			case SO_EE_ORIGIN_LOCAL:
				if (cfg_verbose)
					fprintf(stderr,
						"received packet with local origin: %u\n",
						err->ee_origin);
				break;
			default:
				error(0, 1, "received packet with origin: %u",
				      err->ee_origin);
			}
			break;
		}
		default:
			error(0, 1, "unknown IP msg type=%u\n",
			      cmsg->cmsg_type);
			break;
		}
		break;
	default:
		error(0, 1, "unknown cmsg level=%u\n",
		      cmsg->cmsg_level);
	}
}
/* Drain the socket error queue until it is empty (EAGAIN).
 * When auditing, walk each message's control data through flush_cmsg()
 * to count timestamps and zerocopy completions.
 */
static void flush_errqueue_recv(int fd)
{
	char control[CMSG_SPACE(sizeof(struct scm_timestamping)) +
		     CMSG_SPACE(sizeof(struct sock_extended_err)) +
		     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
	struct msghdr msg = {0};
	struct cmsghdr *cmsg;
	int ret;
	while (1) {
		msg.msg_control = control;
		msg.msg_controllen = sizeof(control);
		ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
		if (ret == -1 && errno == EAGAIN)
			break;	/* queue drained */
		if (ret == -1)
			error(1, errno, "errqueue");
		if (msg.msg_flags != MSG_ERRQUEUE)
			error(1, 0, "errqueue: flags 0x%x\n", msg.msg_flags);
		if (cfg_audit) {
			for (cmsg = CMSG_FIRSTHDR(&msg);
			     cmsg;
			     cmsg = CMSG_NXTHDR(&msg, cmsg))
				flush_cmsg(cmsg);
		}
		msg.msg_flags = 0;
	}
}
/* Optionally wait up to poll_timeout ms for error-queue readiness, then
 * drain the queue. A poll timeout is only logged (when verbose and
 * poll_err is set), never fatal; a poll failure is fatal.
 */
static void flush_errqueue(int fd, const bool do_poll,
			   unsigned long poll_timeout, const bool poll_err)
{
	if (do_poll) {
		struct pollfd pfd = { .fd = fd };
		int ret = poll(&pfd, 1, poll_timeout);

		if (ret < 0)
			error(1, errno, "poll");
		if (!ret && cfg_verbose && poll_err)
			fprintf(stderr, "poll timeout\n");
	}

	flush_errqueue_recv(fd);
}
/* Keep flushing the error queue until every zerocopy completion has
 * been seen or cfg_poll_loop_timeout_ms has expired. The "poll timeout"
 * message is only printed for the first round.
 */
static void flush_errqueue_retry(int fd, unsigned long num_sends)
{
	unsigned long now, deadline;
	bool log_timeout = true;

	now = gettimeofday_ms();
	deadline = now + cfg_poll_loop_timeout_ms;
	do {
		flush_errqueue(fd, true, deadline - now, log_timeout);
		log_timeout = false;
		now = gettimeofday_ms();
	} while (stat_zcopies != num_sends && now < deadline);
}
/* Send the whole payload over a connected TCP socket, looping on short
 * writes. Returns the number of send() calls issued.
 */
static int send_tcp(int fd, char *data)
{
	int flags = cfg_zerocopy ? MSG_ZEROCOPY : 0;
	int sent = 0, calls = 0, ret;

	while (sent < cfg_payload_len) {
		ret = send(fd, data + sent, cfg_payload_len - sent, flags);
		if (ret == -1)
			error(1, errno, "write");

		sent += ret;
		calls++;
	}

	return calls;
}
/* Send the payload as a train of datagrams of at most cfg_mss bytes.
 * Uses the connected destination or an explicit address ('-u').
 * A short datagram write is fatal. Returns the number of sendto() calls.
 */
static int send_udp(int fd, char *data)
{
	int flags = cfg_zerocopy ? MSG_ZEROCOPY : 0;
	int remaining = cfg_payload_len;
	int count = 0, len, ret;

	while (remaining) {
		len = remaining < cfg_mss ? remaining : cfg_mss;

		ret = sendto(fd, data, len, flags,
			     cfg_connected ? NULL : (void *)&cfg_dst_addr,
			     cfg_connected ? 0 : cfg_alen);
		if (ret == -1)
			error(1, errno, "write");
		if (ret != len)
			error(1, errno, "write: %uB != %uB\n", ret, len);

		remaining -= len;
		count++;
	}

	return count;
}
/* Fill a cmsg requesting the configured TX timestamp flags (cfg_tx_ts) */
static void send_ts_cmsg(struct cmsghdr *cm)
{
	uint32_t *tstype;

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SO_TIMESTAMPING;
	cm->cmsg_len = CMSG_LEN(sizeof(cfg_tx_ts));
	tstype = (void *)CMSG_DATA(cm);
	*tstype = cfg_tx_ts;
}
/* Send the payload as one sendmmsg() call of <= cfg_mss sized messages,
 * optionally attaching the same TX-timestamp cmsg to every message.
 * Returns the number of messages actually sent (sendmmsg return value).
 */
static int send_udp_sendmmsg(int fd, char *data)
{
	char control[CMSG_SPACE(sizeof(cfg_tx_ts))] = {0};
	const int max_nr_msg = ETH_MAX_MTU / ETH_DATA_LEN;
	struct mmsghdr mmsgs[max_nr_msg];
	struct iovec iov[max_nr_msg];
	unsigned int off = 0, left;
	size_t msg_controllen = 0;
	int i = 0, ret;
	memset(mmsgs, 0, sizeof(mmsgs));
	if (cfg_tx_tstamp) {
		/* build the timestamp cmsg once; shared by all messages */
		struct msghdr msg = {0};
		struct cmsghdr *cmsg;
		msg.msg_control = control;
		msg.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msg);
		send_ts_cmsg(cmsg);
		msg_controllen += CMSG_SPACE(sizeof(cfg_tx_ts));
	}
	left = cfg_payload_len;
	while (left) {
		if (i == max_nr_msg)
			error(1, 0, "sendmmsg: exceeds max_nr_msg");
		iov[i].iov_base = data + off;
		iov[i].iov_len = cfg_mss < left ? cfg_mss : left;
		mmsgs[i].msg_hdr.msg_iov = iov + i;
		mmsgs[i].msg_hdr.msg_iovlen = 1;
		mmsgs[i].msg_hdr.msg_name = (void *)&cfg_dst_addr;
		mmsgs[i].msg_hdr.msg_namelen = cfg_alen;
		if (msg_controllen) {
			mmsgs[i].msg_hdr.msg_control = control;
			mmsgs[i].msg_hdr.msg_controllen = msg_controllen;
		}
		off += iov[i].iov_len;
		left -= iov[i].iov_len;
		i++;
	}
	ret = sendmmsg(fd, mmsgs, i, cfg_zerocopy ? MSG_ZEROCOPY : 0);
	if (ret == -1)
		error(1, errno, "sendmmsg");
	return ret;
}
static void send_udp_segment_cmsg(struct cmsghdr *cm)
{
uint16_t *valp;
cm->cmsg_level = SOL_UDP;
cm->cmsg_type = UDP_SEGMENT;
cm->cmsg_len = CMSG_LEN(sizeof(cfg_gso_size));
valp = (void *)CMSG_DATA(cm);
*valp = cfg_gso_size;
}
/* Send the whole payload as one GSO "super" datagram with a UDP_SEGMENT
 * cmsg (and optionally a TX timestamp request). Returns 1: a single
 * sendmsg() call transfers everything.
 */
static int send_udp_segment(int fd, char *data)
{
	char control[CMSG_SPACE(sizeof(cfg_gso_size)) +
		     CMSG_SPACE(sizeof(cfg_tx_ts))] = {0};
	struct msghdr msg = {0};
	struct iovec iov = {0};
	size_t msg_controllen;
	struct cmsghdr *cmsg;
	int ret;
	iov.iov_base = data;
	iov.iov_len = cfg_payload_len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);
	cmsg = CMSG_FIRSTHDR(&msg);
	send_udp_segment_cmsg(cmsg);
	/* account for the field actually written by send_udp_segment_cmsg;
	 * this used sizeof(cfg_mss), correct only because cfg_mss and
	 * cfg_gso_size happen to share a type
	 */
	msg_controllen = CMSG_SPACE(sizeof(cfg_gso_size));
	if (cfg_tx_tstamp) {
		cmsg = CMSG_NXTHDR(&msg, cmsg);
		send_ts_cmsg(cmsg);
		msg_controllen += CMSG_SPACE(sizeof(cfg_tx_ts));
	}
	msg.msg_controllen = msg_controllen;
	msg.msg_name = (void *)&cfg_dst_addr;
	msg.msg_namelen = cfg_alen;
	ret = sendmsg(fd, &msg, cfg_zerocopy ? MSG_ZEROCOPY : 0);
	if (ret == -1)
		error(1, errno, "sendmsg");
	if (ret != iov.iov_len)
		error(1, 0, "sendmsg: %u != %llu\n", ret,
		      (unsigned long long)iov.iov_len);
	return 1;
}
/* Print usage to stderr and exit with status 1 (via error()) */
static void usage(const char *filepath)
{
	error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] "
		    "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
	      filepath);
}
/* Parse command line options into the cfg_* globals, validate option
 * combinations, and derive cfg_mss / cfg_gso_size / the payload limit
 * from the chosen address family's header sizes.
 */
static void parse_opts(int argc, char **argv)
{
	const char *bind_addr = NULL;
	int max_len, hdrlen;
	int c;
	while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) {
		switch (c) {
		case '4':
			if (cfg_family != PF_UNSPEC)
				error(1, 0, "Pass one of -4 or -6");
			cfg_family = PF_INET;
			cfg_alen = sizeof(struct sockaddr_in);
			break;
		case '6':
			if (cfg_family != PF_UNSPEC)
				error(1, 0, "Pass one of -4 or -6");
			cfg_family = PF_INET6;
			cfg_alen = sizeof(struct sockaddr_in6);
			break;
		case 'a':
			cfg_audit = true;
			break;
		case 'c':
			cfg_cache_trash = true;
			break;
		case 'C':
			cfg_cpu = strtol(optarg, NULL, 0);
			break;
		case 'D':
			bind_addr = optarg;
			break;
		case 'l':
			cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
			break;
		case 'L':
			cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000;
			break;
		case 'm':
			cfg_sendmmsg = true;
			break;
		case 'M':
			cfg_msg_nr = strtoul(optarg, NULL, 10);
			break;
		case 'p':
			cfg_port = strtoul(optarg, NULL, 0);
			break;
		case 'P':
			cfg_poll = true;
			break;
		case 's':
			cfg_payload_len = strtoul(optarg, NULL, 0);
			break;
		case 'S':
			cfg_gso_size = strtoul(optarg, NULL, 0);
			cfg_segment = true;
			break;
		case 'H':
			/* hardware timestamps imply timestamping on */
			cfg_tx_ts = SOF_TIMESTAMPING_TX_HARDWARE;
			cfg_tx_tstamp = true;
			break;
		case 't':
			cfg_tcp = true;
			break;
		case 'T':
			cfg_tx_tstamp = true;
			break;
		case 'u':
			cfg_connected = false;
			break;
		case 'v':
			cfg_verbose = true;
			break;
		case 'z':
			cfg_zerocopy = true;
			break;
		default:
			exit(1);
		}
	}
	/* default destination: the wildcard address of the chosen family */
	if (!bind_addr)
		bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
	setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
	if (optind != argc)
		usage(argv[0]);
	if (cfg_family == PF_UNSPEC)
		error(1, 0, "must pass one of -4 or -6");
	if (cfg_tcp && !cfg_connected)
		error(1, 0, "connectionless tcp makes no sense");
	if (cfg_segment && cfg_sendmmsg)
		error(1, 0, "cannot combine segment offload and sendmmsg");
	if (cfg_tx_tstamp && !(cfg_segment || cfg_sendmmsg))
		error(1, 0, "Options -T and -H require either -S or -m option");
	if (cfg_family == PF_INET)
		hdrlen = sizeof(struct iphdr) + sizeof(struct udphdr);
	else
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct udphdr);
	cfg_mss = ETH_DATA_LEN - hdrlen;
	max_len = ETH_MAX_MTU - hdrlen;
	if (!cfg_gso_size)
		cfg_gso_size = cfg_mss;
	if (cfg_payload_len > max_len)
		error(1, 0, "payload length %u exceeds max %u",
		      cfg_payload_len, max_len);
}
/* Set IP(V6)_MTU_DISCOVER to PMTUDISC_DO on the socket */
static void set_pmtu_discover(int fd, bool is_ipv4)
{
	int level = is_ipv4 ? SOL_IP : SOL_IPV6;
	int name = is_ipv4 ? IP_MTU_DISCOVER : IPV6_MTU_DISCOVER;
	int val = is_ipv4 ? IP_PMTUDISC_DO : IPV6_PMTUDISC_DO;

	if (setsockopt(fd, level, name, &val, sizeof(val)))
		error(1, errno, "setsockopt path mtu");
}
/* Enable TX timestamp reporting on the socket: cmsg delivery with IDs,
 * timestamp-only payloads, software or raw hardware stamps per cfg_tx_ts.
 */
static void set_tx_timestamping(int fd)
{
	int val = SOF_TIMESTAMPING_OPT_CMSG | SOF_TIMESTAMPING_OPT_ID |
		  SOF_TIMESTAMPING_OPT_TSONLY;

	val |= (cfg_tx_ts == SOF_TIMESTAMPING_TX_SOFTWARE) ?
	       SOF_TIMESTAMPING_SOFTWARE : SOF_TIMESTAMPING_RAW_HARDWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)))
		error(1, errno, "setsockopt tx timestamping");
}
/* Final summary for '-a': overall throughput plus strict checks that
 * every send produced exactly one TX timestamp and/or one zerocopy
 * completion; any discrepancy is fatal. No-op if no time elapsed.
 */
static void print_audit_report(unsigned long num_msgs, unsigned long num_sends)
{
	unsigned long tdelta;
	tdelta = tend - tstart;
	if (!tdelta)
		return;
	fprintf(stderr, "Summary over %lu.%03lu seconds...\n",
		tdelta / 1000, tdelta % 1000);
	fprintf(stderr,
		"sum %s tx: %6lu MB/s %10lu calls (%lu/s) %10lu msgs (%lu/s)\n",
		cfg_tcp ? "tcp" : "udp",
		((num_msgs * cfg_payload_len) >> 10) / tdelta,
		num_sends, num_sends * 1000 / tdelta,
		num_msgs, num_msgs * 1000 / tdelta);
	if (cfg_tx_tstamp) {
		if (stat_tx_ts_errors)
			error(1, 0,
			      "Expected clean TX Timestamps: %9lu msgs received %6lu errors",
			      stat_tx_ts, stat_tx_ts_errors);
		if (stat_tx_ts != num_sends)
			error(1, 0,
			      "Unexpected number of TX Timestamps: %9lu expected %9lu received",
			      num_sends, stat_tx_ts);
		fprintf(stderr,
			"Tx Timestamps: %19lu received %17lu errors\n",
			stat_tx_ts, stat_tx_ts_errors);
	}
	if (cfg_zerocopy) {
		if (stat_zcopies != num_sends)
			error(1, 0, "Unexpected number of Zerocopy completions: %9lu expected %9lu received",
			      num_sends, stat_zcopies);
		fprintf(stderr,
			"Zerocopy acks: %19lu\n",
			stat_zcopies);
	}
}
/* Per-interval throughput line; when auditing, also fold the interval's
 * counts into the running totals used by the final report.
 */
static void print_report(unsigned long num_msgs, unsigned long num_sends)
{
	fprintf(stderr,
		"%s tx: %6lu MB/s %8lu calls/s %6lu msg/s\n",
		cfg_tcp ? "tcp" : "udp",
		(num_msgs * cfg_payload_len) >> 20,
		num_sends, num_msgs);

	if (!cfg_audit)
		return;

	total_num_msgs += num_msgs;
	total_num_sends += num_sends;
}
/* Benchmark driver: set up the socket per the cfg_* options, then loop
 * sending the payload via the selected method (tcp / gso segment /
 * sendmmsg / plain udp) until the runtime, message count or SIGINT stops
 * it, reporting throughput every second.
 */
int main(int argc, char **argv)
{
	unsigned long num_msgs, num_sends;
	unsigned long tnow, treport, tstop;
	int fd, i, val, ret;
	parse_opts(argc, argv);
	/* NOTE(review): '-C 0' is ignored here; only cpu > 0 pins */
	if (cfg_cpu > 0)
		set_cpu(cfg_cpu);
	/* fill the first buffer with a repeating pattern, then clone it */
	for (i = 0; i < sizeof(buf[0]); i++)
		buf[0][i] = 'a' + (i % 26);
	for (i = 1; i < NUM_PKT; i++)
		memcpy(buf[i], buf[0], sizeof(buf[0]));
	signal(SIGINT, sigint_handler);
	fd = socket(cfg_family, cfg_tcp ? SOCK_STREAM : SOCK_DGRAM, 0);
	if (fd == -1)
		error(1, errno, "socket");
	if (cfg_zerocopy) {
		val = 1;
		ret = setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY,
				 &val, sizeof(val));
		if (ret) {
			/* skip, not fail, on kernels without SO_ZEROCOPY */
			if (errno == ENOPROTOOPT || errno == ENOTSUPP) {
				fprintf(stderr, "SO_ZEROCOPY not supported");
				exit(KSFT_SKIP);
			}
			error(1, errno, "setsockopt zerocopy");
		}
	}
	if (cfg_connected &&
	    connect(fd, (void *)&cfg_dst_addr, cfg_alen))
		error(1, errno, "connect");
	if (cfg_segment)
		set_pmtu_discover(fd, cfg_family == PF_INET);
	if (cfg_tx_tstamp)
		set_tx_timestamping(fd);
	num_msgs = num_sends = 0;
	tnow = gettimeofday_ms();
	tstart = tnow;
	tend = tnow;
	tstop = tnow + cfg_runtime_ms;
	treport = tnow + 1000;
	i = 0;
	do {
		if (cfg_tcp)
			num_sends += send_tcp(fd, buf[i]);
		else if (cfg_segment)
			num_sends += send_udp_segment(fd, buf[i]);
		else if (cfg_sendmmsg)
			num_sends += send_udp_sendmmsg(fd, buf[i]);
		else
			num_sends += send_udp(fd, buf[i]);
		num_msgs++;
		/* flush periodically for zerocopy, always for timestamps */
		if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp)
			flush_errqueue(fd, cfg_poll, 500, true);
		if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
			break;
		tnow = gettimeofday_ms();
		if (tnow >= treport) {
			print_report(num_msgs, num_sends);
			num_msgs = num_sends = 0;
			treport = tnow + 1000;
		}
		/* cold cache when writing buffer */
		if (cfg_cache_trash)
			i = ++i < NUM_PKT ? i : 0;
	} while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop));
	/* wait for outstanding completions before closing */
	if (cfg_zerocopy || cfg_tx_tstamp)
		flush_errqueue_retry(fd, num_sends);
	if (close(fd))
		error(1, errno, "close");
	if (cfg_audit) {
		tend = tnow;
		total_num_msgs += num_msgs;
		total_num_sends += num_sends;
		print_audit_report(total_num_msgs, total_num_sends);
	}
	return 0;
}
| linux-master | tools/testing/selftests/net/udpgso_bench_tx.c |
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/rtnetlink.h>
#include <linux/genetlink.h>
#include "linux/mptcp.h"
#ifndef MPTCP_PM_NAME
#define MPTCP_PM_NAME "mptcp_pm"
#endif
#ifndef MPTCP_PM_EVENTS
#define MPTCP_PM_EVENTS "mptcp_pm_events"
#endif
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
/* Print the per-subcommand usage text and exit (status 0) */
static void syntax(char *argv[])
{
	fprintf(stderr, "%s add|ann|rem|csf|dsf|get|set|del|flush|dump|events|listen|accept [<args>]\n", argv[0]);
	fprintf(stderr, "\tadd [flags signal|subflow|backup|fullmesh] [id <nr>] [dev <name>] <ip>\n");
	fprintf(stderr, "\tann <local-ip> id <local-id> token <token> [port <local-port>] [dev <name>]\n");
	fprintf(stderr, "\trem id <local-id> token <token>\n");
	fprintf(stderr, "\tcsf lip <local-ip> lid <local-id> rip <remote-ip> rport <remote-port> token <token>\n");
	fprintf(stderr, "\tdsf lip <local-ip> lport <local-port> rip <remote-ip> rport <remote-port> token <token>\n");
	fprintf(stderr, "\tdel <id> [<ip>]\n");
	fprintf(stderr, "\tget <id>\n");
	fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>] [token <token>] [rip <ip>] [rport <port>]\n");
	fprintf(stderr, "\tflush\n");
	fprintf(stderr, "\tdump\n");
	fprintf(stderr, "\tlimits [<rcv addr max> <subflow max>]\n");
	fprintf(stderr, "\tevents\n");
	fprintf(stderr, "\tlisten <local-ip> <local-port>\n");
	exit(0);
}
/* Write an nlmsghdr followed by a genlmsghdr at the start of 'data'.
 * nlmsg_len is initialized for an attribute-less message; callers that
 * append attributes overwrite it. Returns the offset just past both
 * (aligned) headers, where attributes begin.
 */
static int init_genl_req(char *data, int family, int cmd, int version)
{
	struct nlmsghdr *nh = (void *)data;
	struct genlmsghdr *gh =
		(void *)(data + NLMSG_ALIGN(sizeof(struct nlmsghdr)));

	nh->nlmsg_type = family;
	nh->nlmsg_flags = NLM_F_REQUEST;
	nh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);

	gh->cmd = cmd;
	gh->version = version;

	return NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
	       NLMSG_ALIGN(sizeof(struct genlmsghdr));
}
/* Decode an NLMSG_ERROR message: print the embedded error code plus any
 * extended-ack attributes (message text, offending offset).
 * Returns 0 for an ACK (error == 0), -1 for a real error; exits if the
 * message is too short to contain an nlmsgerr at all.
 */
static int nl_error(struct nlmsghdr *nh)
{
	struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(nh);
	int len = nh->nlmsg_len - sizeof(*nh);
	uint32_t off;
	if (len < sizeof(struct nlmsgerr)) {
		error(1, 0, "netlink error message truncated %d min %ld", len,
		      sizeof(struct nlmsgerr));
		return -1;
	}
	if (err->error) {
		/* check messages from kernel */
		struct rtattr *attrs = (struct rtattr *)NLMSG_DATA(nh);
		fprintf(stderr, "netlink error %d (%s)\n",
			err->error, strerror(-err->error));
		while (RTA_OK(attrs, len)) {
			if (attrs->rta_type == NLMSGERR_ATTR_MSG)
				fprintf(stderr, "netlink ext ack msg: %s\n",
					(char *)RTA_DATA(attrs));
			if (attrs->rta_type == NLMSGERR_ATTR_OFFS) {
				memcpy(&off, RTA_DATA(attrs), 4);
				fprintf(stderr, "netlink err off %d\n",
					(int)off);
			}
			attrs = RTA_NEXT(attrs, len);
		}
		return -1;
	}
	return 0;
}
/* Join the MPTCP PM events multicast group on the netlink socket and
 * loop forever, printing each received event and its attributes as one
 * comma-separated line on stderr. Only exits via error() on failure
 * (the trailing return is unreachable).
 */
static int capture_events(int fd, int event_group)
{
	u_int8_t buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
			NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024];
	struct genlmsghdr *ghdr;
	struct rtattr *attrs;
	struct nlmsghdr *nh;
	int ret = 0;
	int res_len;
	int msg_len;
	fd_set rfds;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &event_group, sizeof(event_group)) < 0)
		error(1, errno, "could not join the " MPTCP_PM_EVENTS " mcast group");
	do {
		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);
		res_len = NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
			  NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024;
		/* block until an event is available */
		ret = select(FD_SETSIZE, &rfds, NULL, NULL, NULL);
		if (ret < 0)
			error(1, ret, "error in select() on NL socket");
		res_len = recv(fd, buffer, res_len, 0);
		if (res_len < 0)
			error(1, res_len, "error on recv() from NL socket");
		nh = (struct nlmsghdr *)buffer;
		/* one datagram may carry several netlink messages */
		for (; NLMSG_OK(nh, res_len); nh = NLMSG_NEXT(nh, res_len)) {
			if (nh->nlmsg_type == NLMSG_ERROR)
				error(1, NLMSG_ERROR, "received invalid NL message");
			ghdr = (struct genlmsghdr *)NLMSG_DATA(nh);
			if (ghdr->cmd == 0)
				continue;
			fprintf(stderr, "type:%d", ghdr->cmd);
			msg_len = nh->nlmsg_len - NLMSG_LENGTH(GENL_HDRLEN);
			attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
			/* print each known attribute as ",name:value" */
			while (RTA_OK(attrs, msg_len)) {
				if (attrs->rta_type == MPTCP_ATTR_TOKEN)
					fprintf(stderr, ",token:%u", *(__u32 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_FAMILY)
					fprintf(stderr, ",family:%u", *(__u16 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_LOC_ID)
					fprintf(stderr, ",loc_id:%u", *(__u8 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_REM_ID)
					fprintf(stderr, ",rem_id:%u", *(__u8 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_SADDR4) {
					u_int32_t saddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
					fprintf(stderr, ",saddr4:%u.%u.%u.%u", saddr4 >> 24,
						(saddr4 >> 16) & 0xFF, (saddr4 >> 8) & 0xFF,
						(saddr4 & 0xFF));
				} else if (attrs->rta_type == MPTCP_ATTR_SADDR6) {
					char buf[INET6_ADDRSTRLEN];
					if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
						      sizeof(buf)) != NULL)
						fprintf(stderr, ",saddr6:%s", buf);
				} else if (attrs->rta_type == MPTCP_ATTR_DADDR4) {
					u_int32_t daddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
					fprintf(stderr, ",daddr4:%u.%u.%u.%u", daddr4 >> 24,
						(daddr4 >> 16) & 0xFF, (daddr4 >> 8) & 0xFF,
						(daddr4 & 0xFF));
				} else if (attrs->rta_type == MPTCP_ATTR_DADDR6) {
					char buf[INET6_ADDRSTRLEN];
					if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
						      sizeof(buf)) != NULL)
						fprintf(stderr, ",daddr6:%s", buf);
				} else if (attrs->rta_type == MPTCP_ATTR_SPORT)
					fprintf(stderr, ",sport:%u",
						ntohs(*(__u16 *)RTA_DATA(attrs)));
				else if (attrs->rta_type == MPTCP_ATTR_DPORT)
					fprintf(stderr, ",dport:%u",
						ntohs(*(__u16 *)RTA_DATA(attrs)));
				else if (attrs->rta_type == MPTCP_ATTR_BACKUP)
					fprintf(stderr, ",backup:%u", *(__u8 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_ERROR)
					fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
				else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
					fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
				attrs = RTA_NEXT(attrs, msg_len);
			}
		}
		fprintf(stderr, "\n");
	} while (1);
	return 0;
}
/* do a netlink command and, if max > 0, fetch the reply ; nh's size >1024B */
/* Sends the 'len'-byte request at 'nh' on 'fd', reads the answer back into
 * the same buffer (up to 'max' bytes) and exits on any NLMSG_ERROR reported
 * by nl_error().  Returns the number of bytes received.
 *
 * Fix: the error() format strings used %u for signed int arguments; 'ret'
 * is negative on the recv failure path, so print them with %d.
 */
static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	socklen_t addr_len;
	void *data = nh;
	int rem, ret;
	int err = 0;

	/* If no expected answer, ask for an ACK to look for errors if any */
	if (max == 0) {
		nh->nlmsg_flags |= NLM_F_ACK;
		max = 1024;
	}

	nh->nlmsg_len = len;
	ret = sendto(fd, data, len, 0, (void *)&nladdr, sizeof(nladdr));
	if (ret != len)
		error(1, errno, "send netlink: %dB != %dB\n", ret, len);

	addr_len = sizeof(nladdr);
	rem = ret = recvfrom(fd, data, max, 0, (void *)&nladdr, &addr_len);
	if (ret < 0)
		error(1, errno, "recv netlink: %dB\n", ret);

	/* Beware: the NLMSG_NEXT macro updates the 'rem' argument */
	for (; NLMSG_OK(nh, rem); nh = NLMSG_NEXT(nh, rem)) {
		if (nh->nlmsg_type == NLMSG_DONE)
			break;
		if (nh->nlmsg_type == NLMSG_ERROR && nl_error(nh))
			err = 1;
	}
	if (err)
		error(1, 0, "bailing out due to netlink error[s]");

	return ret;
}
/* Parse a CTRL_CMD_NEWFAMILY answer: store the MPTCP PM genl family id in
 * *pm_family and the id of the MPTCP_PM_EVENTS multicast group in
 * *events_mcast_grp.  Returns 0 once both are found, exits on malformed
 * input, and falls through to an error() if the family id is missing.
 */
static int genl_parse_getfamily(struct nlmsghdr *nlh, int *pm_family,
				int *events_mcast_grp)
{
	struct genlmsghdr *ghdr = NLMSG_DATA(nlh);
	int len = nlh->nlmsg_len;
	struct rtattr *attrs;
	struct rtattr *grps;
	struct rtattr *grp;
	int got_events_grp;
	int got_family;
	int grps_len;
	int grp_len;

	if (nlh->nlmsg_type != GENL_ID_CTRL)
		error(1, errno, "Not a controller message, len=%d type=0x%x\n",
		      nlh->nlmsg_len, nlh->nlmsg_type);

	/* 'len' becomes the size of the attribute area after the genl header */
	len -= NLMSG_LENGTH(GENL_HDRLEN);
	if (len < 0)
		error(1, errno, "wrong controller message len %d\n", len);

	if (ghdr->cmd != CTRL_CMD_NEWFAMILY)
		error(1, errno, "Unknown controller command %d\n", ghdr->cmd);

	attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
	got_family = 0;
	got_events_grp = 0;

	while (RTA_OK(attrs, len)) {
		if (attrs->rta_type == CTRL_ATTR_FAMILY_ID) {
			*pm_family = *(__u16 *)RTA_DATA(attrs);
			got_family = 1;
		} else if (attrs->rta_type == CTRL_ATTR_MCAST_GROUPS) {
			/* nested attribute: one entry per multicast group */
			grps = RTA_DATA(attrs);
			grps_len = RTA_PAYLOAD(attrs);

			while (RTA_OK(grps, grps_len)) {
				grp = RTA_DATA(grps);
				grp_len = RTA_PAYLOAD(grps);
				got_events_grp = 0;

				/* a group matches only when its NAME attr is
				 * MPTCP_PM_EVENTS; its GRP_ID attr carries the
				 * multicast group id we need
				 */
				while (RTA_OK(grp, grp_len)) {
					if (grp->rta_type == CTRL_ATTR_MCAST_GRP_ID)
						*events_mcast_grp = *(__u32 *)RTA_DATA(grp);
					else if (grp->rta_type == CTRL_ATTR_MCAST_GRP_NAME &&
						 !strcmp(RTA_DATA(grp), MPTCP_PM_EVENTS))
						got_events_grp = 1;

					grp = RTA_NEXT(grp, grp_len);
				}

				if (got_events_grp)
					break;

				grps = RTA_NEXT(grps, grps_len);
			}
		}

		if (got_family && got_events_grp)
			return 0;

		attrs = RTA_NEXT(attrs, len);
	}

	error(1, errno, "can't find CTRL_ATTR_FAMILY_ID attr");
	return -1;
}
/* Ask the genl controller for the MPTCP PM family: build a
 * CTRL_CMD_GETFAMILY request carrying the family name, send it and parse
 * the answer via genl_parse_getfamily().
 */
static int resolve_mptcp_pm_netlink(int fd, int *pm_family, int *events_mcast_grp)
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct nlmsghdr *nh;
	struct rtattr *rta;
	int namelen;
	int off = 0;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 0);

	/* CTRL_ATTR_FAMILY_NAME, including the terminating NUL */
	rta = (void *)(data + off);
	namelen = strlen(MPTCP_PM_NAME) + 1;
	rta->rta_type = CTRL_ATTR_FAMILY_NAME;
	rta->rta_len = RTA_LENGTH(namelen);
	memcpy(RTA_DATA(rta), MPTCP_PM_NAME, namelen);
	off += NLMSG_ALIGN(rta->rta_len);

	do_nl_req(fd, nh, off, sizeof(data));
	return genl_parse_getfamily((void *)data, pm_family, events_mcast_grp);
}
/* "dsf" sub-command: destroy a subflow.  Builds an
 * MPTCP_PM_CMD_SUBFLOW_DESTROY request from the 'lip'/'lport'/'rip'/
 * 'rport'/'token' keyword arguments: two nested address attributes
 * (local then remote), each with ip + family + port, followed by the
 * connection token.
 */
int dsf(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct rtattr *rta, *addr;
	u_int16_t family, port;
	struct nlmsghdr *nh;
	u_int32_t token;
	int addr_start;
	int off = 0;
	int arg;

	const char *params[5];

	memset(params, 0, 5 * sizeof(const char *));

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_DESTROY,
			    MPTCP_PM_VER);

	/* 5 keyword/value pairs after "<prog> dsf" */
	if (argc < 12)
		syntax(argv);

	/* Params recorded in this order:
	 * <local-ip>, <local-port>, <remote-ip>, <remote-port>, <token>
	 */
	for (arg = 2; arg < argc; arg++) {
		if (!strcmp(argv[arg], "lip")) {
			if (++arg >= argc)
				error(1, 0, " missing local IP");

			params[0] = argv[arg];
		} else if (!strcmp(argv[arg], "lport")) {
			if (++arg >= argc)
				error(1, 0, " missing local port");

			params[1] = argv[arg];
		} else if (!strcmp(argv[arg], "rip")) {
			if (++arg >= argc)
				error(1, 0, " missing remote IP");

			params[2] = argv[arg];
		} else if (!strcmp(argv[arg], "rport")) {
			if (++arg >= argc)
				error(1, 0, " missing remote port");

			params[3] = argv[arg];
		} else if (!strcmp(argv[arg], "token")) {
			if (++arg >= argc)
				error(1, 0, " missing token");

			params[4] = argv[arg];
		} else
			error(1, 0, "unknown keyword %s", argv[arg]);
	}

	/* first pass (arg == 0): local addr; second (arg == 2): remote addr */
	for (arg = 0; arg < 4; arg = arg + 2) {
		/* addr header */
		addr_start = off;
		addr = (void *)(data + off);
		addr->rta_type = NLA_F_NESTED |
			((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
		addr->rta_len = RTA_LENGTH(0);
		off += NLMSG_ALIGN(addr->rta_len);

		/* addr data */
		rta = (void *)(data + off);
		if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
			family = AF_INET;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
			rta->rta_len = RTA_LENGTH(4);
		} else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
			family = AF_INET6;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
			rta->rta_len = RTA_LENGTH(16);
		} else
			error(1, errno, "can't parse ip %s", params[arg]);
		off += NLMSG_ALIGN(rta->rta_len);

		/* family */
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &family, 2);
		off += NLMSG_ALIGN(rta->rta_len);

		/* port */
		port = atoi(params[arg + 1]);
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &port, 2);
		off += NLMSG_ALIGN(rta->rta_len);

		/* patch the nest length now that its payload is complete */
		addr->rta_len = off - addr_start;
	}

	/* token */
	token = strtoul(params[4], NULL, 10);
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ATTR_TOKEN;
	rta->rta_len = RTA_LENGTH(4);
	memcpy(RTA_DATA(rta), &token, 4);
	off += NLMSG_ALIGN(rta->rta_len);

	do_nl_req(fd, nh, off, 0);

	return 0;
}
/* "csf" sub-command: create a subflow.  Builds an
 * MPTCP_PM_CMD_SUBFLOW_CREATE request from the 'lip'/'lid'/'rip'/
 * 'rport'/'token' keyword arguments: two nested address attributes
 * (local with id, remote with port), followed by the connection token.
 */
int csf(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	const char *params[5];
	struct nlmsghdr *nh;
	struct rtattr *addr;
	struct rtattr *rta;
	u_int16_t family;
	u_int32_t token;
	u_int16_t port;
	int addr_start;
	u_int8_t id;
	int off = 0;
	int arg;

	memset(params, 0, 5 * sizeof(const char *));

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_CREATE,
			    MPTCP_PM_VER);

	/* 5 keyword/value pairs after "<prog> csf" */
	if (argc < 12)
		syntax(argv);

	/* Params recorded in this order:
	 * <local-ip>, <local-id>, <remote-ip>, <remote-port>, <token>
	 */
	for (arg = 2; arg < argc; arg++) {
		if (!strcmp(argv[arg], "lip")) {
			if (++arg >= argc)
				error(1, 0, " missing local IP");

			params[0] = argv[arg];
		} else if (!strcmp(argv[arg], "lid")) {
			if (++arg >= argc)
				error(1, 0, " missing local id");

			params[1] = argv[arg];
		} else if (!strcmp(argv[arg], "rip")) {
			if (++arg >= argc)
				error(1, 0, " missing remote ip");

			params[2] = argv[arg];
		} else if (!strcmp(argv[arg], "rport")) {
			if (++arg >= argc)
				error(1, 0, " missing remote port");

			params[3] = argv[arg];
		} else if (!strcmp(argv[arg], "token")) {
			if (++arg >= argc)
				error(1, 0, " missing token");

			params[4] = argv[arg];
		} else
			error(1, 0, "unknown param %s", argv[arg]);
	}

	/* first pass (arg == 0): local addr; second (arg == 2): remote addr */
	for (arg = 0; arg < 4; arg = arg + 2) {
		/* addr header */
		addr_start = off;
		addr = (void *)(data + off);
		addr->rta_type = NLA_F_NESTED |
			((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
		addr->rta_len = RTA_LENGTH(0);
		off += NLMSG_ALIGN(addr->rta_len);

		/* addr data */
		rta = (void *)(data + off);
		if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
			family = AF_INET;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
			rta->rta_len = RTA_LENGTH(4);
		} else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
			family = AF_INET6;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
			rta->rta_len = RTA_LENGTH(16);
		} else
			error(1, errno, "can't parse ip %s", params[arg]);
		off += NLMSG_ALIGN(rta->rta_len);

		/* family */
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &family, 2);
		off += NLMSG_ALIGN(rta->rta_len);

		/* only the remote addr carries a port ... */
		if (arg == 2) {
			/* port */
			port = atoi(params[arg + 1]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
			rta->rta_len = RTA_LENGTH(2);
			memcpy(RTA_DATA(rta), &port, 2);
			off += NLMSG_ALIGN(rta->rta_len);
		}

		/* ... and only the local addr carries an id */
		if (arg == 0) {
			/* id */
			id = atoi(params[arg + 1]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
			rta->rta_len = RTA_LENGTH(1);
			memcpy(RTA_DATA(rta), &id, 1);
			off += NLMSG_ALIGN(rta->rta_len);
		}

		/* patch the nest length now that its payload is complete */
		addr->rta_len = off - addr_start;
	}

	/* token */
	token = strtoul(params[4], NULL, 10);
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ATTR_TOKEN;
	rta->rta_len = RTA_LENGTH(4);
	memcpy(RTA_DATA(rta), &token, 4);
	off += NLMSG_ALIGN(rta->rta_len);

	do_nl_req(fd, nh, off, 0);

	return 0;
}
/* "rem" sub-command: ask the PM to remove an announced address, built from
 * the "id <n>" and "token <t>" keyword arguments.
 */
int remove_addr(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct nlmsghdr *nh;
	struct rtattr *attr;
	u_int32_t conn_token;
	u_int8_t addr_id;
	int off;
	int i;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_REMOVE,
			    MPTCP_PM_VER);

	if (argc < 6)
		syntax(argv);

	for (i = 2; i < argc; i++) {
		if (strcmp(argv[i], "id") == 0) {
			if (++i >= argc)
				error(1, 0, " missing id value");

			/* local address id */
			addr_id = atoi(argv[i]);
			attr = (void *)(data + off);
			attr->rta_type = MPTCP_PM_ATTR_LOC_ID;
			attr->rta_len = RTA_LENGTH(1);
			memcpy(RTA_DATA(attr), &addr_id, 1);
			off += NLMSG_ALIGN(attr->rta_len);
		} else if (strcmp(argv[i], "token") == 0) {
			if (++i >= argc)
				error(1, 0, " missing token value");

			/* MPTCP connection token */
			conn_token = strtoul(argv[i], NULL, 10);
			attr = (void *)(data + off);
			attr->rta_type = MPTCP_PM_ATTR_TOKEN;
			attr->rta_len = RTA_LENGTH(4);
			memcpy(RTA_DATA(attr), &conn_token, 4);
			off += NLMSG_ALIGN(attr->rta_len);
		} else {
			error(1, 0, "unknown keyword %s", argv[i]);
		}
	}

	do_nl_req(fd, nh, off, 0);
	return 0;
}
/* "ann" sub-command: announce an address over an established connection.
 * argv[2] is the local ip; the remaining keyword arguments are
 * "id <n>" (mandatory), "dev <name>", "port <p>" and "token <t>"
 * (mandatory).  Builds a nested MPTCP_PM_ATTR_ADDR (ip, family, id,
 * optional ifindex/port, SIGNAL flag) followed by the token.
 */
int announce_addr(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	u_int32_t flags = MPTCP_PM_ADDR_FLAG_SIGNAL;
	u_int32_t token = UINT_MAX;
	struct rtattr *rta, *addr;
	u_int32_t id = UINT_MAX;
	struct nlmsghdr *nh;
	u_int16_t family;
	int addr_start;
	int off = 0;
	int arg;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_ANNOUNCE,
			    MPTCP_PM_VER);

	if (argc < 7)
		syntax(argv);

	/* local-ip header */
	addr_start = off;
	addr = (void *)(data + off);
	addr->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
	addr->rta_len = RTA_LENGTH(0);
	off += NLMSG_ALIGN(addr->rta_len);

	/* local-ip data */
	/* record addr type */
	rta = (void *)(data + off);
	if (inet_pton(AF_INET, argv[2], RTA_DATA(rta))) {
		family = AF_INET;
		rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
		rta->rta_len = RTA_LENGTH(4);
	} else if (inet_pton(AF_INET6, argv[2], RTA_DATA(rta))) {
		family = AF_INET6;
		rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
		rta->rta_len = RTA_LENGTH(16);
	} else
		error(1, errno, "can't parse ip %s", argv[2]);
	off += NLMSG_ALIGN(rta->rta_len);

	/* addr family */
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
	rta->rta_len = RTA_LENGTH(2);
	memcpy(RTA_DATA(rta), &family, 2);
	off += NLMSG_ALIGN(rta->rta_len);

	for (arg = 3; arg < argc; arg++) {
		if (!strcmp(argv[arg], "id")) {
			/* local-id */
			if (++arg >= argc)
				error(1, 0, " missing id value");

			id = atoi(argv[arg]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
			rta->rta_len = RTA_LENGTH(1);
			memcpy(RTA_DATA(rta), &id, 1);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "dev")) {
			/* for the if_index */
			int32_t ifindex;

			if (++arg >= argc)
				error(1, 0, " missing dev name");

			ifindex = if_nametoindex(argv[arg]);
			if (!ifindex)
				error(1, errno, "unknown device %s", argv[arg]);

			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_IF_IDX;
			rta->rta_len = RTA_LENGTH(4);
			memcpy(RTA_DATA(rta), &ifindex, 4);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "port")) {
			/* local-port (optional) */
			u_int16_t port;

			if (++arg >= argc)
				error(1, 0, " missing port value");

			port = atoi(argv[arg]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
			rta->rta_len = RTA_LENGTH(2);
			memcpy(RTA_DATA(rta), &port, 2);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "token")) {
			/* MPTCP connection token */
			if (++arg >= argc)
				error(1, 0, " missing token value");

			token = strtoul(argv[arg], NULL, 10);
		} else
			error(1, 0, "unknown keyword %s", argv[arg]);
	}

	/* addr flags */
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
	rta->rta_len = RTA_LENGTH(4);
	memcpy(RTA_DATA(rta), &flags, 4);
	off += NLMSG_ALIGN(rta->rta_len);

	/* close the nested addr attribute */
	addr->rta_len = off - addr_start;

	/* id and token are mandatory; UINT_MAX means "not seen" */
	if (id == UINT_MAX || token == UINT_MAX)
		error(1, 0, " missing mandatory inputs");

	/* token */
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ATTR_TOKEN;
	rta->rta_len = RTA_LENGTH(4);
	memcpy(RTA_DATA(rta), &token, 4);
	off += NLMSG_ALIGN(rta->rta_len);

	do_nl_req(fd, nh, off, 0);

	return 0;
}
/* "add" sub-command: add argv[2] to the PM endpoint list.  Optional
 * keyword arguments: "flags <list>" (comma separated: subflow, signal,
 * backup, fullmesh), "id <n>", "dev <name>", "port <p>" (signal only).
 * Everything goes inside a single nested MPTCP_PM_ATTR_ADDR attribute.
 */
int add_addr(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct rtattr *rta, *nest;
	struct nlmsghdr *nh;
	u_int32_t flags = 0;
	u_int16_t family;
	int nest_start;
	u_int8_t id;
	int off = 0;
	int arg;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_ADD_ADDR,
			    MPTCP_PM_VER);

	if (argc < 3)
		syntax(argv);

	nest_start = off;
	nest = (void *)(data + off);
	nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
	nest->rta_len = RTA_LENGTH(0);
	off += NLMSG_ALIGN(nest->rta_len);

	/* addr data */
	rta = (void *)(data + off);
	if (inet_pton(AF_INET, argv[2], RTA_DATA(rta))) {
		family = AF_INET;
		rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
		rta->rta_len = RTA_LENGTH(4);
	} else if (inet_pton(AF_INET6, argv[2], RTA_DATA(rta))) {
		family = AF_INET6;
		rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
		rta->rta_len = RTA_LENGTH(16);
	} else
		error(1, errno, "can't parse ip %s", argv[2]);
	off += NLMSG_ALIGN(rta->rta_len);

	/* family */
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
	rta->rta_len = RTA_LENGTH(2);
	memcpy(RTA_DATA(rta), &family, 2);
	off += NLMSG_ALIGN(rta->rta_len);

	for (arg = 3; arg < argc; arg++) {
		if (!strcmp(argv[arg], "flags")) {
			char *tok, *str;

			/* flags */
			if (++arg >= argc)
				error(1, 0, " missing flags value");

			/* do not support flag list yet */
			for (str = argv[arg]; (tok = strtok(str, ","));
			     str = NULL) {
				if (!strcmp(tok, "subflow"))
					flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
				else if (!strcmp(tok, "signal"))
					flags |= MPTCP_PM_ADDR_FLAG_SIGNAL;
				else if (!strcmp(tok, "backup"))
					flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
				else if (!strcmp(tok, "fullmesh"))
					flags |= MPTCP_PM_ADDR_FLAG_FULLMESH;
				else
					error(1, errno,
					      "unknown flag %s", argv[arg]);
			}

			/* signal and fullmesh are mutually exclusive */
			if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
			    flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
				error(1, errno, "error flag fullmesh");
			}

			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
			rta->rta_len = RTA_LENGTH(4);
			memcpy(RTA_DATA(rta), &flags, 4);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "id")) {
			if (++arg >= argc)
				error(1, 0, " missing id value");

			id = atoi(argv[arg]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
			rta->rta_len = RTA_LENGTH(1);
			memcpy(RTA_DATA(rta), &id, 1);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "dev")) {
			int32_t ifindex;

			if (++arg >= argc)
				error(1, 0, " missing dev name");

			ifindex = if_nametoindex(argv[arg]);
			if (!ifindex)
				error(1, errno, "unknown device %s", argv[arg]);

			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_IF_IDX;
			rta->rta_len = RTA_LENGTH(4);
			memcpy(RTA_DATA(rta), &ifindex, 4);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "port")) {
			u_int16_t port;

			if (++arg >= argc)
				error(1, 0, " missing port value");

			/* the kernel only accepts a port for signal endpoints */
			if (!(flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
				error(1, 0, " flags must be signal when using port");

			port = atoi(argv[arg]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
			rta->rta_len = RTA_LENGTH(2);
			memcpy(RTA_DATA(rta), &port, 2);
			off += NLMSG_ALIGN(rta->rta_len);
		} else
			error(1, 0, "unknown keyword %s", argv[arg]);
	}
	/* close the nested addr attribute */
	nest->rta_len = off - nest_start;

	do_nl_req(fd, nh, off, 0);
	return 0;
}
/* "del" sub-command: delete endpoint argv[2] (an address id) from the PM.
 * An id of 0 additionally requires the IP address itself as argv[3].
 */
int del_addr(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct rtattr *rta, *nest;
	struct nlmsghdr *nh;
	u_int16_t family;
	int nest_start;
	u_int8_t id;
	int off = 0;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_DEL_ADDR,
			    MPTCP_PM_VER);

	/* the only argument is the address id (nonzero) */
	if (argc != 3 && argc != 4)
		syntax(argv);

	id = atoi(argv[2]);
	/* zero id with the IP address */
	if (!id && argc != 4)
		syntax(argv);

	nest_start = off;
	nest = (void *)(data + off);
	nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
	nest->rta_len =  RTA_LENGTH(0);
	off += NLMSG_ALIGN(nest->rta_len);

	/* build a dummy addr with only the ID set */
	rta = (void *)(data + off);
	rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
	rta->rta_len = RTA_LENGTH(1);
	memcpy(RTA_DATA(rta), &id, 1);
	off += NLMSG_ALIGN(rta->rta_len);

	if (!id) {
		/* id 0: identify the endpoint by its address instead */
		/* addr data */
		rta = (void *)(data + off);
		if (inet_pton(AF_INET, argv[3], RTA_DATA(rta))) {
			family = AF_INET;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
			rta->rta_len = RTA_LENGTH(4);
		} else if (inet_pton(AF_INET6, argv[3], RTA_DATA(rta))) {
			family = AF_INET6;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
			rta->rta_len = RTA_LENGTH(16);
		} else {
			error(1, errno, "can't parse ip %s", argv[3]);
		}
		off += NLMSG_ALIGN(rta->rta_len);

		/* family */
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &family, 2);
		off += NLMSG_ALIGN(rta->rta_len);
	}
	/* close the nested addr attribute */
	nest->rta_len = off - nest_start;

	do_nl_req(fd, nh, off, 0);
	return 0;
}
/* Pretty-print the content of one nested MPTCP_PM_ATTR_ADDR attribute:
 * address (with optional port), id, symbolic flags and device name,
 * terminated by a newline.  Exits if the address family does not match
 * the address attribute type.
 */
static void print_addr(struct rtattr *attrs, int len)
{
	uint16_t family = 0;
	uint16_t port = 0;
	char str[1024];
	uint32_t flags;
	uint8_t id;

	while (RTA_OK(attrs, len)) {
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_FAMILY)
			memcpy(&family, RTA_DATA(attrs), 2);
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_PORT)
			memcpy(&port, RTA_DATA(attrs), 2);
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ADDR4) {
			if (family != AF_INET)
				error(1, errno, "wrong IP (v4) for family %d",
				      family);
			inet_ntop(AF_INET, RTA_DATA(attrs), str, sizeof(str));
			printf("%s", str);
			if (port)
				printf(" %d", port);
		}
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ADDR6) {
			if (family != AF_INET6)
				error(1, errno, "wrong IP (v6) for family %d",
				      family);
			inet_ntop(AF_INET6, RTA_DATA(attrs), str, sizeof(str));
			printf("%s", str);
			if (port)
				printf(" %d", port);
		}
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ID) {
			memcpy(&id, RTA_DATA(attrs), 1);
			printf("id %d ", id);
		}
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_FLAGS) {
			memcpy(&flags, RTA_DATA(attrs), 4);

			printf("flags ");
			/* print each known flag by name, clearing it so the
			 * trailing comma is only emitted when more follow
			 */
			if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
				printf("signal");
				flags &= ~MPTCP_PM_ADDR_FLAG_SIGNAL;
				if (flags)
					printf(",");
			}

			if (flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
				printf("subflow");
				flags &= ~MPTCP_PM_ADDR_FLAG_SUBFLOW;
				if (flags)
					printf(",");
			}

			if (flags & MPTCP_PM_ADDR_FLAG_BACKUP) {
				printf("backup");
				flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
				if (flags)
					printf(",");
			}

			if (flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
				printf("fullmesh");
				flags &= ~MPTCP_PM_ADDR_FLAG_FULLMESH;
				if (flags)
					printf(",");
			}

			if (flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
				printf("implicit");
				flags &= ~MPTCP_PM_ADDR_FLAG_IMPLICIT;
				if (flags)
					printf(",");
			}

			/* bump unknown flags, if any */
			if (flags)
				printf("0x%x", flags);

			printf(" ");
		}
		if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_IF_IDX) {
			char name[IF_NAMESIZE], *ret;
			int32_t ifindex;

			memcpy(&ifindex, RTA_DATA(attrs), 4);
			ret = if_indextoname(ifindex, name);
			if (ret)
				printf("dev %s ", ret);
			else
				printf("dev unknown/%d", ifindex);
		}

		attrs = RTA_NEXT(attrs, len);
	}
	printf("\n");
}
/* Walk a netlink answer of 'total_len' bytes and print every nested
 * MPTCP_PM_ATTR_ADDR attribute found in messages of type 'pm_family'.
 */
static void print_addrs(struct nlmsghdr *nh, int pm_family, int total_len)
{
	struct rtattr *attrs;

	for (; NLMSG_OK(nh, total_len); nh = NLMSG_NEXT(nh, total_len)) {
		int len = nh->nlmsg_len;

		if (nh->nlmsg_type == NLMSG_DONE)
			break;
		if (nh->nlmsg_type == NLMSG_ERROR)
			nl_error(nh);
		if (nh->nlmsg_type != pm_family)
			continue;

		len -= NLMSG_LENGTH(GENL_HDRLEN);
		attrs = (struct rtattr *) ((char *) NLMSG_DATA(nh) +
					   GENL_HDRLEN);
		while (RTA_OK(attrs, len)) {
			if (attrs->rta_type ==
			    (MPTCP_PM_ATTR_ADDR | NLA_F_NESTED))
				/* NOTE(review): passes attrs->rta_len (which
				 * includes the 4-byte attr header) as the
				 * nested payload length; RTA_PAYLOAD(attrs)
				 * looks more precise -- confirm intended
				 */
				print_addr((void *)RTA_DATA(attrs),
					   attrs->rta_len);
			attrs = RTA_NEXT(attrs, len);
		}
	}
}
/* "get" sub-command: fetch and print the endpoint whose id is argv[2]. */
int get_addr(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct rtattr *attr, *nested;
	struct nlmsghdr *nh;
	int nested_start;
	u_int8_t addr_id;
	int off;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
			    MPTCP_PM_VER);

	/* the only argument is the address id */
	if (argc != 3)
		syntax(argv);
	addr_id = atoi(argv[2]);

	/* open the nested addr attribute */
	nested_start = off;
	nested = (void *)(data + off);
	nested->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
	nested->rta_len = RTA_LENGTH(0);
	off += NLMSG_ALIGN(nested->rta_len);

	/* build a dummy addr carrying only the ID */
	attr = (void *)(data + off);
	attr->rta_type = MPTCP_PM_ADDR_ATTR_ID;
	attr->rta_len = RTA_LENGTH(1);
	memcpy(RTA_DATA(attr), &addr_id, 1);
	off += NLMSG_ALIGN(attr->rta_len);

	/* close the nest and issue the request, printing the answer */
	nested->rta_len = off - nested_start;

	print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
	return 0;
}
/* "dump" sub-command: list every endpoint known to the PM. */
int dump_addrs(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct nlmsghdr *nh;
	int off;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
			    MPTCP_PM_VER);

	/* a GET with NLM_F_DUMP returns the whole endpoint list */
	nh->nlmsg_flags |= NLM_F_DUMP;
	nh->nlmsg_seq = 1;
	nh->nlmsg_pid = getpid();
	nh->nlmsg_len = off;

	print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
	return 0;
}
/* "flush" sub-command: remove every endpoint from the PM in one shot. */
int flush_addrs(int fd, int pm_family, int argc, char *argv[])
{
	char req[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		 NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		 1024];
	struct nlmsghdr *nh = (void *)req;
	int off;

	memset(req, 0, sizeof(req));
	off = init_genl_req(req, pm_family, MPTCP_PM_CMD_FLUSH_ADDRS,
			    MPTCP_PM_VER);

	/* no attributes needed: the command alone is the whole request */
	do_nl_req(fd, nh, off, 0);
	return 0;
}
/* Print the add_addr_accepted ("accept") and subflow ("subflows") limits
 * found in a GET_LIMITS answer, one "name value" line per attribute.
 */
static void print_limits(struct nlmsghdr *nh, int pm_family, int total_len)
{
	struct rtattr *attrs;
	uint32_t limit;

	for (; NLMSG_OK(nh, total_len); nh = NLMSG_NEXT(nh, total_len)) {
		int rem = nh->nlmsg_len;

		if (nh->nlmsg_type == NLMSG_DONE)
			break;
		if (nh->nlmsg_type == NLMSG_ERROR)
			nl_error(nh);
		if (nh->nlmsg_type != pm_family)
			continue;

		rem -= NLMSG_LENGTH(GENL_HDRLEN);
		attrs = (struct rtattr *) ((char *) NLMSG_DATA(nh) +
					   GENL_HDRLEN);
		for (; RTA_OK(attrs, rem); attrs = RTA_NEXT(attrs, rem)) {
			int type = attrs->rta_type;

			/* only the two limit attributes are of interest */
			if (type != MPTCP_PM_ATTR_RCV_ADD_ADDRS &&
			    type != MPTCP_PM_ATTR_SUBFLOWS)
				continue;

			memcpy(&limit, RTA_DATA(attrs), 4);
			printf("%s %u\n", type == MPTCP_PM_ATTR_SUBFLOWS ?
					  "subflows" : "accept", limit);
		}
	}
}
/* "limits" sub-command: with no extra argument, fetch and print the PM
 * limits; with two arguments (<rcv-add-addrs> <subflows>), set them.
 */
int get_set_limits(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	uint32_t rcv_addr = 0, subflows = 0;
	int cmd, len = sizeof(data);
	struct nlmsghdr *nh;
	int off = 0;

	/* limit */
	if (argc == 4) {
		rcv_addr = atoi(argv[2]);
		subflows = atoi(argv[3]);
		cmd = MPTCP_PM_CMD_SET_LIMITS;
	} else {
		cmd = MPTCP_PM_CMD_GET_LIMITS;
	}

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, cmd, MPTCP_PM_VER);

	/* limit */
	if (cmd == MPTCP_PM_CMD_SET_LIMITS) {
		struct rtattr *rta = (void *)(data + off);

		rta->rta_type = MPTCP_PM_ATTR_RCV_ADD_ADDRS;
		rta->rta_len = RTA_LENGTH(4);
		memcpy(RTA_DATA(rta), &rcv_addr, 4);
		off += NLMSG_ALIGN(rta->rta_len);

		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ATTR_SUBFLOWS;
		rta->rta_len = RTA_LENGTH(4);
		memcpy(RTA_DATA(rta), &subflows, 4);
		off += NLMSG_ALIGN(rta->rta_len);

		/* do not expect a reply */
		len = 0;
	}

	len = do_nl_req(fd, nh, off, len);
	if (cmd == MPTCP_PM_CMD_GET_LIMITS)
		print_limits(nh, pm_family, len);
	return 0;
}
/* "listen" sub-command: open an MPTCP listening socket bound to
 * argv[2]:argv[3] and block in pause() until killed.  Exits on any
 * socket/setsockopt error; bind/listen failures just close and return.
 */
int add_listener(int argc, char *argv[])
{
	struct sockaddr_storage addr;
	struct sockaddr_in6 *a6;
	struct sockaddr_in *a4;
	u_int16_t family;
	int enable = 1;
	int sock;
	int err;

	if (argc < 4)
		syntax(argv);

	memset(&addr, 0, sizeof(struct sockaddr_storage));
	/* both views alias the same storage; only one is filled in */
	a4 = (struct sockaddr_in *)&addr;
	a6 = (struct sockaddr_in6 *)&addr;

	if (inet_pton(AF_INET, argv[2], &a4->sin_addr)) {
		family = AF_INET;
		a4->sin_family = family;
		a4->sin_port = htons(atoi(argv[3]));
	} else if (inet_pton(AF_INET6, argv[2], &a6->sin6_addr)) {
		family = AF_INET6;
		a6->sin6_family = family;
		a6->sin6_port = htons(atoi(argv[3]));
	} else
		error(1, errno, "can't parse ip %s", argv[2]);

	sock = socket(family, SOCK_STREAM, IPPROTO_MPTCP);
	if (sock < 0)
		error(1, errno, "can't create listener sock\n");

	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable))) {
		close(sock);
		error(1, errno, "can't set SO_REUSEADDR on listener sock\n");
	}

	err = bind(sock, (struct sockaddr *)&addr,
		   ((family == AF_INET) ? sizeof(struct sockaddr_in) :
		    sizeof(struct sockaddr_in6)));

	if (err == 0 && listen(sock, 30) == 0)
		pause();

	close(sock);
	return 0;
}
/* "set" sub-command: change the flags of an endpoint or of a subflow.
 * The endpoint is selected either by "id <n>" or by its IP (argv[2]);
 * further keywords: "token <t>", "flags <list>" (backup/nobackup,
 * fullmesh/nofullmesh), "port <p>" (address mode only), and, for
 * per-subflow changes, the remote side via "rip <ip>" / "rport <p>".
 */
int set_flags(int fd, int pm_family, int argc, char *argv[])
{
	char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
		  NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
		  1024];
	struct rtattr *rta, *nest;
	struct nlmsghdr *nh;
	u_int32_t flags = 0;
	u_int32_t token = 0;
	u_int16_t rport = 0;
	u_int16_t family;
	void *rip = NULL;
	int nest_start;
	int use_id = 0;
	u_int8_t id;
	int off = 0;
	int arg = 2;

	memset(data, 0, sizeof(data));
	nh = (void *)data;
	off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SET_FLAGS,
			    MPTCP_PM_VER);

	if (argc < 3)
		syntax(argv);

	nest_start = off;
	nest = (void *)(data + off);
	nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
	nest->rta_len = RTA_LENGTH(0);
	off += NLMSG_ALIGN(nest->rta_len);

	if (!strcmp(argv[arg], "id")) {
		/* select the endpoint by id */
		if (++arg >= argc)
			error(1, 0, " missing id value");

		use_id = 1;
		id = atoi(argv[arg]);
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
		rta->rta_len = RTA_LENGTH(1);
		memcpy(RTA_DATA(rta), &id, 1);
		off += NLMSG_ALIGN(rta->rta_len);
	} else {
		/* select the endpoint by local address */
		/* addr data */
		rta = (void *)(data + off);
		if (inet_pton(AF_INET, argv[arg], RTA_DATA(rta))) {
			family = AF_INET;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
			rta->rta_len = RTA_LENGTH(4);
		} else if (inet_pton(AF_INET6, argv[arg], RTA_DATA(rta))) {
			family = AF_INET6;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
			rta->rta_len = RTA_LENGTH(16);
		} else {
			error(1, errno, "can't parse ip %s", argv[arg]);
		}
		off += NLMSG_ALIGN(rta->rta_len);

		/* family */
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &family, 2);
		off += NLMSG_ALIGN(rta->rta_len);
	}

	if (++arg >= argc)
		error(1, 0, " missing flags keyword");

	for (; arg < argc; arg++) {
		if (!strcmp(argv[arg], "token")) {
			if (++arg >= argc)
				error(1, 0, " missing token value");

			/* token */
			token = strtoul(argv[arg], NULL, 10);
		} else if (!strcmp(argv[arg], "flags")) {
			char *tok, *str;

			/* flags */
			if (++arg >= argc)
				error(1, 0, " missing flags value");

			/* "nobackup"/"nofullmesh" are accepted but simply
			 * leave the corresponding bit cleared
			 */
			for (str = argv[arg]; (tok = strtok(str, ","));
			     str = NULL) {
				if (!strcmp(tok, "backup"))
					flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
				else if (!strcmp(tok, "fullmesh"))
					flags |= MPTCP_PM_ADDR_FLAG_FULLMESH;
				else if (strcmp(tok, "nobackup") &&
					 strcmp(tok, "nofullmesh"))
					error(1, errno,
					      "unknown flag %s", argv[arg]);
			}

			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
			rta->rta_len = RTA_LENGTH(4);
			memcpy(RTA_DATA(rta), &flags, 4);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "port")) {
			u_int16_t port;

			if (use_id)
				error(1, 0, " port can't be used with id");

			if (++arg >= argc)
				error(1, 0, " missing port value");

			port = atoi(argv[arg]);
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
			rta->rta_len = RTA_LENGTH(2);
			memcpy(RTA_DATA(rta), &port, 2);
			off += NLMSG_ALIGN(rta->rta_len);
		} else if (!strcmp(argv[arg], "rport")) {
			if (++arg >= argc)
				error(1, 0, " missing remote port");

			rport = atoi(argv[arg]);
		} else if (!strcmp(argv[arg], "rip")) {
			if (++arg >= argc)
				error(1, 0, " missing remote ip");

			rip = argv[arg];
		} else {
			error(1, 0, "unknown keyword %s", argv[arg]);
		}
	}
	/* close the nested local-addr attribute */
	nest->rta_len = off - nest_start;

	/* token */
	if (token) {
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ATTR_TOKEN;
		rta->rta_len = RTA_LENGTH(4);
		memcpy(RTA_DATA(rta), &token, 4);
		off += NLMSG_ALIGN(rta->rta_len);
	}

	/* remote addr/port */
	if (rip) {
		nest_start = off;
		nest = (void *)(data + off);
		nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR_REMOTE;
		nest->rta_len = RTA_LENGTH(0);
		off += NLMSG_ALIGN(nest->rta_len);

		/* addr data */
		rta = (void *)(data + off);
		if (inet_pton(AF_INET, rip, RTA_DATA(rta))) {
			family = AF_INET;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
			rta->rta_len = RTA_LENGTH(4);
		} else if (inet_pton(AF_INET6, rip, RTA_DATA(rta))) {
			family = AF_INET6;
			rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
			rta->rta_len = RTA_LENGTH(16);
		} else {
			error(1, errno, "can't parse ip %s", (char *)rip);
		}
		off += NLMSG_ALIGN(rta->rta_len);

		/* family */
		rta = (void *)(data + off);
		rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
		rta->rta_len = RTA_LENGTH(2);
		memcpy(RTA_DATA(rta), &family, 2);
		off += NLMSG_ALIGN(rta->rta_len);

		if (rport) {
			rta = (void *)(data + off);
			rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
			rta->rta_len = RTA_LENGTH(2);
			memcpy(RTA_DATA(rta), &rport, 2);
			off += NLMSG_ALIGN(rta->rta_len);
		}

		/* close the nested remote-addr attribute */
		nest->rta_len = off - nest_start;
	}

	do_nl_req(fd, nh, off, 0);
	return 0;
}
/* Entry point: open a generic netlink socket, resolve the MPTCP PM
 * family/mcast group, then dispatch on the sub-command name in argv[1].
 */
int main(int argc, char *argv[])
{
	int events_mcast_grp;
	int pm_family;
	int fd;

	if (argc < 2)
		syntax(argv);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
	if (fd == -1)
		error(1, errno, "socket netlink");

	resolve_mptcp_pm_netlink(fd, &pm_family, &events_mcast_grp);

	if (!strcmp(argv[1], "add"))
		return add_addr(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "ann"))
		return announce_addr(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "rem"))
		return remove_addr(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "csf"))
		return csf(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "dsf"))
		return dsf(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "del"))
		return del_addr(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "flush"))
		return flush_addrs(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "get"))
		return get_addr(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "dump"))
		return dump_addrs(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "limits"))
		return get_set_limits(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "set"))
		return set_flags(fd, pm_family, argc, argv);
	else if (!strcmp(argv[1], "events"))
		return capture_events(fd, events_mcast_grp);
	else if (!strcmp(argv[1], "listen"))
		return add_listener(argc, argv);

	fprintf(stderr, "unknown sub-command: %s", argv[1]);
	syntax(argv);
	return 0;
}
| linux-master | tools/testing/selftests/net/mptcp/pm_nl_ctl.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <netdb.h>
#include <netinet/in.h>
#include <linux/tcp.h>
/* Address family under test; switched to AF_INET6 by the -6 option. */
static int pf = AF_INET;

/* Fallbacks for toolchains whose uapi headers predate MPTCP support. */
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

#ifndef MPTCP_INFO
/* Local mirror of the uapi struct returned by getsockopt(MPTCP_INFO). */
struct mptcp_info {
	__u8	mptcpi_subflows;
	__u8	mptcpi_add_addr_signal;
	__u8	mptcpi_add_addr_accepted;
	__u8	mptcpi_subflows_max;
	__u8	mptcpi_add_addr_signal_max;
	__u8	mptcpi_add_addr_accepted_max;
	__u32	mptcpi_flags;
	__u32	mptcpi_token;
	__u64	mptcpi_write_seq;
	__u64	mptcpi_snd_una;
	__u64	mptcpi_rcv_nxt;
	__u8	mptcpi_local_addr_used;
	__u8	mptcpi_local_addr_max;
	__u8	mptcpi_csum_enabled;
	__u32	mptcpi_retransmits;
	__u64	mptcpi_bytes_retrans;
	__u64	mptcpi_bytes_sent;
	__u64	mptcpi_bytes_received;
	__u64	mptcpi_bytes_acked;
};

/* Per-subflow array header shared by MPTCP_TCPINFO / MPTCP_SUBFLOW_ADDRS. */
struct mptcp_subflow_data {
	__u32		size_subflow_data;		/* size of this structure in userspace */
	__u32		num_subflows;			/* must be 0, set by kernel */
	__u32		size_kernel;			/* must be 0, set by kernel */
	__u32		size_user;			/* size of one element in data[] */
} __attribute__((aligned(8)));

/* Local and remote endpoint addresses of one subflow. */
struct mptcp_subflow_addrs {
	union {
		__kernel_sa_family_t sa_family;
		struct sockaddr sa_local;
		struct sockaddr_in sin_local;
		struct sockaddr_in6 sin6_local;
		struct __kernel_sockaddr_storage ss_local;
	};
	union {
		struct sockaddr sa_remote;
		struct sockaddr_in sin_remote;
		struct sockaddr_in6 sin6_remote;
		struct __kernel_sockaddr_storage ss_remote;
	};
};

#define MPTCP_INFO		1
#define MPTCP_TCPINFO		2
#define MPTCP_SUBFLOW_ADDRS	3
#endif

#ifndef MPTCP_FULL_INFO
/* One entry of the subflow_info array filled by MPTCP_FULL_INFO. */
struct mptcp_subflow_info {
	__u32				id;
	struct mptcp_subflow_addrs	addrs;
};

/* Request/reply layout for getsockopt(MPTCP_FULL_INFO). */
struct mptcp_full_info {
	__u32		size_tcpinfo_kernel;	/* must be 0, set by kernel */
	__u32		size_tcpinfo_user;
	__u32		size_sfinfo_kernel;	/* must be 0, set by kernel */
	__u32		size_sfinfo_user;
	__u32		num_subflows;		/* must be 0, set by kernel (real subflow count) */
	__u32		size_arrays_user;	/* max subflows that userspace is interested in;
						 * the buffers at subflow_info/tcp_info
						 * are respectively at least:
						 *  size_arrays * size_sfinfo_user
						 *  size_arrays * size_tcpinfo_user
						 * bytes wide
						 */
	__aligned_u64		subflow_info;
	__aligned_u64		tcp_info;
	struct mptcp_info	mptcp_info;
};

#define MPTCP_FULL_INFO		4
#endif

/* Accumulated sockopt samples for one connection, shared by the checks. */
struct so_state {
	struct mptcp_info mi;		/* first MPTCP_INFO sample (baseline) */
	struct mptcp_info last_sample;	/* most recent MPTCP_INFO sample */
	struct tcp_info tcp_info;	/* most recent per-subflow tcp_info */
	struct mptcp_subflow_addrs addrs;	/* most recent subflow addresses */
	uint64_t mptcpi_rcv_delta;	/* rcv_nxt progress since baseline */
	uint64_t tcpi_rcv_delta;	/* subflow rx bytes seen early (race case) */
	bool pkt_stats_avail;		/* kernel returned the byte counters? */
};

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
/* Report @msg with the errno description, then terminate. */
static void die_perror(const char *msg)
{
	perror(msg);
	exit(EXIT_FAILURE);
}
/* Print the usage line and exit with status @r. */
static void die_usage(int r)
{
	fputs("Usage: mptcp_sockopt [-6]\n", stderr);
	exit(r);
}
/* printf-style fatal error: print @fmt plus a newline, then exit(1). */
static void xerror(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
	exit(1);
}
/*
 * Map a getaddrinfo() error code to a human-readable string.
 * EAI_SYSTEM means the real cause is in errno.
 */
static const char *getxinfo_strerr(int err)
{
	return err == EAI_SYSTEM ? strerror(errno) : gai_strerror(err);
}
/* getaddrinfo() wrapper that aborts the process on resolution failure. */
static void xgetaddrinfo(const char *node, const char *service,
			 const struct addrinfo *hints,
			 struct addrinfo **res)
{
	int err = getaddrinfo(node, service, hints, res);

	if (!err)
		return;

	fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n",
		node ? node : "", service ? service : "",
		getxinfo_strerr(err));
	exit(1);
}
/*
 * Create an MPTCP socket listening on @listenaddr:@port.
 * Tries every address returned by getaddrinfo(); dies if none can be
 * bound or if listen() fails.  Returns the listening fd.
 *
 * Fix: drop the dead re-assignment of hints.ai_family after
 * xgetaddrinfo() — hints is never read again at that point.
 */
static int sock_listen_mptcp(const char * const listenaddr,
			     const char * const port)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
		.ai_flags = AI_PASSIVE | AI_NUMERICHOST
	};
	struct addrinfo *a, *addr;
	int sock = -1;
	int one = 1;

	hints.ai_family = pf;

	xgetaddrinfo(listenaddr, port, &hints, &addr);

	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, IPPROTO_MPTCP);
		if (sock < 0)
			continue;

		/* best effort: a failed SO_REUSEADDR is only reported */
		if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one,
				     sizeof(one)))
			perror("setsockopt");

		if (bind(sock, a->ai_addr, a->ai_addrlen) == 0)
			break; /* success */

		perror("bind");
		close(sock);
		sock = -1;
	}

	freeaddrinfo(addr);

	if (sock < 0)
		xerror("could not create listen socket");

	if (listen(sock, 20))
		die_perror("listen");

	return sock;
}
/*
 * Connect a socket of protocol @proto to @remoteaddr:@port.
 * Returns the connected fd; dies on failure.
 * NOTE(review): die_perror() inside the loop aborts on the first
 * failed connect(), so subsequent addrinfo entries are never tried —
 * presumably intentional for a single-address selftest; confirm.
 */
static int sock_connect_mptcp(const char * const remoteaddr,
			      const char * const port, int proto)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
	};
	struct addrinfo *a, *addr;
	int sock = -1;
	hints.ai_family = pf;
	xgetaddrinfo(remoteaddr, port, &hints, &addr);
	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, proto);
		if (sock < 0)
			continue;
		if (connect(sock, a->ai_addr, a->ai_addrlen) == 0)
			break; /* success */
		die_perror("connect");
	}
	if (sock < 0)
		xerror("could not create connect socket");
	freeaddrinfo(addr);
	return sock;
}
/* Parse command-line flags: -h prints usage, -6 selects IPv6. */
static void parse_opts(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "h6")) != -1) {
		if (opt == 'h')
			die_usage(0);
		else if (opt == '6')
			pf = AF_INET6;
		else
			die_usage(1);
	}
}
/*
 * Feed getsockopt(@optname) deliberately malformed mptcp_subflow_data
 * headers and check the kernel rejects (or tolerates) each case as
 * expected.  @fd must be a connected MPTCP socket with one subflow.
 */
static void do_getsockopt_bogus_sf_data(int fd, int optname)
{
	struct mptcp_subflow_data good_data;
	struct bogus_data {
		struct mptcp_subflow_data d;
		char buf[2];
	} bd;
	socklen_t olen, _olen;
	int ret;
	memset(&bd, 0, sizeof(bd));
	memset(&good_data, 0, sizeof(good_data));
	olen = sizeof(good_data);
	good_data.size_subflow_data = olen;
	/* all-zero header must be rejected */
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret < 0); /* 0 size_subflow_data */
	assert(olen == sizeof(good_data));
	/* valid header: kernel fills num_subflows/size_kernel */
	bd.d = good_data;
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret == 0);
	assert(olen == sizeof(good_data));
	assert(bd.d.num_subflows == 1);
	assert(bd.d.size_kernel > 0);
	assert(bd.d.size_user == 0);
	/* olen smaller than the header must fail and stay untouched */
	bd.d = good_data;
	_olen = rand() % olen;
	olen = _olen;
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret < 0);	/* bogus olen */
	assert(olen == _olen);	/* must be unchanged */
	/* caller-set size_kernel must be rejected */
	bd.d = good_data;
	olen = sizeof(good_data);
	bd.d.size_kernel = 1;
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret < 0);	/* size_kernel not 0 */
	/* caller-set num_subflows must be rejected */
	bd.d = good_data;
	olen = sizeof(good_data);
	bd.d.num_subflows = 1;
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret < 0);	/* num_subflows not 0 */
	/* forward compat check: larger struct mptcp_subflow_data on 'old' kernel */
	bd.d = good_data;
	olen = sizeof(bd);
	bd.d.size_subflow_data = sizeof(bd);
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen);
	assert(ret == 0);
	/* olen must be truncated to real data size filled by kernel: */
	assert(olen == sizeof(good_data));
	assert(bd.d.size_subflow_data == sizeof(bd));
	/* oversized header + 1-byte payload slot: kernel keeps olen as-is */
	bd.d = good_data;
	bd.d.size_subflow_data += 1;
	bd.d.size_user = 1;
	olen = bd.d.size_subflow_data + 1;
	_olen = olen;
	ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &_olen);
	assert(ret == 0);
	/* no truncation, kernel should have filled 1 byte of optname payload in buf[1]: */
	assert(olen == _olen);
	assert(bd.d.size_subflow_data == sizeof(good_data) + 1);
	assert(bd.buf[0] == 0);
}
/*
 * Sample getsockopt(MPTCP_INFO) into @s and check the write sequence
 * advanced by exactly @w bytes since the first (baseline) sample.
 */
static void do_getsockopt_mptcp_info(struct so_state *s, int fd, size_t w)
{
	struct mptcp_info i;
	socklen_t olen;
	int ret;
	olen = sizeof(i);
	ret = getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &i, &olen);
	if (ret < 0)
		die_perror("getsockopt MPTCP_INFO");
	/* older kernels return a shorter struct without the byte counters */
	s->pkt_stats_avail = olen >= sizeof(i);
	s->last_sample = i;
	/* first call: record baseline (write_seq is never 0 once connected) */
	if (s->mi.mptcpi_write_seq == 0)
		s->mi = i;
	assert(s->mi.mptcpi_write_seq + w == i.mptcpi_write_seq);
	s->mptcpi_rcv_delta = i.mptcpi_rcv_nxt - s->mi.mptcpi_rcv_nxt;
}
/*
 * Fetch per-subflow tcp_info via getsockopt(MPTCP_TCPINFO), validate
 * the array header, and retry (up to 5s) until the subflow byte
 * counters match the expected @r received / @w sent totals.
 *
 * Fix: the failure path called xerror() with "%d" and "%m"
 * conversions but passed no variadic arguments — undefined behavior;
 * now passes @tries (and %m expands errno, glibc extension).
 */
static void do_getsockopt_tcp_info(struct so_state *s, int fd, size_t r, size_t w)
{
	struct my_tcp_info {
		struct mptcp_subflow_data d;
		struct tcp_info ti[2];
	} ti;
	int ret, tries = 5;
	socklen_t olen;

	do {
		memset(&ti, 0, sizeof(ti));
		ti.d.size_subflow_data = sizeof(struct mptcp_subflow_data);
		ti.d.size_user = sizeof(struct tcp_info);
		olen = sizeof(ti);

		ret = getsockopt(fd, SOL_MPTCP, MPTCP_TCPINFO, &ti, &olen);
		if (ret < 0)
			xerror("getsockopt MPTCP_TCPINFO (tries %d, %m)", tries);

		/* header sanity: one subflow, element size negotiated down */
		assert(olen <= sizeof(ti));
		assert(ti.d.size_kernel > 0);
		assert(ti.d.size_user ==
		       MIN(ti.d.size_kernel, sizeof(struct tcp_info)));
		assert(ti.d.num_subflows == 1);
		assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data));
		olen -= sizeof(struct mptcp_subflow_data);
		assert(olen == ti.d.size_user);

		s->tcp_info = ti.ti[0];

		if (ti.ti[0].tcpi_bytes_sent == w &&
		    ti.ti[0].tcpi_bytes_received == r)
			goto done;

		/* rx may have landed before the caller sampled: remember it */
		if (r == 0 && ti.ti[0].tcpi_bytes_sent == w &&
		    ti.ti[0].tcpi_bytes_received) {
			s->tcpi_rcv_delta = ti.ti[0].tcpi_bytes_received;
			goto done;
		}

		/* wait and repeat, might be that tx is still ongoing */
		sleep(1);
	} while (tries-- > 0);

	xerror("tcpi_bytes_sent %" PRIu64 ", want %zu. tcpi_bytes_received %" PRIu64 ", want %zu",
	       ti.ti[0].tcpi_bytes_sent, w, ti.ti[0].tcpi_bytes_received, r);

done:
	do_getsockopt_bogus_sf_data(fd, MPTCP_TCPINFO);
}
/*
 * Fetch the subflow endpoint addresses via MPTCP_SUBFLOW_ADDRS and
 * cross-check them against getsockname()/getpeername(), then verify
 * the truncated (family-only) variant behaves as documented.
 */
static void do_getsockopt_subflow_addrs(struct so_state *s, int fd)
{
	struct sockaddr_storage remote, local;
	socklen_t olen, rlen, llen;
	int ret;
	struct my_addrs {
		struct mptcp_subflow_data d;
		struct mptcp_subflow_addrs addr[2];
	} addrs;
	memset(&addrs, 0, sizeof(addrs));
	memset(&local, 0, sizeof(local));
	memset(&remote, 0, sizeof(remote));
	addrs.d.size_subflow_data = sizeof(struct mptcp_subflow_data);
	addrs.d.size_user = sizeof(struct mptcp_subflow_addrs);
	olen = sizeof(addrs);
	ret = getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &addrs, &olen);
	if (ret < 0)
		die_perror("getsockopt MPTCP_SUBFLOW_ADDRS");
	/* header sanity, same scheme as MPTCP_TCPINFO */
	assert(olen <= sizeof(addrs));
	assert(addrs.d.size_kernel > 0);
	assert(addrs.d.size_user ==
	       MIN(addrs.d.size_kernel, sizeof(struct mptcp_subflow_addrs)));
	assert(addrs.d.num_subflows == 1);
	assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data));
	olen -= sizeof(struct mptcp_subflow_data);
	assert(olen == addrs.d.size_user);
	llen = sizeof(local);
	ret = getsockname(fd, (struct sockaddr *)&local, &llen);
	if (ret < 0)
		die_perror("getsockname");
	rlen = sizeof(remote);
	ret = getpeername(fd, (struct sockaddr *)&remote, &rlen);
	if (ret < 0)
		die_perror("getpeername");
	assert(rlen > 0);
	assert(rlen == llen);
	assert(remote.ss_family == local.ss_family);
	/* kernel-reported addresses must match the classic syscalls */
	assert(memcmp(&local, &addrs.addr[0].ss_local, sizeof(local)) == 0);
	assert(memcmp(&remote, &addrs.addr[0].ss_remote, sizeof(remote)) == 0);
	s->addrs = addrs.addr[0];
	/* now ask only for the address family: rest must stay zeroed */
	memset(&addrs, 0, sizeof(addrs));
	addrs.d.size_subflow_data = sizeof(struct mptcp_subflow_data);
	addrs.d.size_user = sizeof(sa_family_t);
	olen = sizeof(addrs.d) + sizeof(sa_family_t);
	ret = getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &addrs, &olen);
	assert(ret == 0);
	assert(olen == sizeof(addrs.d) + sizeof(sa_family_t));
	assert(addrs.addr[0].sa_family == pf);
	assert(addrs.addr[0].sa_family == local.ss_family);
	/* only the family was copied, so full-struct compares must differ */
	assert(memcmp(&local, &addrs.addr[0].ss_local, sizeof(local)) != 0);
	assert(memcmp(&remote, &addrs.addr[0].ss_remote, sizeof(remote)) != 0);
	do_getsockopt_bogus_sf_data(fd, MPTCP_SUBFLOW_ADDRS);
}
/*
 * Exercise getsockopt(MPTCP_FULL_INFO) and check its combined output
 * (mptcp_info + per-subflow tcp_info + addresses) is consistent with
 * the samples previously stored in @s.  Skipped (with a message) on
 * kernels that return EOPNOTSUPP.
 */
static void do_getsockopt_mptcp_full_info(struct so_state *s, int fd)
{
	size_t data_size = sizeof(struct mptcp_full_info);
	struct mptcp_subflow_info sfinfo[2];
	struct tcp_info tcp_info[2];
	struct mptcp_full_info mfi;
	socklen_t olen;
	int ret;
	memset(&mfi, 0, data_size);
	memset(tcp_info, 0, sizeof(tcp_info));
	memset(sfinfo, 0, sizeof(sfinfo));
	mfi.size_tcpinfo_user = sizeof(struct tcp_info);
	mfi.size_sfinfo_user = sizeof(struct mptcp_subflow_info);
	mfi.size_arrays_user = 2;
	/* user-space array buffers are passed as 64-bit pointers */
	mfi.subflow_info = (unsigned long)&sfinfo[0];
	mfi.tcp_info = (unsigned long)&tcp_info[0];
	olen = data_size;
	ret = getsockopt(fd, SOL_MPTCP, MPTCP_FULL_INFO, &mfi, &olen);
	if (ret < 0) {
		if (errno == EOPNOTSUPP) {
			perror("MPTCP_FULL_INFO test skipped");
			return;
		}
		xerror("getsockopt MPTCP_FULL_INFO");
	}
	assert(olen <= data_size);
	assert(mfi.size_tcpinfo_kernel > 0);
	assert(mfi.size_tcpinfo_user ==
	       MIN(mfi.size_tcpinfo_kernel, sizeof(struct tcp_info)));
	assert(mfi.size_sfinfo_kernel > 0);
	assert(mfi.size_sfinfo_user ==
	       MIN(mfi.size_sfinfo_kernel, sizeof(struct mptcp_subflow_info)));
	assert(mfi.num_subflows == 1);
	/* Tolerate future extension to mptcp_info struct and running newer
	 * test on top of older kernel.
	 * Anyway any kernel supporting MPTCP_FULL_INFO must at least include
	 * the following in mptcp_info.
	 */
	assert(olen > (socklen_t)__builtin_offsetof(struct mptcp_full_info, tcp_info));
	assert(mfi.mptcp_info.mptcpi_subflows == 0);
	assert(mfi.mptcp_info.mptcpi_bytes_sent == s->last_sample.mptcpi_bytes_sent);
	assert(mfi.mptcp_info.mptcpi_bytes_received == s->last_sample.mptcpi_bytes_received);
	assert(sfinfo[0].id == 1);
	assert(tcp_info[0].tcpi_bytes_sent == s->tcp_info.tcpi_bytes_sent);
	assert(tcp_info[0].tcpi_bytes_received == s->tcp_info.tcpi_bytes_received);
	assert(!memcmp(&sfinfo->addrs, &s->addrs, sizeof(struct mptcp_subflow_addrs)));
}
/*
 * Run the whole battery of sockopt checks for @r bytes received and
 * @w bytes sent.  FULL_INFO compares against the samples taken by the
 * earlier calls, so the order matters; it only runs once data flowed.
 */
static void do_getsockopts(struct so_state *s, int fd, size_t r, size_t w)
{
	do_getsockopt_mptcp_info(s, fd, w);
	do_getsockopt_tcp_info(s, fd, r, w);
	do_getsockopt_subflow_addrs(s, fd);
	if (r)
		do_getsockopt_mptcp_full_info(s, fd);
}
/*
 * Client side: send a random payload to the echo server on @fd, read
 * it back, and validate the sockopt counters before and after.
 * @pipefd synchronizes with the server ("xmit" unblocks us).
 */
static void connect_one_server(int fd, int pipefd)
{
	char buf[4096], buf2[4096];
	size_t len, i, total;
	struct so_state s;
	bool eof = false;
	ssize_t ret;
	memset(&s, 0, sizeof(s));
	/* random payload length in [128, 4095) of letters A-Z */
	len = rand() % (sizeof(buf) - 1);
	if (len < 128)
		len = 128;
	for (i = 0; i < len ; i++) {
		buf[i] = rand() % 26;
		buf[i] += 'A';
	}
	buf[i] = '\n';
	/* baseline sockopt sample before any data is exchanged */
	do_getsockopts(&s, fd, 0, 0);
	/* un-block server */
	ret = read(pipefd, buf2, 4);
	assert(ret == 4);
	close(pipefd);
	assert(strncmp(buf2, "xmit", 4) == 0);
	ret = write(fd, buf, len);
	if (ret < 0)
		die_perror("write");
	if (ret != (ssize_t)len)
		xerror("short write");
	/* read back the echo until all bytes (or EOF) arrive */
	total = 0;
	do {
		ret = read(fd, buf2 + total, sizeof(buf2) - total);
		if (ret < 0)
			die_perror("read");
		if (ret == 0) {
			eof = true;
			break;
		}
		total += ret;
	} while (total < len);
	if (total != len)
		xerror("total %lu, len %lu eof %d\n", total, len, eof);
	if (memcmp(buf, buf2, len))
		xerror("data corruption");
	if (s.tcpi_rcv_delta)
		assert(s.tcpi_rcv_delta <= total);
	/* NOTE(review): passes the last read() chunk, not @total —
	 * presumably equal when the echo arrives in one read; confirm.
	 */
	do_getsockopts(&s, fd, ret, ret);
	if (eof)
		total += 1; /* sequence advances due to FIN */
	assert(s.mptcpi_rcv_delta == (uint64_t)total);
	close(fd);
}
/*
 * Server side: echo one client's payload back and validate the
 * sockopt byte counters afterwards.  @pipefd carries the "xmit"
 * handshake that releases the client.
 */
static void process_one_client(int fd, int pipefd)
{
	ssize_t ret, ret2, ret3;
	struct so_state s;
	char buf[4096];
	memset(&s, 0, sizeof(s));
	do_getsockopts(&s, fd, 0, 0);
	ret = write(pipefd, "xmit", 4);
	assert(ret == 4);
	ret = read(fd, buf, sizeof(buf));
	if (ret < 0)
		die_perror("read");
	assert(s.mptcpi_rcv_delta <= (uint64_t)ret);
	if (s.tcpi_rcv_delta)
		assert(s.tcpi_rcv_delta == (uint64_t)ret);
	ret2 = write(fd, buf, ret);
	if (ret2 < 0)
		die_perror("write");
	/* wait for hangup */
	ret3 = read(fd, buf, 1);
	if (ret3 != 0)
		xerror("expected EOF, got %lu", ret3);
	do_getsockopts(&s, fd, ret, ret2);
	/* +1: the peer's FIN consumes one sequence number */
	if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
		xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
	/* be nice when running on top of older kernel */
	if (s.pkt_stats_avail) {
		if (s.last_sample.mptcpi_bytes_sent != ret2)
			xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
			       s.last_sample.mptcpi_bytes_sent, ret2,
			       s.last_sample.mptcpi_bytes_sent - ret2);
		if (s.last_sample.mptcpi_bytes_received != ret)
			xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
			       s.last_sample.mptcpi_bytes_received, ret,
			       s.last_sample.mptcpi_bytes_received - ret);
		/* NOTE(review): compares acked against @ret but prints
		 * @ret2 — equal here since the echo writes @ret bytes,
		 * but the message args look inconsistent; confirm.
		 */
		if (s.last_sample.mptcpi_bytes_acked != ret)
			xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
			       s.last_sample.mptcpi_bytes_acked, ret2,
			       s.last_sample.mptcpi_bytes_acked - ret2);
	}
	close(fd);
}
/* accept() wrapper: return the new connection fd or die. */
static int xaccept(int s)
{
	int conn = accept(s, NULL, 0);

	if (conn < 0)
		die_perror("accept");
	return conn;
}
/*
 * Server process body: listen on the loopback address matching @pf,
 * signal readiness over @pipefd ("conn"), accept one client and
 * serve it.  alarm(15) bounds the whole exchange.
 */
static int server(int pipefd)
{
	int fd = -1, r;
	switch (pf) {
	case AF_INET:
		fd = sock_listen_mptcp("127.0.0.1", "15432");
		break;
	case AF_INET6:
		fd = sock_listen_mptcp("::1", "15432");
		break;
	default:
		xerror("Unknown pf %d\n", pf);
		break;
	}
	/* tell the parent the listening socket is bound */
	r = write(pipefd, "conn", 4);
	assert(r == 4);
	alarm(15);
	r = xaccept(fd);
	process_one_client(r, pipefd);
	return 0;
}
/*
 * Verify IP_TOS set/get round-trips on an MPTCP socket, including the
 * zero-length and invalid-length getsockopt() corner cases.
 */
static void test_ip_tos_sockopt(int fd)
{
	uint8_t tos_in, tos_out;
	socklen_t s;
	int r;
	/* random TOS with the two ECN bits cleared */
	tos_in = rand() & 0xfc;
	/* NOTE(review): sizeof(tos_out) where sizeof(tos_in) reads
	 * better — same size, no functional difference.
	 */
	r = setsockopt(fd, SOL_IP, IP_TOS, &tos_in, sizeof(tos_out));
	if (r != 0)
		die_perror("setsockopt IP_TOS");
	tos_out = 0;
	s = sizeof(tos_out);
	r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s);
	if (r != 0)
		die_perror("getsockopt IP_TOS");
	if (tos_in != tos_out)
		xerror("tos %x != %x socklen_t %d\n", tos_in, tos_out, s);
	if (s != 1)
		xerror("tos should be 1 byte");
	/* zero-length read: must succeed and keep the length at 0 */
	s = 0;
	r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s);
	if (r != 0)
		die_perror("getsockopt IP_TOS 0");
	if (s != 0)
		xerror("expect socklen_t == 0");
	/* negative length: expect failure with EINVAL.
	 * NOTE(review): `r != -1 && errno != EINVAL` only dies when the
	 * call succeeds AND errno differs — presumably `||` was meant
	 * to require both the failure and the errno; confirm upstream.
	 */
	s = -1;
	r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s);
	if (r != -1 && errno != EINVAL)
		die_perror("getsockopt IP_TOS did not indicate -EINVAL");
	if (s != -1)
		xerror("expect socklen_t == -1");
}
/*
 * Client process body: connect to the server over loopback, run the
 * IP_TOS sockopt checks, then the echo + counters exchange.
 */
static int client(int pipefd)
{
	int fd = -1;
	alarm(15);
	switch (pf) {
	case AF_INET:
		fd = sock_connect_mptcp("127.0.0.1", "15432", IPPROTO_MPTCP);
		break;
	case AF_INET6:
		fd = sock_connect_mptcp("::1", "15432", IPPROTO_MPTCP);
		break;
	default:
		xerror("Unknown pf %d\n", pf);
	}
	test_ip_tos_sockopt(fd);
	connect_one_server(fd, pipefd);
	return 0;
}
/* fork() wrapper that dies on failure. */
static pid_t xfork(void)
{
	pid_t pid = fork();

	if (pid < 0)
		die_perror("fork");
	return pid;
}
/*
 * Interpret a waitpid() status for the child named @what.
 * Returns the child's exit status on normal exit; dies if the child
 * was killed or stopped by a signal; 111 for anything else.
 */
static int rcheck(int wstatus, const char *what)
{
	if (WIFSIGNALED(wstatus))
		xerror("%s killed by signal %d\n", what, WTERMSIG(wstatus));
	if (WIFSTOPPED(wstatus))
		xerror("%s stopped by signal %d\n", what, WSTOPSIG(wstatus));

	if (WIFEXITED(wstatus)) {
		if (WEXITSTATUS(wstatus) == 0)
			return 0;

		fprintf(stderr, "%s exited, status=%d\n", what,
			WEXITSTATUS(wstatus));
		return WEXITSTATUS(wstatus);
	}

	return 111;
}
/* Seed rand() from /dev/urandom, falling back to the current time. */
static void init_rng(void)
{
	unsigned int seed;
	ssize_t ret;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0) {
		srand(time(NULL));
		return;
	}

	/* can't fail */
	ret = read(fd, &seed, sizeof(seed));
	assert(ret == sizeof(seed));
	close(fd);
	srand(seed);
}
/*
 * Fork a server and a client connected by a pipe, wait for both and
 * return the first non-zero child status.
 */
int main(int argc, char *argv[])
{
	int e1, e2, wstatus;
	pid_t s, c, ret;
	int pipefds[2];
	parse_opts(argc, argv);
	init_rng();
	e1 = pipe(pipefds);
	if (e1 < 0)
		die_perror("pipe");
	s = xfork();
	if (s == 0)
		return server(pipefds[1]);
	close(pipefds[1]);
	/* wait until server bound a socket */
	/* e1 doubles as the 4-byte scratch buffer and the read return */
	e1 = read(pipefds[0], &e1, 4);
	assert(e1 == 4);
	c = xfork();
	if (c == 0)
		return client(pipefds[0]);
	close(pipefds[0]);
	ret = waitpid(s, &wstatus, 0);
	if (ret == -1)
		die_perror("waitpid");
	e1 = rcheck(wstatus, "server");
	ret = waitpid(c, &wstatus, 0);
	if (ret == -1)
		die_perror("waitpid");
	e2 = rcheck(wstatus, "client");
	return e1 ? e1 : e2;
}
| linux-master | tools/testing/selftests/net/mptcp/mptcp_sockopt.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <netdb.h>
#include <netinet/in.h>
#include <linux/tcp.h>
#include <linux/sockios.h>
/* Fallbacks for toolchains whose uapi headers predate MPTCP support. */
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

/* Address family under test; switched to AF_INET6 by the -6 option. */
static int pf = AF_INET;
/* Socket protocols for the sending and receiving side (-t / -r). */
static int proto_tx = IPPROTO_MPTCP;
static int proto_rx = IPPROTO_MPTCP;
/* Report @msg with the errno description, then terminate. */
static void die_perror(const char *msg)
{
	perror(msg);
	exit(EXIT_FAILURE);
}
/* Print the usage line and exit with status @r. */
static void die_usage(int r)
{
	fputs("Usage: mptcp_inq [-6] [ -t tcp|mptcp ] [ -r tcp|mptcp]\n", stderr);
	exit(r);
}
/* printf-style fatal error: print @fmt plus a newline, then exit(1). */
static void xerror(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
	exit(1);
}
/*
 * Map a getaddrinfo() error code to a human-readable string.
 * EAI_SYSTEM means the real cause is in errno.
 */
static const char *getxinfo_strerr(int err)
{
	return err == EAI_SYSTEM ? strerror(errno) : gai_strerror(err);
}
/* getaddrinfo() wrapper that aborts the process on resolution failure. */
static void xgetaddrinfo(const char *node, const char *service,
			 const struct addrinfo *hints,
			 struct addrinfo **res)
{
	int err = getaddrinfo(node, service, hints, res);

	if (!err)
		return;

	fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n",
		node ? node : "", service ? service : "",
		getxinfo_strerr(err));
	exit(1);
}
/*
 * Create a socket of protocol @proto_rx listening on @listenaddr:@port.
 * Tries every address returned by getaddrinfo(); dies if none can be
 * bound or if listen() fails.  Returns the listening fd.
 *
 * Fix: drop the dead re-assignment of hints.ai_family after
 * xgetaddrinfo() — hints is never read again at that point.
 */
static int sock_listen_mptcp(const char * const listenaddr,
			     const char * const port)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
		.ai_flags = AI_PASSIVE | AI_NUMERICHOST
	};
	struct addrinfo *a, *addr;
	int sock = -1;
	int one = 1;

	hints.ai_family = pf;

	xgetaddrinfo(listenaddr, port, &hints, &addr);

	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, proto_rx);
		if (sock < 0)
			continue;

		/* best effort: a failed SO_REUSEADDR is only reported */
		if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one,
				     sizeof(one)))
			perror("setsockopt");

		if (bind(sock, a->ai_addr, a->ai_addrlen) == 0)
			break; /* success */

		perror("bind");
		close(sock);
		sock = -1;
	}

	freeaddrinfo(addr);

	if (sock < 0)
		xerror("could not create listen socket");

	if (listen(sock, 20))
		die_perror("listen");

	return sock;
}
/*
 * Connect a socket of protocol @proto to @remoteaddr:@port.
 * Returns the connected fd; dies on failure.
 * NOTE(review): die_perror() inside the loop aborts on the first
 * failed connect(), so subsequent addrinfo entries are never tried —
 * presumably intentional for a single-address selftest; confirm.
 */
static int sock_connect_mptcp(const char * const remoteaddr,
			      const char * const port, int proto)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
	};
	struct addrinfo *a, *addr;
	int sock = -1;
	hints.ai_family = pf;
	xgetaddrinfo(remoteaddr, port, &hints, &addr);
	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, proto);
		if (sock < 0)
			continue;
		if (connect(sock, a->ai_addr, a->ai_addrlen) == 0)
			break; /* success */
		die_perror("connect");
	}
	if (sock < 0)
		xerror("could not create connect socket");
	freeaddrinfo(addr);
	return sock;
}
/*
 * Translate a "tcp"/"mptcp" option argument (case-insensitive) into
 * the matching IPPROTO_* value; any other string exits via usage.
 */
static int protostr_to_num(const char *s)
{
	if (!strcasecmp(s, "tcp"))
		return IPPROTO_TCP;
	if (!strcasecmp(s, "mptcp"))
		return IPPROTO_MPTCP;

	die_usage(1);
	return 0;
}
/*
 * Parse command-line flags: -h usage, -6 IPv6, -t/-r choose the tx/rx
 * socket protocol ("tcp" or "mptcp").
 */
static void parse_opts(int argc, char **argv)
{
	int c;
	while ((c = getopt(argc, argv, "h6t:r:")) != -1) {
		switch (c) {
		case 'h':
			die_usage(0);
			break;
		case '6':
			pf = AF_INET6;
			break;
		case 't':
			proto_tx = protostr_to_num(optarg);
			break;
		case 'r':
			proto_rx = protostr_to_num(optarg);
			break;
		default:
			die_usage(1);
			break;
		}
	}
}
/* wait up to timeout milliseconds */
/*
 * Poll TIOCOUTQ/SIOCOUTQNSD once per millisecond until the peer has
 * acked everything we queued (TIOCOUTQ == 0).  @total bounds how much
 * may legitimately still sit in the queue; exceeding it is fatal, as
 * is still having queued data after @timeout iterations.
 */
static void wait_for_ack(int fd, int timeout, size_t total)
{
	int i;
	for (i = 0; i < timeout; i++) {
		int nsd, ret, queued = -1;
		struct timespec req;
		ret = ioctl(fd, TIOCOUTQ, &queued);
		if (ret < 0)
			die_perror("TIOCOUTQ");
		/* not-sent-data can never exceed the total queued bytes */
		ret = ioctl(fd, SIOCOUTQNSD, &nsd);
		if (ret < 0)
			die_perror("SIOCOUTQNSD");
		if ((size_t)queued > total)
			xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total);
		assert(nsd <= queued);
		if (queued == 0)
			return;
		/* wait for peer to ack rx of all data */
		req.tv_sec = 0;
		req.tv_nsec = 1 * 1000 * 1000ul;	/* 1ms */
		nanosleep(&req, NULL);
	}
	xerror("still tx data queued after %u ms\n", timeout);
}
/*
 * Client side of the TCP_INQ test: send a small random payload, then
 * a multi-megabyte stream, then one final byte, pacing each phase
 * through the @unixfd control channel ("xmit"/"huge"/"shut"/"closed").
 *
 * Fix: the read of the "huge" token was not length-checked, unlike
 * every sibling control read — a short read would have compared
 * garbage; now asserts ret == 4 first.
 */
static void connect_one_server(int fd, int unixfd)
{
	size_t len, i, total, sent;
	char buf[4096], buf2[4096];
	ssize_t ret;

	/* random payload length in [128, 4095) of letters A-Z */
	len = rand() % (sizeof(buf) - 1);
	if (len < 128)
		len = 128;

	for (i = 0; i < len ; i++) {
		buf[i] = rand() % 26;
		buf[i] += 'A';
	}
	buf[i] = '\n';

	/* un-block server */
	ret = read(unixfd, buf2, 4);
	assert(ret == 4);
	assert(strncmp(buf2, "xmit", 4) == 0);

	/* announce the payload length, then send it */
	ret = write(unixfd, &len, sizeof(len));
	assert(ret == (ssize_t)sizeof(len));

	ret = write(fd, buf, len);
	if (ret < 0)
		die_perror("write");
	if (ret != (ssize_t)len)
		xerror("short write");

	ret = read(unixfd, buf2, 4);
	assert(ret == 4);
	assert(strncmp(buf2, "huge", 4) == 0);

	/* bulk phase: 1MB..17MB of don't-care bytes */
	total = rand() % (16 * 1024 * 1024);
	total += (1 * 1024 * 1024);
	sent = total;

	ret = write(unixfd, &total, sizeof(total));
	assert(ret == (ssize_t)sizeof(total));

	wait_for_ack(fd, 5000, len);

	while (total > 0) {
		if (total > sizeof(buf))
			len = sizeof(buf);
		else
			len = total;

		ret = write(fd, buf, len);
		if (ret < 0)
			die_perror("write");
		total -= ret;

		/* we don't have to care about buf content, only
		 * number of total bytes sent
		 */
	}

	ret = read(unixfd, buf2, 4);
	assert(ret == 4);
	assert(strncmp(buf2, "shut", 4) == 0);

	/* one trailing byte so the peer sees data + FIN */
	wait_for_ack(fd, 5000, sent);

	ret = write(fd, buf, 1);
	assert(ret == 1);
	close(fd);
	ret = write(unixfd, "closed", 6);
	assert(ret == 6);

	close(unixfd);
}
/*
 * Extract the TCP_CM_INQ (pending-bytes) value from the control
 * messages of @msgh into @inqv; fatal if the cmsg is missing.
 */
static void get_tcp_inq(struct msghdr *msgh, unsigned int *inqv)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msgh); cmsg; cmsg = CMSG_NXTHDR(msgh, cmsg)) {
		if (cmsg->cmsg_level != IPPROTO_TCP ||
		    cmsg->cmsg_type != TCP_CM_INQ)
			continue;

		memcpy(inqv, CMSG_DATA(cmsg), sizeof(*inqv));
		return;
	}

	xerror("could not find TCP_CM_INQ cmsg type");
}
/*
 * Server side of the TCP_INQ test: receive the client's phases and
 * verify the TCP_CM_INQ cmsg tracks the receive queue exactly —
 * including the final FIN, which is reported as one pending byte.
 */
static void process_one_client(int fd, int unixfd)
{
	unsigned int tcp_inq;
	size_t expect_len;
	char msg_buf[4096];
	char buf[4096];
	char tmp[16];
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = 1,
	};
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = msg_buf,
		.msg_controllen = sizeof(msg_buf),
	};
	ssize_t ret, tot;
	/* release the client, then learn the payload length */
	ret = write(unixfd, "xmit", 4);
	assert(ret == 4);
	ret = read(unixfd, &expect_len, sizeof(expect_len));
	assert(ret == (ssize_t)sizeof(expect_len));
	if (expect_len > sizeof(buf))
		xerror("expect len %zu exceeds buffer size", expect_len);
	/* busy-wait (1ms steps) until FIONREAD reports the full payload */
	for (;;) {
		struct timespec req;
		unsigned int queued;
		ret = ioctl(fd, FIONREAD, &queued);
		if (ret < 0)
			die_perror("FIONREAD");
		if (queued > expect_len)
			xerror("FIONREAD returned %u, but only %zu expected\n",
			       queued, expect_len);
		if (queued == expect_len)
			break;
		req.tv_sec = 0;
		req.tv_nsec = 1000 * 1000ul;
		nanosleep(&req, NULL);
	}
	/* read one byte, expect cmsg to return expected - 1 */
	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		die_perror("recvmsg");
	if (msg.msg_controllen == 0)
		xerror("msg_controllen is 0");
	get_tcp_inq(&msg, &tcp_inq);
	assert((size_t)tcp_inq == (expect_len - 1));
	iov.iov_len = sizeof(buf);
	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		die_perror("recvmsg");
	/* should have gotten exact remainder of all pending data */
	assert(ret == (ssize_t)tcp_inq);
	/* should be 0, all drained */
	get_tcp_inq(&msg, &tcp_inq);
	assert(tcp_inq == 0);
	/* request a large swath of data. */
	ret = write(unixfd, "huge", 4);
	assert(ret == 4);
	ret = read(unixfd, &expect_len, sizeof(expect_len));
	assert(ret == (ssize_t)sizeof(expect_len));
	/* peer should send us a few mb of data */
	if (expect_len <= sizeof(buf))
		xerror("expect len %zu too small\n", expect_len);
	/* drain the bulk stream: inq may never exceed what is left */
	tot = 0;
	do {
		iov.iov_len = sizeof(buf);
		ret = recvmsg(fd, &msg, 0);
		if (ret < 0)
			die_perror("recvmsg");
		tot += ret;
		get_tcp_inq(&msg, &tcp_inq);
		if (tcp_inq > expect_len - tot)
			xerror("inq %d, remaining %d total_len %d\n",
			       tcp_inq, expect_len - tot, (int)expect_len);
		assert(tcp_inq <= expect_len - tot);
	} while ((size_t)tot < expect_len);
	ret = write(unixfd, "shut", 4);
	assert(ret == 4);
	/* wait for hangup. Should have received one more byte of data. */
	ret = read(unixfd, tmp, sizeof(tmp));
	assert(ret == 6);
	assert(strncmp(tmp, "closed", 6) == 0);
	sleep(1);	/* let the one trailing byte and the FIN arrive */
	iov.iov_len = 1;
	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		die_perror("recvmsg");
	assert(ret == 1);
	get_tcp_inq(&msg, &tcp_inq);
	/* tcp_inq should be 1 due to received fin. */
	assert(tcp_inq == 1);
	iov.iov_len = 1;
	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		die_perror("recvmsg");
	/* expect EOF */
	assert(ret == 0);
	get_tcp_inq(&msg, &tcp_inq);
	assert(tcp_inq == 1);
	close(fd);
}
/* accept() wrapper: return the new connection fd or die. */
static int xaccept(int s)
{
	int conn = accept(s, NULL, 0);

	if (conn < 0)
		die_perror("accept");
	return conn;
}
/*
 * Server process body: listen on loopback, signal readiness over
 * @unixfd ("conn"), accept one client, enable TCP_INQ cmsg delivery
 * on the accepted socket and serve the client.
 */
static int server(int unixfd)
{
	int fd = -1, r, on = 1;
	switch (pf) {
	case AF_INET:
		fd = sock_listen_mptcp("127.0.0.1", "15432");
		break;
	case AF_INET6:
		fd = sock_listen_mptcp("::1", "15432");
		break;
	default:
		xerror("Unknown pf %d\n", pf);
		break;
	}
	/* tell the parent the listening socket is bound */
	r = write(unixfd, "conn", 4);
	assert(r == 4);
	alarm(15);
	r = xaccept(fd);
	/* ask the kernel to attach TCP_CM_INQ cmsgs to recvmsg() */
	if (-1 == setsockopt(r, IPPROTO_TCP, TCP_INQ, &on, sizeof(on)))
		die_perror("setsockopt");
	process_one_client(r, unixfd);
	return 0;
}
/*
 * Client process body: connect to the server over loopback with the
 * configured tx protocol and run the sending side of the test.
 */
static int client(int unixfd)
{
	int fd = -1;
	alarm(15);
	switch (pf) {
	case AF_INET:
		fd = sock_connect_mptcp("127.0.0.1", "15432", proto_tx);
		break;
	case AF_INET6:
		fd = sock_connect_mptcp("::1", "15432", proto_tx);
		break;
	default:
		xerror("Unknown pf %d\n", pf);
	}
	connect_one_server(fd, unixfd);
	return 0;
}
/*
 * Seed rand() from /dev/urandom, falling back to the current time.
 *
 * Fix: the original could call srand() with an uninitialized value
 * (undefined behavior) whenever open() failed, or when read()
 * returned short-but-nonnegative; it also rejected fd 0.  Now the
 * seed is only used after a full successful read, with a time-based
 * fallback otherwise.
 */
static void init_rng(void)
{
	int fd = open("/dev/urandom", O_RDONLY);
	unsigned int foo;

	if (fd >= 0) {
		ssize_t ret = read(fd, &foo, sizeof(foo));

		close(fd);
		if (ret == (ssize_t)sizeof(foo)) {
			srand(foo);
			return;
		}
	}

	/* no usable entropy source: fall back to a time-based seed */
	srand(time(NULL));
}
/*
 * fork() wrapper: dies on failure; the child re-seeds its RNG so the
 * two processes don't share a random sequence.
 */
static pid_t xfork(void)
{
	pid_t pid = fork();

	if (pid < 0)
		die_perror("fork");
	if (pid == 0)
		init_rng();
	return pid;
}
/*
 * Interpret a waitpid() status for the child named @what.
 * Returns the child's exit status on normal exit; dies if the child
 * was killed or stopped by a signal; 111 for anything else.
 */
static int rcheck(int wstatus, const char *what)
{
	if (WIFSIGNALED(wstatus))
		xerror("%s killed by signal %d\n", what, WTERMSIG(wstatus));
	if (WIFSTOPPED(wstatus))
		xerror("%s stopped by signal %d\n", what, WSTOPSIG(wstatus));

	if (WIFEXITED(wstatus)) {
		if (WEXITSTATUS(wstatus) == 0)
			return 0;

		fprintf(stderr, "%s exited, status=%d\n", what,
			WEXITSTATUS(wstatus));
		return WEXITSTATUS(wstatus);
	}

	return 111;
}
/*
 * Fork a server and a client connected by a unix datagram socketpair,
 * wait for both and return the first non-zero child status.
 *
 * Fix: the error message for a failed socketpair() said "pipe" —
 * misleading when diagnosing a failure; it now names the actual call.
 */
int main(int argc, char *argv[])
{
	int e1, e2, wstatus;
	pid_t s, c, ret;
	int unixfds[2];

	parse_opts(argc, argv);

	e1 = socketpair(AF_UNIX, SOCK_DGRAM, 0, unixfds);
	if (e1 < 0)
		die_perror("socketpair");

	s = xfork();
	if (s == 0)
		return server(unixfds[1]);

	close(unixfds[1]);

	/* wait until server bound a socket */
	e1 = read(unixfds[0], &e1, 4);
	assert(e1 == 4);

	c = xfork();
	if (c == 0)
		return client(unixfds[0]);

	close(unixfds[0]);

	ret = waitpid(s, &wstatus, 0);
	if (ret == -1)
		die_perror("waitpid");
	e1 = rcheck(wstatus, "server");
	ret = waitpid(c, &wstatus, 0);
	if (ret == -1)
		die_perror("waitpid");
	e2 = rcheck(wstatus, "client");

	return e1 ? e1 : e2;
}
| linux-master | tools/testing/selftests/net/mptcp/mptcp_inq.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <fcntl.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <netdb.h>
#include <netinet/in.h>
#include <linux/tcp.h>
#include <linux/time_types.h>
#include <linux/sockios.h>
extern int optind;

/* Fallbacks for toolchains whose uapi headers predate MPTCP support. */
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef TCP_ULP
#define TCP_ULP 31
#endif

/* poll() timeout in milliseconds (-t). */
static int poll_timeout = 10 * 1000;
/* true when running as the listening side (-l). */
static bool listen_mode;
static bool quit;

/* Data-transfer strategy selected by -m. */
enum cfg_mode {
	CFG_MODE_POLL,
	CFG_MODE_MMAP,
	CFG_MODE_SENDFILE,
};

/* Whether/how MSG_PEEK is exercised on the receive path (-P). */
enum cfg_peek {
	CFG_NONE_PEEK,
	CFG_WITH_PEEK,
	CFG_AFTER_PEEK,
};

static enum cfg_mode cfg_mode = CFG_MODE_POLL;
static enum cfg_peek cfg_peek = CFG_NONE_PEEK;
static const char *cfg_host;
static const char *cfg_port	= "12000";
static int cfg_sock_proto	= IPPROTO_MPTCP;
static int pf = AF_INET;
static int cfg_sndbuf;
static int cfg_rcvbuf;
static bool cfg_join;
static bool cfg_remove;
static unsigned int cfg_time;
static unsigned int cfg_do_w;
static int cfg_wait;
static uint32_t cfg_mark;
static char *cfg_input;
static int cfg_repeat = 1;
static int cfg_truncate;
static int cfg_rcv_trunc;

/* Which cmsg types the test should request/verify (-c). */
struct cfg_cmsg_types {
	unsigned int cmsg_enabled:1;
	unsigned int timestampns:1;
	unsigned int tcp_inq:1;
};

/* Which socket options the test should exercise (-o). */
struct cfg_sockopt_types {
	unsigned int transparent:1;
	unsigned int mptfo:1;
};

/* Running TCP_CM_INQ bookkeeping for the receive path. */
struct tcp_inq_state {
	unsigned int last;
	bool expect_eof;
};

/* Pending-write buffer used by the poll-based transfer loop. */
struct wstate {
	char buf[8192];
	unsigned int len;
	unsigned int off;
	unsigned int total_len;
};

static struct tcp_inq_state tcp_inq;

static struct cfg_cmsg_types cfg_cmsg_types;
static struct cfg_sockopt_types cfg_sockopt_types;
/* Print the command-line synopsis on stderr and exit(1).
 * Fix: the "-i file" line was missing its trailing newline, which glued it
 * to the following "-I num" line in the output.
 */
static void die_usage(void)
{
	fprintf(stderr, "Usage: mptcp_connect [-6] [-c cmsg] [-f offset] [-i file] [-I num] [-j] [-l] "
		"[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-r num] [-R num] "
		"[-s MPTCP|TCP] [-S num] [-t num] [-T num] [-w sec] connect_address\n");
	fprintf(stderr, "\t-6 use ipv6\n");
	fprintf(stderr, "\t-c cmsg -- test cmsg type <cmsg>\n");
	fprintf(stderr, "\t-f offset -- stop the I/O after receiving and sending the specified amount "
		"of bytes. If there are unread bytes in the receive queue, that will cause a MPTCP "
		"fastclose at close/shutdown. If offset is negative, expect the peer to close before "
		"all the local data as been sent, thus toleration errors on write and EPIPE signals\n");
	fprintf(stderr, "\t-i file -- read the data to send from the given file instead of stdin\n");
	fprintf(stderr, "\t-I num -- repeat the transfer 'num' times. In listen mode accepts num "
		"incoming connections, in client mode, disconnect and reconnect to the server\n");
	fprintf(stderr, "\t-j     -- add additional sleep at connection start and tear down "
		"-- for MPJ tests\n");
	fprintf(stderr, "\t-l     -- listens mode, accepts incoming connection\n");
	fprintf(stderr, "\t-m [poll|mmap|sendfile] -- use poll(default)/mmap+write/sendfile\n");
	fprintf(stderr, "\t-M mark -- set socket packet mark\n");
	fprintf(stderr, "\t-o option -- test sockopt <option>\n");
	fprintf(stderr, "\t-p num -- use port num\n");
	fprintf(stderr,
		"\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n");
	fprintf(stderr, "\t-r num -- enable slow mode, limiting each write to num bytes "
		"-- for remove addr tests\n");
	fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n");
	fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n");
	fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
	fprintf(stderr, "\t-t num -- set poll timeout to num\n");
	fprintf(stderr, "\t-T num -- set expected runtime to num ms\n");
	fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
	exit(1);
}
/* Print a printf-style error message to stderr and terminate with status 1. */
static void xerror(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);

	exit(1);
}
/* SIGUSR1/SIGPIPE handler: ask the I/O loops to stop.
 * NOTE(review): 'quit' is a plain bool; 'volatile sig_atomic_t' is the
 * strictly conforming type for objects written from a signal handler.
 */
static void handle_signal(int nr)
{
	quit = true;
}
/* Translate a getaddrinfo()/getnameinfo() error into a printable string.
 * EAI_SYSTEM means the real cause is carried in errno.
 */
static const char *getxinfo_strerr(int err)
{
	return (err == EAI_SYSTEM) ? strerror(errno) : gai_strerror(err);
}
/* getnameinfo() wrapper: numeric host/port only; exits on failure. */
static void xgetnameinfo(const struct sockaddr *addr, socklen_t addrlen,
			 char *host, socklen_t hostlen,
			 char *serv, socklen_t servlen)
{
	int ret = getnameinfo(addr, addrlen, host, hostlen, serv, servlen,
			      NI_NUMERICHOST | NI_NUMERICSERV);

	if (ret) {
		fprintf(stderr, "Fatal: getnameinfo: %s\n",
			getxinfo_strerr(ret));
		exit(1);
	}
}
/* getaddrinfo() wrapper that exits on failure. */
static void xgetaddrinfo(const char *node, const char *service,
			 const struct addrinfo *hints,
			 struct addrinfo **res)
{
	int ret = getaddrinfo(node, service, hints, res);

	if (ret) {
		fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n",
			node ? node : "", service ? service : "",
			getxinfo_strerr(ret));
		exit(1);
	}
}
/* Set SO_RCVBUF to @size bytes; exits on failure. */
static void set_rcvbuf(int fd, unsigned int size)
{
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size))) {
		perror("set SO_RCVBUF");
		exit(1);
	}
}
/* Set SO_SNDBUF to @size bytes; exits on failure. */
static void set_sndbuf(int fd, unsigned int size)
{
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size))) {
		perror("set SO_SNDBUF");
		exit(1);
	}
}
/* Tag the socket with SO_MARK (requires CAP_NET_ADMIN); exits on failure. */
static void set_mark(int fd, uint32_t mark)
{
	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark))) {
		perror("set SO_MARK");
		exit(1);
	}
}
/* Enable IP(V6)_TRANSPARENT on @fd for the given address family.
 * Needs CAP_NET_ADMIN; failure is reported but not fatal.
 */
static void set_transparent(int fd, int pf)
{
	int one = 1;
	switch (pf) {
	case AF_INET:
		if (-1 == setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one)))
			perror("IP_TRANSPARENT");
		break;
	case AF_INET6:
		if (-1 == setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &one, sizeof(one)))
			perror("IPV6_TRANSPARENT");
		break;
	}
	/* other families: silently ignored */
}
/* Enable TCP Fast Open with a fixed server queue length of 25.
 * Failure is reported but not fatal; @pf is accepted for symmetry with
 * set_transparent() but not used (the option is family-agnostic).
 */
static void set_mptfo(int fd, int pf)
{
	int qlen = 25;

	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		perror("TCP_FASTOPEN");
}
/* Try to attach the ULP called @name to @sock via TCP_ULP.
 * Returns setsockopt()'s result: 0 on success, -1 (with errno) on failure.
 */
static int do_ulp_so(int sock, const char *name)
{
	socklen_t optlen = strlen(name);

	return setsockopt(sock, IPPROTO_TCP, TCP_ULP, name, optlen);
}
/* Abort with file/line context plus the proto and the caller's line number. */
#define X(m) xerror("%s:%u: %s: failed for proto %d at line %u", __FILE__, __LINE__, (m), proto, line)
/* Sanity-check TCP_ULP behaviour on @sock: an MPTCP socket must report the
 * "mptcp" ULP and refuse to switch to another one; setting "mptcp" by hand
 * must always fail. Aborts via xerror() on any violation.
 */
static void sock_test_tcpulp(int sock, int proto, unsigned int line)
{
	socklen_t buflen = 8;
	char buf[8] = "";
	int ret = getsockopt(sock, IPPROTO_TCP, TCP_ULP, buf, &buflen);
	if (ret != 0)
		X("getsockopt");
	if (buflen > 0) {
		/* a ULP is attached: it has to be "mptcp" */
		if (strcmp(buf, "mptcp") != 0)
			xerror("unexpected ULP '%s' for proto %d at line %u", buf, proto, line);
		ret = do_ulp_so(sock, "tls");
		if (ret == 0)	/* switching away must be rejected */
			X("setsockopt");
	} else if (proto == IPPROTO_MPTCP) {
		ret = do_ulp_so(sock, "tls");
		if (ret != -1)
			X("setsockopt");
	}
	/* "mptcp" is not a ULP userspace may set explicitly */
	ret = do_ulp_so(sock, "mptcp");
	if (ret != -1)
		X("setsockopt");
#undef X
}
/* Convenience wrapper recording the call site's line number. */
#define SOCK_TEST_TCPULP(s, p) sock_test_tcpulp((s), (p), __LINE__)
/* Create a listening socket bound to @listenaddr:@port using cfg_sock_proto
 * (MPTCP or TCP), applying the optional SO_REUSEADDR / IP_TRANSPARENT /
 * TCP_FASTOPEN settings before bind().
 * Returns the listening fd, or -1 on failure.
 *
 * Cleanup: hoisted the declarations above the first statement and dropped
 * the duplicated 'hints.ai_family = pf;' assignment (it was set both before
 * and, pointlessly, after xgetaddrinfo()).
 */
static int sock_listen_mptcp(const char * const listenaddr,
			     const char * const port)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
		.ai_flags = AI_PASSIVE | AI_NUMERICHOST
	};
	struct addrinfo *a, *addr;
	int sock = -1;
	int one = 1;

	hints.ai_family = pf;
	xgetaddrinfo(listenaddr, port, &hints, &addr);

	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, cfg_sock_proto);
		if (sock < 0)
			continue;

		SOCK_TEST_TCPULP(sock, cfg_sock_proto);

		if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one,
				     sizeof(one)))
			perror("setsockopt");

		if (cfg_sockopt_types.transparent)
			set_transparent(sock, pf);

		if (cfg_sockopt_types.mptfo)
			set_mptfo(sock, pf);

		if (bind(sock, a->ai_addr, a->ai_addrlen) == 0)
			break; /* success */

		perror("bind");
		close(sock);
		sock = -1;
	}

	freeaddrinfo(addr);

	if (sock < 0) {
		fprintf(stderr, "Could not create listen socket\n");
		return sock;
	}

	SOCK_TEST_TCPULP(sock, cfg_sock_proto);

	if (listen(sock, 20)) {
		perror("listen");
		close(sock);
		return -1;
	}

	SOCK_TEST_TCPULP(sock, cfg_sock_proto);

	return sock;
}
/* Connect to @remoteaddr:@port using @proto.
 *
 * With MPTFO (-o MPTFO) the first chunk of @infd is sent inside the SYN via
 * sendto(MSG_FASTOPEN); @winfo records how much of that buffer is still
 * pending. On success *peer points at the addrinfo entry used.
 *
 * Fix (use-after-free): the addrinfo list was always freed here, yet callers
 * keep dereferencing *peer afterwards (reconnect/xdisconnect in main_loop).
 * The list is now freed only when no connection was established; otherwise
 * it is intentionally kept alive for the lifetime of this short-lived test.
 * Also merged the duplicated error branch into the main loop body.
 */
static int sock_connect_mptcp(const char * const remoteaddr,
			      const char * const port, int proto,
			      struct addrinfo **peer,
			      int infd, struct wstate *winfo)
{
	struct addrinfo hints = {
		.ai_protocol = IPPROTO_TCP,
		.ai_socktype = SOCK_STREAM,
	};
	struct addrinfo *a, *addr;
	int syn_copied = 0;
	int sock = -1;

	hints.ai_family = pf;
	xgetaddrinfo(remoteaddr, port, &hints, &addr);

	for (a = addr; a; a = a->ai_next) {
		sock = socket(a->ai_family, a->ai_socktype, proto);
		if (sock < 0) {
			perror("socket");
			continue;
		}

		SOCK_TEST_TCPULP(sock, proto);

		if (cfg_mark)
			set_mark(sock, cfg_mark);

		if (cfg_sockopt_types.mptfo) {
			if (!winfo->total_len)
				winfo->total_len = winfo->len = read(infd, winfo->buf,
								     sizeof(winfo->buf));

			syn_copied = sendto(sock, winfo->buf, winfo->len, MSG_FASTOPEN,
					    a->ai_addr, a->ai_addrlen);
			if (syn_copied >= 0) {
				winfo->off = syn_copied;
				winfo->len -= syn_copied;
				*peer = a;
				break; /* success */
			}
			perror("sendto()");
		} else {
			if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) {
				*peer = a;
				break; /* success */
			}
			perror("connect()");
		}

		close(sock);
		sock = -1;
	}

	if (sock == -1)
		freeaddrinfo(addr);	/* nothing points into the list */
	else
		SOCK_TEST_TCPULP(sock, proto);

	return sock;
}
/* Write a randomly sized chunk (1..65535 bytes, capped at @len) of @buf to
 * @fd. The join (-j) and remove-addr (-r) test modes throttle the writes so
 * the MPTCP signalling under test has time to happen.
 * Returns write()'s result: possibly short, negative on error.
 */
static size_t do_rnd_write(const int fd, char *buf, const size_t len)
{
	static bool first = true;
	unsigned int do_w;
	ssize_t bw;
	do_w = rand() & 0xffff;
	if (do_w == 0 || do_w > len)
		do_w = len;
	/* keep the very first write small so the MPJ handshake can start */
	if (cfg_join && first && do_w > 100)
		do_w = 100;
	if (cfg_remove && do_w > cfg_do_w)
		do_w = cfg_do_w;
	bw = write(fd, buf, do_w);
	if (bw < 0)
		return bw;
	/* let the join handshake complete, before going on */
	if (cfg_join && first) {
		usleep(200000);
		first = false;
	}
	if (cfg_remove)
		usleep(200000);
	return bw;
}
/* Write all @len bytes of @buf to @fd, retrying short writes.
 * Returns @len on success, 0 on write error.
 */
static size_t do_write(const int fd, char *buf, const size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t rc = write(fd, buf + done, len - done);

		if (rc < 0) {
			perror("write");
			return 0;
		}

		done += (size_t)rc;
	}

	return done;
}
/* Walk the control messages of a received msghdr and validate that the
 * cmsg types requested with -c (SO_TIMESTAMPNS_NEW, TCP_INQ) are present
 * and sane; aborts via xerror() otherwise. Stores the TCP_INQ hint in
 * tcp_inq.last for the next read to cross-check.
 */
static void process_cmsg(struct msghdr *msgh)
{
	struct __kernel_timespec ts;
	bool inq_found = false;
	bool ts_found = false;
	unsigned int inq = 0;
	struct cmsghdr *cmsg;
	for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) {
			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			ts_found = true;
			continue;
		}
		if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ) {
			memcpy(&inq, CMSG_DATA(cmsg), sizeof(inq));
			inq_found = true;
			continue;
		}
	}
	if (cfg_cmsg_types.timestampns) {
		if (!ts_found)
			xerror("TIMESTAMPNS not present\n");
	}
	if (cfg_cmsg_types.tcp_inq) {
		if (!inq_found)
			xerror("TCP_INQ not present\n");
		/* this test transfers small amounts; a huge hint means breakage */
		if (inq > 1024)
			xerror("tcp_inq %u is larger than one kbyte\n", inq);
		tcp_inq.last = inq;
	}
}
/* recvmsg() wrapper used when cmsg testing is enabled (-c): receives into
 * @buf, validates the ancillary data via process_cmsg(), and cross-checks
 * the TCP_INQ hints against what is actually read (a hint of 0/1 right
 * before EOF is tolerated). Returns recvmsg()'s result.
 */
static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len)
{
	char msg_buf[8192];
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = len,
	};
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = msg_buf,
		.msg_controllen = sizeof(msg_buf),
	};
	int flags = 0;
	unsigned int last_hint = tcp_inq.last;
	int ret = recvmsg(fd, &msg, flags);
	if (ret <= 0) {
		if (ret == 0 && tcp_inq.expect_eof)
			return ret;
		/* EOF must have been announced by a 0 or 1 byte hint */
		if (ret == 0 && cfg_cmsg_types.tcp_inq)
			if (last_hint != 1 && last_hint != 0)
				xerror("EOF but last tcp_inq hint was %u\n", last_hint);
		return ret;
	}
	if (tcp_inq.expect_eof)
		xerror("expected EOF, last_hint %u, now %u\n",
		       last_hint, tcp_inq.last);
	if (msg.msg_controllen && !cfg_cmsg_types.cmsg_enabled)
		xerror("got %lu bytes of cmsg data, expected 0\n",
		       (unsigned long)msg.msg_controllen);
	if (msg.msg_controllen == 0 && cfg_cmsg_types.cmsg_enabled)
		xerror("%s\n", "got no cmsg data");
	if (msg.msg_controllen)
		process_cmsg(&msg);
	if (cfg_cmsg_types.tcp_inq) {
		/* short read with a larger previous hint: either exactly one
		 * byte (the EOF marker) remains, or something is wrong
		 */
		if ((size_t)ret < len && last_hint > (unsigned int)ret) {
			if (ret + 1 != (int)last_hint) {
				int next = read(fd, msg_buf, sizeof(msg_buf));
				xerror("read %u of %u, last_hint was %u tcp_inq hint now %u next_read returned %d/%m\n",
				       ret, (unsigned int)len, last_hint, tcp_inq.last, next);
			} else {
				tcp_inq.expect_eof = true;
			}
		}
	}
	return ret;
}
/* Read a randomly capped amount (1..65535 bytes, at most @len) from @fd,
 * honouring the configured MSG_PEEK mode:
 *  - CFG_WITH_PEEK: peek into @buf, then drain the same amount into scratch;
 *  - CFG_AFTER_PEEK: peek, then do a regular read into @buf;
 *  - cmsg mode: go through do_recvmsg_cmsg();
 *  - default: plain read().
 * Returns the underlying call's result.
 */
static ssize_t do_rnd_read(const int fd, char *buf, const size_t len)
{
	int ret = 0;
	char tmp[16384];
	size_t cap = rand();
	cap &= 0xffff;
	if (cap == 0)
		cap = 1;
	else if (cap > len)
		cap = len;
	if (cfg_peek == CFG_WITH_PEEK) {
		ret = recv(fd, buf, cap, MSG_PEEK);
		ret = (ret < 0) ? ret : read(fd, tmp, ret);
	} else if (cfg_peek == CFG_AFTER_PEEK) {
		ret = recv(fd, buf, cap, MSG_PEEK);
		ret = (ret < 0) ? ret : read(fd, buf, cap);
	} else if (cfg_cmsg_types.cmsg_enabled) {
		ret = do_recvmsg_cmsg(fd, buf, cap);
	} else {
		ret = read(fd, buf, cap);
	}
	return ret;
}
/* Switch O_NONBLOCK on (@nonblock true) or off on @fd.
 * fcntl() failures are silently ignored.
 */
static void set_nonblock(int fd, bool nonblock)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return;

	flags = nonblock ? (flags | O_NONBLOCK) : (flags & ~O_NONBLOCK);
	fcntl(fd, F_SETFL, flags);
}
/* Close our write side. If -w was given, sleep cfg_wait microseconds first
 * so address notifications can go out / the current status can be checked.
 */
static void shut_wr(int fd)
{
	if (cfg_wait)
		usleep(cfg_wait);

	shutdown(fd, SHUT_WR);
}
/* Bidirectional poll()-based copy loop: data read from @peerfd goes to
 * @outfd, data read from @infd goes to @peerfd (via @winfo buffering).
 * Stops when both directions are finished, on fatal error, or when -f's
 * truncation limit is reached in both directions. Sets *in_closed_after_out
 * when the peer closed only after we had nothing more to send.
 * Returns 0 on success, a positive error code otherwise.
 */
static int copyfd_io_poll(int infd, int peerfd, int outfd,
			  bool *in_closed_after_out, struct wstate *winfo)
{
	struct pollfd fds = {
		.fd = peerfd,
		.events = POLLIN | POLLOUT,
	};
	unsigned int total_wlen = 0, total_rlen = 0;
	set_nonblock(peerfd, true);
	for (;;) {
		char rbuf[8192];
		ssize_t len;
		/* both directions done, or a signal asked us to stop */
		if (fds.events == 0 || quit)
			break;
		switch (poll(&fds, 1, poll_timeout)) {
		case -1:
			if (errno == EINTR)
				continue;
			perror("poll");
			return 1;
		case 0:
			fprintf(stderr, "%s: poll timed out (events: "
				"POLLIN %u, POLLOUT %u)\n", __func__,
				fds.events & POLLIN, fds.events & POLLOUT);
			return 2;
		}
		if (fds.revents & POLLIN) {
			ssize_t rb = sizeof(rbuf);
			/* limit the total amount of read data to the trunc value*/
			if (cfg_truncate > 0) {
				if (rb + total_rlen > cfg_truncate)
					rb = cfg_truncate - total_rlen;
				len = read(peerfd, rbuf, rb);
			} else {
				len = do_rnd_read(peerfd, rbuf, sizeof(rbuf));
			}
			if (len == 0) {
				/* no more data to receive:
				 * peer has closed its write side
				 */
				fds.events &= ~POLLIN;
				if ((fds.events & POLLOUT) == 0) {
					*in_closed_after_out = true;
					/* and nothing more to send */
					break;
				}
				/* Else, still have data to transmit */
			} else if (len < 0) {
				if (cfg_rcv_trunc)
					return 0;
				perror("read");
				return 3;
			}
			total_rlen += len;
			do_write(outfd, rbuf, len);
		}
		if (fds.revents & POLLOUT) {
			/* refill the write buffer from the input when drained */
			if (winfo->len == 0) {
				winfo->off = 0;
				winfo->len = read(infd, winfo->buf, sizeof(winfo->buf));
			}
			if (winfo->len > 0) {
				ssize_t bw;
				/* limit the total amount of written data to the trunc value */
				if (cfg_truncate > 0 && winfo->len + total_wlen > cfg_truncate)
					winfo->len = cfg_truncate - total_wlen;
				bw = do_rnd_write(peerfd, winfo->buf + winfo->off, winfo->len);
				if (bw < 0) {
					if (cfg_rcv_trunc)
						return 0;
					perror("write");
					return 111;
				}
				winfo->off += bw;
				winfo->len -= bw;
				total_wlen += bw;
			} else if (winfo->len == 0) {
				/* We have no more data to send. */
				fds.events &= ~POLLOUT;
				if ((fds.events & POLLIN) == 0)
					/* ... and peer also closed already */
					break;
				shut_wr(peerfd);
			} else {
				/* NOTE(review): winfo->len is unsigned, so a failed
				 * read() lands in the '> 0' branch above and this arm
				 * looks unreachable — confirm before relying on it.
				 */
				if (errno == EINTR)
					continue;
				perror("read");
				return 4;
			}
		}
		if (fds.revents & (POLLERR | POLLNVAL)) {
			if (cfg_rcv_trunc)
				return 0;
			fprintf(stderr, "Unexpected revents: "
				"POLLERR/POLLNVAL(%x)\n", fds.revents);
			return 5;
		}
		if (cfg_truncate > 0 && total_wlen >= cfg_truncate &&
		    total_rlen >= cfg_truncate)
			break;
	}
	/* leave some time for late join/announce */
	if (cfg_remove && !quit)
		usleep(cfg_wait);
	return 0;
}
/* Drain @infd into @outfd using randomly sized reads until EOF or error.
 * Returns 0 on clean EOF, negative on read error; stops early (without an
 * error code) if the write to @outfd is short.
 */
static int do_recvfile(int infd, int outfd)
{
	ssize_t r;
	do {
		char buf[16384];
		r = do_rnd_read(infd, buf, sizeof(buf));
		if (r > 0) {
			if (write(outfd, buf, r) != r)
				break;
		} else if (r < 0) {
			perror("read");
		}
	} while (r > 0);
	return (int)r;
}
/* Flush all bytes still pending in @winfo to @fd.
 * Returns 0 on success, 4 on write error (positive error-code convention,
 * like the other copy helpers in this file).
 */
static int spool_buf(int fd, struct wstate *winfo)
{
	while (winfo->len) {
		int bw = write(fd, winfo->buf + winfo->off, winfo->len);

		if (bw < 0) {
			perror("write");
			return 4;
		}

		winfo->off += bw;
		winfo->len -= bw;
	}

	return 0;
}
/* Send the whole @size-byte input file to @outfd via mmap()+write(), after
 * first flushing any bytes already buffered in @winfo (MPTFO SYN leftovers).
 * Returns 0 on success, non-zero on error.
 *
 * Fixes: spool_buf() reports failure with a positive code (4), so the old
 * 'ret < 0' check could never fire; and that error path now unmaps the
 * input instead of leaking the mapping.
 */
static int do_mmap(int infd, int outfd, unsigned int size,
		   struct wstate *winfo)
{
	char *inbuf = mmap(NULL, size, PROT_READ, MAP_SHARED, infd, 0);
	ssize_t ret = 0, off = winfo->total_len;
	size_t rem;

	if (inbuf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	ret = spool_buf(outfd, winfo);
	if (ret) {
		munmap(inbuf, size);
		return ret;
	}

	rem = size - winfo->total_len;

	while (rem > 0) {
		ret = write(outfd, inbuf + off, rem);

		if (ret < 0) {
			perror("write");
			break;
		}

		off += ret;
		rem -= ret;
	}

	munmap(inbuf, size);
	return rem;
}
/* Return the size of the regular file behind @fd, or a negative code:
 * -1 fstat failed, -2 not a regular file, -3 larger than INT_MAX.
 * Fix: 'count' is a signed ssize_t, so print it with %zd (was %zu).
 */
static int get_infd_size(int fd)
{
	struct stat sb;
	ssize_t count;
	int err;

	err = fstat(fd, &sb);
	if (err < 0) {
		perror("fstat");
		return -1;
	}

	if ((sb.st_mode & S_IFMT) != S_IFREG) {
		fprintf(stderr, "%s: stdin is not a regular file\n", __func__);
		return -2;
	}

	count = sb.st_size;
	if (count > INT_MAX) {
		fprintf(stderr, "File too large: %zd\n", count);
		return -3;
	}

	return (int)count;
}
/* Send @count bytes (minus what MPTFO already spooled) from @infd to @outfd
 * using sendfile(). Returns 0 on success, non-zero on error.
 * Fix: spool_buf() returns a positive error code (4), so the old 'ret < 0'
 * check silently ignored flush failures; test 'ret != 0' instead.
 */
static int do_sendfile(int infd, int outfd, unsigned int count,
		       struct wstate *winfo)
{
	int ret = spool_buf(outfd, winfo);

	if (ret)
		return ret;

	count -= winfo->total_len;

	while (count > 0) {
		ssize_t r;

		r = sendfile(outfd, infd, NULL, count);
		if (r < 0) {
			perror("sendfile");
			return 3;
		}

		count -= r;
	}

	return 0;
}
/* mmap-mode transfer: the server receives first then sends the file; the
 * client sends first, shuts down its write side, then drains the response.
 * Returns 0 on success, non-zero on error.
 */
static int copyfd_io_mmap(int infd, int peerfd, int outfd,
			  unsigned int size, bool *in_closed_after_out,
			  struct wstate *winfo)
{
	int err;
	if (listen_mode) {
		err = do_recvfile(peerfd, outfd);
		if (err)
			return err;
		err = do_mmap(infd, peerfd, size, winfo);
	} else {
		err = do_mmap(infd, peerfd, size, winfo);
		if (err)
			return err;
		shut_wr(peerfd);
		err = do_recvfile(peerfd, outfd);
		*in_closed_after_out = true;
	}
	return err;
}
/* sendfile-mode transfer; same receive/send ordering as copyfd_io_mmap().
 * Returns 0 on success, non-zero on error.
 */
static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
			      unsigned int size, bool *in_closed_after_out, struct wstate *winfo)
{
	int err;
	if (listen_mode) {
		err = do_recvfile(peerfd, outfd);
		if (err)
			return err;
		err = do_sendfile(infd, peerfd, size, winfo);
	} else {
		err = do_sendfile(infd, peerfd, size, winfo);
		if (err)
			return err;
		shut_wr(peerfd);
		err = do_recvfile(peerfd, outfd);
		*in_closed_after_out = true;
	}
	return err;
}
/* Dispatch the transfer according to cfg_mode and, with -T, verify the
 * runtime stayed under cfg_time ms (printing the measured time when this
 * end finished sending before receiving EOF).
 * Returns 0 on success, non-zero on error.
 */
static int copyfd_io(int infd, int peerfd, int outfd, bool close_peerfd, struct wstate *winfo)
{
	bool in_closed_after_out = false;
	struct timespec start, end;
	int file_size;
	int ret;
	if (cfg_time && (clock_gettime(CLOCK_MONOTONIC, &start) < 0))
		xerror("can not fetch start time %d", errno);
	switch (cfg_mode) {
	case CFG_MODE_POLL:
		ret = copyfd_io_poll(infd, peerfd, outfd, &in_closed_after_out,
				     winfo);
		break;
	case CFG_MODE_MMAP:
		file_size = get_infd_size(infd);
		if (file_size < 0)
			return file_size;
		ret = copyfd_io_mmap(infd, peerfd, outfd, file_size,
				     &in_closed_after_out, winfo);
		break;
	case CFG_MODE_SENDFILE:
		file_size = get_infd_size(infd);
		if (file_size < 0)
			return file_size;
		ret = copyfd_io_sendfile(infd, peerfd, outfd, file_size,
					 &in_closed_after_out, winfo);
		break;
	default:
		fprintf(stderr, "Invalid mode %d\n", cfg_mode);
		die_usage();
		return 1;
	}
	if (ret)
		return ret;
	if (close_peerfd)
		close(peerfd);
	if (cfg_time) {
		unsigned int delta_ms;
		if (clock_gettime(CLOCK_MONOTONIC, &end) < 0)
			xerror("can not fetch end time %d", errno);
		delta_ms = (end.tv_sec - start.tv_sec) * 1000 + (end.tv_nsec - start.tv_nsec) / 1000000;
		if (delta_ms > cfg_time) {
			xerror("transfer slower than expected! runtime %d ms, expected %d ms",
			       delta_ms, cfg_time);
		}
		/* show the runtime only if this end shutdown(wr) before receiving the EOF,
		 * (that is, if this end got the longer runtime)
		 */
		if (in_closed_after_out)
			fprintf(stderr, "%d", delta_ms);
	}
	return 0;
}
/* Sanity-check the sockaddr filled in by accept(): the family must match
 * @pf, the length must match the family, and the peer port must not be 0.
 * Problems are reported on stderr but are not fatal.
 * Fixes: the two "port 0" messages lacked a trailing newline, and
 * wanted_size (socklen_t) is now cast to int to match the %d specifier.
 */
static void check_sockaddr(int pf, struct sockaddr_storage *ss,
			   socklen_t salen)
{
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	socklen_t wanted_size = 0;

	switch (pf) {
	case AF_INET:
		wanted_size = sizeof(*sin);
		sin = (void *)ss;
		if (!sin->sin_port)
			fprintf(stderr, "accept: something wrong: ip connection from port 0\n");
		break;
	case AF_INET6:
		wanted_size = sizeof(*sin6);
		sin6 = (void *)ss;
		if (!sin6->sin6_port)
			fprintf(stderr, "accept: something wrong: ipv6 connection from port 0\n");
		break;
	default:
		fprintf(stderr, "accept: Unknown pf %d, salen %u\n", pf, salen);
		return;
	}

	if (salen != wanted_size)
		fprintf(stderr, "accept: size mismatch, got %d expected %d\n",
			(int)salen, (int)wanted_size);

	if (ss->ss_family != pf)
		fprintf(stderr, "accept: pf mismatch, expect %d, ss_family is %d\n",
			(int)ss->ss_family, pf);
}
/* Verify that getpeername() on @fd agrees (length and bytes) with the
 * address accept() reported in @ss/@salen; mismatches are printed on
 * stderr but are not fatal.
 */
static void check_getpeername(int fd, struct sockaddr_storage *ss, socklen_t salen)
{
	struct sockaddr_storage peerss;
	socklen_t peersalen = sizeof(peerss);
	if (getpeername(fd, (struct sockaddr *)&peerss, &peersalen) < 0) {
		perror("getpeername");
		return;
	}
	if (peersalen != salen) {
		fprintf(stderr, "%s: %d vs %d\n", __func__, peersalen, salen);
		return;
	}
	if (memcmp(ss, &peerss, peersalen)) {
		char a[INET6_ADDRSTRLEN];
		char b[INET6_ADDRSTRLEN];
		char c[INET6_ADDRSTRLEN];
		char d[INET6_ADDRSTRLEN];
		xgetnameinfo((struct sockaddr *)ss, salen,
			     a, sizeof(a), b, sizeof(b));
		xgetnameinfo((struct sockaddr *)&peerss, peersalen,
			     c, sizeof(c), d, sizeof(d));
		fprintf(stderr, "%s: memcmp failure: accept %s vs peername %s, %s vs %s salen %d vs %d\n",
			__func__, a, c, b, d, peersalen, salen);
	}
}
/* Client-side check: getpeername() on the connected socket must resolve
 * (numerically) to the cfg_host/cfg_port we dialled; mismatches are printed
 * on stderr but are not fatal.
 */
static void check_getpeername_connect(int fd)
{
	struct sockaddr_storage ss;
	socklen_t salen = sizeof(ss);
	char a[INET6_ADDRSTRLEN];
	char b[INET6_ADDRSTRLEN];
	if (getpeername(fd, (struct sockaddr *)&ss, &salen) < 0) {
		perror("getpeername");
		return;
	}
	xgetnameinfo((struct sockaddr *)&ss, salen,
		     a, sizeof(a), b, sizeof(b));
	if (strcmp(cfg_host, a) || strcmp(cfg_port, b))
		fprintf(stderr, "%s: %s vs %s, %s vs %s\n", __func__,
			cfg_host, a, cfg_port, b);
}
/* Close @fd with 50% probability to exercise both orderings — except in
 * the modes (join/remove/repeat) that still need the listening socket.
 */
static void maybe_close(int fd)
{
	bool keep = cfg_join || cfg_remove || cfg_repeat > 1;

	if (!keep && (rand() & 1))
		close(fd);
}
/* Server main loop: wait (with timeout) for an incoming connection on
 * @listensock, run the configured transfer against it, and repeat -I times.
 * The input comes from the -i file if given, otherwise stdin (fd 0).
 * Returns 0 on success, non-zero on error.
 */
int main_loop_s(int listensock)
{
	struct sockaddr_storage ss;
	struct wstate winfo;
	struct pollfd polls;
	socklen_t salen;
	int remotesock;
	int fd = 0;
again:
	polls.fd = listensock;
	polls.events = POLLIN;
	switch (poll(&polls, 1, poll_timeout)) {
	case -1:
		perror("poll");
		return 1;
	case 0:
		fprintf(stderr, "%s: timed out\n", __func__);
		close(listensock);
		return 2;
	}
	salen = sizeof(ss);
	remotesock = accept(listensock, (struct sockaddr *)&ss, &salen);
	if (remotesock >= 0) {
		/* randomly close the listener to cover both orderings */
		maybe_close(listensock);
		check_sockaddr(pf, &ss, salen);
		check_getpeername(remotesock, &ss, salen);
		if (cfg_input) {
			fd = open(cfg_input, O_RDONLY);
			if (fd < 0)
				xerror("can't open %s: %d", cfg_input, errno);
		}
		SOCK_TEST_TCPULP(remotesock, 0);
		memset(&winfo, 0, sizeof(winfo));
		/* remotesock is closed inside copyfd_io (close_peerfd=true) */
		copyfd_io(fd, remotesock, 1, true, &winfo);
	} else {
		perror("accept");
		return 1;
	}
	if (--cfg_repeat > 0) {
		if (cfg_input)
			close(fd);
		goto again;
	}
	return 0;
}
/* Seed rand() from /dev/urandom, falling back to time^pid.
 * Fixes: the old code seeded from an uninitialized variable when the open
 * or read failed (undefined behavior), treated fd 0 as a failure
 * ('fd > 0'), and called srand() twice with an odd 'fd + foo' seed on a
 * failed read.
 */
static void init_rng(void)
{
	unsigned int seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd >= 0) {
		unsigned int rnd;

		if (read(fd, &rnd, sizeof(rnd)) == (ssize_t)sizeof(rnd))
			seed = rnd;
		close(fd);
	}

	srand(seed);
}
/* setsockopt() wrapper that exits the process on failure. */
static void xsetsockopt(int fd, int level, int optname, const void *optval, socklen_t optlen)
{
	if (setsockopt(fd, level, optname, optval, optlen) != 0) {
		perror("setsockopt");
		exit(1);
	}
}
/* Enable on @fd the socket options matching the cmsg types requested with
 * -c (SO_TIMESTAMPNS_NEW and/or TCP_INQ). Exits on failure.
 */
static void apply_cmsg_types(int fd, const struct cfg_cmsg_types *cmsg)
{
	static const unsigned int on = 1;
	if (cmsg->timestampns)
		xsetsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &on, sizeof(on));
	if (cmsg->tcp_inq)
		xsetsockopt(fd, IPPROTO_TCP, TCP_INQ, &on, sizeof(on));
}
/* Parse the -c argument: a comma-separated list of cmsg names
 * ("TIMESTAMPNS", "TCPINQ"), handled recursively one element at a time.
 * Unknown names are fatal. Sets the cfg_cmsg_types bits.
 */
static void parse_cmsg_types(const char *type)
{
	char *next = strchr(type, ',');
	unsigned int len = 0;
	cfg_cmsg_types.cmsg_enabled = 1;
	if (next) {
		/* handle the tail first, then this element up to the comma */
		parse_cmsg_types(next + 1);
		len = next - type;
	} else {
		len = strlen(type);
	}
	if (strncmp(type, "TIMESTAMPNS", len) == 0) {
		cfg_cmsg_types.timestampns = 1;
		return;
	}
	if (strncmp(type, "TCPINQ", len) == 0) {
		cfg_cmsg_types.tcp_inq = 1;
		return;
	}
	fprintf(stderr, "Unrecognized cmsg option %s\n", type);
	exit(1);
}
/* Parse the -o argument: a comma-separated list of option names
 * ("TRANSPARENT", "MPTFO"), handled recursively one element at a time.
 * Unknown names are fatal. Sets the cfg_sockopt_types bits.
 */
static void parse_setsock_options(const char *name)
{
	char *next = strchr(name, ',');
	unsigned int len = 0;
	if (next) {
		parse_setsock_options(next + 1);
		len = next - name;
	} else {
		len = strlen(name);
	}
	if (strncmp(name, "TRANSPARENT", len) == 0) {
		cfg_sockopt_types.transparent = 1;
		return;
	}
	if (strncmp(name, "MPTFO", len) == 0) {
		cfg_sockopt_types.mptfo = 1;
		return;
	}
	fprintf(stderr, "Unrecognized setsockopt option %s\n", name);
	exit(1);
}
/* Gracefully tear down the MPTCP-level connection on @fd: shut down the
 * write side, wait (bounded by poll_timeout) until the out queue drains,
 * then disconnect via connect(AF_UNSPEC). Aborts via xerror() on failure.
 */
void xdisconnect(int fd, int addrlen)
{
	struct sockaddr_storage empty;
	int msec_sleep = 10;
	int queued = 1;
	int i;
	shutdown(fd, SHUT_WR);
	/* while until the pending data is completely flushed, the later
	 * disconnect will bypass/ignore/drop any pending data.
	 */
	for (i = 0; ; i += msec_sleep) {
		if (ioctl(fd, SIOCOUTQ, &queued) < 0)
			xerror("can't query out socket queue: %d", errno);
		if (!queued)
			break;
		if (i > poll_timeout)
			xerror("timeout while waiting for spool to complete");
		usleep(msec_sleep * 1000);
	}
	memset(&empty, 0, sizeof(empty));
	empty.ss_family = AF_UNSPEC;
	if (connect(fd, (struct sockaddr *)&empty, addrlen) < 0)
		xerror("can't disconnect: %d", errno);
}
/* Client main loop: connect, run the configured transfer, and optionally
 * disconnect/reconnect -I times or force truncation teardown (-f).
 * Returns 0 on success, non-zero on error.
 *
 * Fix: both open() results were validated by testing 'fd' instead of
 * 'fd_in', so a failed open of the -i input file went undetected (and fd 0
 * / an unrelated socket fd was checked instead).
 */
int main_loop(void)
{
	int fd = 0, ret, fd_in = 0;
	struct addrinfo *peer;
	struct wstate winfo;

	if (cfg_input && cfg_sockopt_types.mptfo) {
		fd_in = open(cfg_input, O_RDONLY);
		if (fd_in < 0)
			xerror("can't open %s:%d", cfg_input, errno);
	}

	memset(&winfo, 0, sizeof(winfo));
	fd = sock_connect_mptcp(cfg_host, cfg_port, cfg_sock_proto, &peer, fd_in, &winfo);
	if (fd < 0)
		return 2;

again:
	check_getpeername_connect(fd);

	SOCK_TEST_TCPULP(fd, cfg_sock_proto);

	if (cfg_rcvbuf)
		set_rcvbuf(fd, cfg_rcvbuf);
	if (cfg_sndbuf)
		set_sndbuf(fd, cfg_sndbuf);
	if (cfg_cmsg_types.cmsg_enabled)
		apply_cmsg_types(fd, &cfg_cmsg_types);

	if (cfg_input && !cfg_sockopt_types.mptfo) {
		fd_in = open(cfg_input, O_RDONLY);
		if (fd_in < 0)
			xerror("can't open %s:%d", cfg_input, errno);
	}

	ret = copyfd_io(fd_in, fd, 1, 0, &winfo);
	if (ret)
		return ret;

	if (cfg_truncate > 0) {
		xdisconnect(fd, peer->ai_addrlen);
	} else if (--cfg_repeat > 0) {
		xdisconnect(fd, peer->ai_addrlen);

		/* the socket could be unblocking at this point, we need the
		 * connect to be blocking
		 */
		set_nonblock(fd, false);
		if (connect(fd, peer->ai_addr, peer->ai_addrlen))
			xerror("can't reconnect: %d", errno);

		if (cfg_input)
			close(fd_in);
		memset(&winfo, 0, sizeof(winfo));
		goto again;
	} else {
		close(fd);
	}

	return 0;
}
/* Map "MPTCP"/"TCP" (case-insensitive) to an IPPROTO_* constant; anything
 * else prints an error plus usage and exits.
 * Fix: the error format was garbled ("%s\n." put a lone dot on the next
 * line); the sentence now ends before the newline.
 */
int parse_proto(const char *proto)
{
	if (!strcasecmp(proto, "MPTCP"))
		return IPPROTO_MPTCP;
	if (!strcasecmp(proto, "TCP"))
		return IPPROTO_TCP;

	fprintf(stderr, "Unknown protocol: %s.\n", proto);
	die_usage();

	/* silence compiler warning */
	return 0;
}
/* Map the -m argument ("poll", "mmap", "sendfile", case-insensitive) to a
 * cfg_mode value; unknown modes print the supported list and exit.
 */
int parse_mode(const char *mode)
{
	if (!strcasecmp(mode, "poll"))
		return CFG_MODE_POLL;
	if (!strcasecmp(mode, "mmap"))
		return CFG_MODE_MMAP;
	if (!strcasecmp(mode, "sendfile"))
		return CFG_MODE_SENDFILE;
	fprintf(stderr, "Unknown test mode: %s\n", mode);
	fprintf(stderr, "Supported modes are:\n");
	fprintf(stderr, "\t\t\"poll\" - interleaved read/write using poll()\n");
	fprintf(stderr, "\t\t\"mmap\" - send entire input file (mmap+write), then read response (-l will read input first)\n");
	fprintf(stderr, "\t\t\"sendfile\" - send entire input file (sendfile), then read response (-l will read input first)\n");
	die_usage();
	/* silence compiler warning */
	return 0;
}
/* Map the -P argument ("saveWithPeek"/"saveAfterPeek", case-insensitive)
 * to a cfg_peek value; unknown modes print the supported list and exit.
 */
int parse_peek(const char *mode)
{
	if (!strcasecmp(mode, "saveWithPeek"))
		return CFG_WITH_PEEK;
	if (!strcasecmp(mode, "saveAfterPeek"))
		return CFG_AFTER_PEEK;
	fprintf(stderr, "Unknown: %s\n", mode);
	fprintf(stderr, "Supported MSG_PEEK mode are:\n");
	fprintf(stderr,
		"\t\t\"saveWithPeek\" - recv data with flags 'MSG_PEEK' and save the peek data into file\n");
	fprintf(stderr,
		"\t\t\"saveAfterPeek\" - read and save data into file after recv with flags 'MSG_PEEK'\n");
	die_usage();
	/* silence compiler warning */
	return 0;
}
/* Parse a non-negative integer (any strtoul base) bounded by INT_MAX;
 * bad input prints an error and the usage text, then exits.
 * Fix: the messages claimed "sndbuf" although this helper also parses the
 * -R (rcvbuf) argument; they now speak of a generic size.
 */
static int parse_int(const char *size)
{
	unsigned long s;

	errno = 0;

	s = strtoul(size, NULL, 0);

	if (errno) {
		fprintf(stderr, "Invalid size %s (%s)\n",
			size, strerror(errno));
		die_usage();
	}

	if (s > INT_MAX) {
		fprintf(stderr, "Invalid size %s (%s)\n",
			size, strerror(ERANGE));
		die_usage();
	}

	return (int)s;
}
/* Parse the command line into the cfg_* globals; the single positional
 * argument is the address to connect to / listen on. Exits via die_usage()
 * on malformed input. A ':' in the address implies IPv6.
 */
static void parse_opts(int argc, char **argv)
{
	int c;
	while ((c = getopt(argc, argv, "6c:f:hi:I:jlm:M:o:p:P:r:R:s:S:t:T:w:")) != -1) {
		switch (c) {
		case 'f':
			cfg_truncate = atoi(optarg);
			/* when receiving a fastclose, ignore PIPE signals and
			 * all the I/O errors later in the code
			 */
			if (cfg_truncate < 0) {
				cfg_rcv_trunc = true;
				signal(SIGPIPE, handle_signal);
			}
			break;
		case 'j':
			cfg_join = true;
			cfg_mode = CFG_MODE_POLL;
			break;
		case 'r':
			cfg_remove = true;
			cfg_mode = CFG_MODE_POLL;
			cfg_wait = 400000;
			cfg_do_w = atoi(optarg);
			if (cfg_do_w <= 0)
				cfg_do_w = 50;
			break;
		case 'i':
			cfg_input = optarg;
			break;
		case 'I':
			cfg_repeat = atoi(optarg);
			break;
		case 'l':
			listen_mode = true;
			break;
		case 'p':
			cfg_port = optarg;
			break;
		case 's':
			cfg_sock_proto = parse_proto(optarg);
			break;
		case 'h':
			die_usage();
			break;
		case '6':
			pf = AF_INET6;
			break;
		case 't':
			poll_timeout = atoi(optarg) * 1000;
			if (poll_timeout <= 0)
				poll_timeout = -1;	/* -1: poll() waits forever */
			break;
		case 'T':
			cfg_time = atoi(optarg);
			break;
		case 'm':
			cfg_mode = parse_mode(optarg);
			break;
		case 'S':
			cfg_sndbuf = parse_int(optarg);
			break;
		case 'R':
			cfg_rcvbuf = parse_int(optarg);
			break;
		case 'w':
			cfg_wait = atoi(optarg)*1000000;
			break;
		case 'M':
			cfg_mark = strtol(optarg, NULL, 0);
			break;
		case 'P':
			cfg_peek = parse_peek(optarg);
			break;
		case 'c':
			parse_cmsg_types(optarg);
			break;
		case 'o':
			parse_setsock_options(optarg);
			break;
		}
	}
	if (optind + 1 != argc)
		die_usage();
	cfg_host = argv[optind];
	if (strchr(cfg_host, ':'))
		pf = AF_INET6;
}
/* Entry point: seed the RNG, install the SIGUSR1 quit handler, parse the
 * command line, then run either the server (-l) or the client loop.
 */
int main(int argc, char *argv[])
{
	init_rng();
	signal(SIGUSR1, handle_signal);
	parse_opts(argc, argv);
	if (listen_mode) {
		int fd = sock_listen_mptcp(cfg_host, cfg_port);
		if (fd < 0)
			return 1;
		/* buffer sizes / mark / cmsg options apply to the listener and
		 * are inherited by accepted sockets
		 */
		if (cfg_rcvbuf)
			set_rcvbuf(fd, cfg_rcvbuf);
		if (cfg_sndbuf)
			set_sndbuf(fd, cfg_sndbuf);
		if (cfg_mark)
			set_mark(fd, cfg_mark);
		if (cfg_cmsg_types.cmsg_enabled)
			apply_cmsg_types(fd, &cfg_cmsg_types);
		return main_loop_s(fd);
	}
	return main_loop();
}
| linux-master | tools/testing/selftests/net/mptcp/mptcp_connect.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <netinet/tcp.h>
#include <sys/un.h>
#include <sys/signal.h>
#include <sys/poll.h>
static int pipefd[2];		/* parent<->producer synchronization pipe */
static int signal_recvd;	/* last signal number seen by sig_hand(); NOTE(review): volatile sig_atomic_t would be the conforming type */
static pid_t producer_id;	/* child (producer) pid, SIGTERMed in die() */
static char sock_name[32];	/* AF_UNIX socket path, unlinked on exit */
/* SA_SIGINFO handler: record the number of the signal that arrived
 * (used to detect SIGURG delivery in the tests).
 */
static void sig_hand(int sn, siginfo_t *si, void *p)
{
	signal_recvd = sn;
}
/* Install sig_hand() for @signal with SA_SIGINFO | SA_RESTART.
 * Returns sigaction()'s result (0 on success, -1 on error).
 */
static int set_sig_handler(int signal)
{
	struct sigaction sa;
	sa.sa_sigaction = sig_hand;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	return sigaction(signal, &sa, NULL);
}
/* Make @fd blocking (@set non-zero) or non-blocking (@set == 0).
 * fcntl() errors are ignored, matching the rest of this test.
 */
static void set_filemode(int fd, int set)
{
	int flags = fcntl(fd, F_GETFL, 0);

	flags = set ? (flags & ~O_NONBLOCK) : (flags | O_NONBLOCK);
	fcntl(fd, F_SETFL, flags);
}
/* Wake the peer process by writing a single 'S' byte to the sync pipe. */
static void signal_producer(int fd)
{
	const char cmd = 'S';

	write(fd, &cmd, sizeof(cmd));
}
/* Block until the peer writes to the sync pipe; drains up to 5 bytes. */
static void wait_for_signal(int fd)
{
	char scratch[5];

	read(fd, scratch, sizeof(scratch));
}
/* Clean up (flush stdio, remove the socket file, stop the producer child)
 * and exit with @status.
 */
static void die(int status)
{
	fflush(NULL);
	unlink(sock_name);
	kill(producer_id, SIGTERM);
	exit(status);
}
/* Query SIOCATMARK: returns 1 if @fd's read pointer is at the OOB mark,
 * 0 if not, -1 if the ioctl failed.
 * Fix: ioctl(2) takes (fd, request, argp) — the stray fourth 'sizeof'
 * argument was meaningless and has been dropped.
 */
int is_sioctatmark(int fd)
{
	int ans = -1;

	if (ioctl(fd, SIOCATMARK, &ans) < 0) {
#ifdef DEBUG
		perror("SIOCATMARK Failed");
#endif
	}
	return ans;
}
/* Receive one out-of-band byte from @fd into *c; *c is left as ' ' when
 * the recv fails (e.g. no OOB data pending).
 */
void read_oob(int fd, char *c)
{
	ssize_t ret;

	*c = ' ';
	ret = recv(fd, c, sizeof(*c), MSG_OOB);
#ifdef DEBUG
	if (ret < 0)
		perror("Reading MSG_OOB Failed");
#else
	(void)ret;
#endif
}
/* Read up to @size bytes from @pfd into @buf, zeroing the buffer first.
 * Returns read()'s result (bytes read, 0 on EOF, negative on error).
 * Fix: the arguments to memset were swapped — memset(buf, size, '0')
 * filled only '0' (48) bytes with the low byte of @size instead of
 * clearing @size bytes.
 */
int read_data(int pfd, char *buf, int size)
{
	int len;

	memset(buf, 0, size);
	len = read(pfd, buf, size);
#ifdef DEBUG
	if (len < 0)
		perror("read failed");
#endif
	return len;
}
/* Block (no timeout) until @event is signalled on @pfd. */
static void wait_for_data(int pfd, int event)
{
	struct pollfd pol = {
		.fd = pfd,
		.events = (short)event,
	};

	poll(&pol, 1, -1);
}
/* Child process: connect to the consumer's AF_UNIX socket and, twice,
 * drive the four OOB scenarios the parent checks (simple OOB+SIGURG,
 * OOB overwrite, SIOCATMARK positioning, 1-byte OOB message). Each step
 * waits for a go-ahead byte on the sync pipe.
 */
void producer(struct sockaddr_un *consumer_addr)
{
	int cfd;
	char buf[64];
	int i;
	memset(buf, 'x', sizeof(buf));
	cfd = socket(AF_UNIX, SOCK_STREAM, 0);
	wait_for_signal(pipefd[0]);
	if (connect(cfd, (struct sockaddr *)consumer_addr,
		    sizeof(*consumer_addr)) != 0) {
		perror("Connect failed");
		kill(0, SIGTERM);
		exit(1);
	}
	for (i = 0; i < 2; i++) {
		/* Test 1: Test for SIGURG and OOB */
		wait_for_signal(pipefd[0]);
		memset(buf, 'x', sizeof(buf));
		buf[63] = '@';	/* last byte of an MSG_OOB send is the OOB byte */
		send(cfd, buf, sizeof(buf), MSG_OOB);
		wait_for_signal(pipefd[0]);
		/* Test 2: Test for OOB being overwitten */
		memset(buf, 'x', sizeof(buf));
		buf[63] = '%';
		send(cfd, buf, sizeof(buf), MSG_OOB);
		memset(buf, 'x', sizeof(buf));
		buf[63] = '#';
		send(cfd, buf, sizeof(buf), MSG_OOB);
		wait_for_signal(pipefd[0]);
		/* Test 3: Test for SIOCATMARK */
		memset(buf, 'x', sizeof(buf));
		buf[63] = '@';
		send(cfd, buf, sizeof(buf), MSG_OOB);
		memset(buf, 'x', sizeof(buf));
		buf[63] = '%';
		send(cfd, buf, sizeof(buf), MSG_OOB);
		memset(buf, 'x', sizeof(buf));
		send(cfd, buf, sizeof(buf), 0);
		wait_for_signal(pipefd[0]);
		/* Test 4: Test for 1byte OOB msg */
		memset(buf, 'x', sizeof(buf));
		buf[0] = '@';
		send(cfd, buf, 1, MSG_OOB);
	}
}
/*
 * Consumer/parent: accepts the producer's connection and walks through
 * eight lock-stepped checks of AF_UNIX out-of-band handling — first with
 * OOB data delivered out of line, then again with SO_OOBINLINE set.
 * Synchronization with the forked producer happens over a pipe
 * (signal_producer()/wait_for_signal()); SIGURG delivery is tracked in
 * the global signal_recvd.  Any mismatch calls die(1), which also kills
 * the producer.
 */
int
main(int argc, char **argv)
{
	int lfd, pfd;
	struct sockaddr_un consumer_addr, paddr;
	socklen_t len = sizeof(consumer_addr);
	char buf[1024];
	int on = 0;
	char oob;
	int flags;	/* unused */
	int atmark;
	char *tmp_file;	/* unused */

	lfd = socket(AF_UNIX, SOCK_STREAM, 0);
	memset(&consumer_addr, 0, sizeof(consumer_addr));
	consumer_addr.sun_family = AF_UNIX;
	sprintf(sock_name, "unix_oob_%d", getpid());	/* unique per run */
	unlink(sock_name);
	strcpy(consumer_addr.sun_path, sock_name);

	if ((bind(lfd, (struct sockaddr *)&consumer_addr,
		  sizeof(consumer_addr))) != 0) {
		perror("socket bind failed");
		exit(1);
	}

	pipe(pipefd);

	listen(lfd, 1);

	producer_id = fork();
	if (producer_id == 0) {
		producer(&consumer_addr);
		exit(0);
	}

	set_sig_handler(SIGURG);
	signal_producer(pipefd[1]);	/* let the producer connect() */

	pfd = accept(lfd, (struct sockaddr *) &paddr, &len);
	fcntl(pfd, F_SETOWN, getpid());	/* direct SIGURG at ourselves */

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 1:
	 * verify that SIGURG is
	 * delivered, 63 bytes are
	 * read, oob is '@', and POLLPRI works.
	 */
	wait_for_data(pfd, POLLPRI);
	read_oob(pfd, &oob);
	len = read_data(pfd, buf, 1024);
	if (!signal_recvd || len != 63 || oob != '@') {
		fprintf(stderr, "Test 1 failed sigurg %d len %d %c\n",
			signal_recvd, len, oob);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 2:
	 * Verify that the first OOB is over written by
	 * the 2nd one and the first OOB is returned as
	 * part of the read, and sigurg is received.
	 */
	wait_for_data(pfd, POLLIN | POLLPRI);
	/* spin with MSG_PEEK until both 64-byte sends have arrived */
	len = 0;
	while (len < 70)
		len = recv(pfd, buf, 1024, MSG_PEEK);
	len = read_data(pfd, buf, 1024);
	read_oob(pfd, &oob);
	if (!signal_recvd || len != 127 || oob != '#') {
		fprintf(stderr, "Test 2 failed, sigurg %d len %d OOB %c\n",
			signal_recvd, len, oob);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 3:
	 * verify that 2nd oob over writes
	 * the first one and read breaks at
	 * oob boundary returning 127 bytes
	 * and sigurg is received and atmark
	 * is set.
	 * oob is '%' and second read returns
	 * 64 bytes.
	 */
	len = 0;
	wait_for_data(pfd, POLLIN | POLLPRI);
	while (len < 150)
		len = recv(pfd, buf, 1024, MSG_PEEK);
	len = read_data(pfd, buf, 1024);
	atmark = is_sioctatmark(pfd);
	read_oob(pfd, &oob);
	if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
		fprintf(stderr,
			"Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
			signal_recvd, len, oob, atmark);
		die(1);
	}

	signal_recvd = 0;
	len = read_data(pfd, buf, 1024);
	if (len != 64) {
		fprintf(stderr, "Test 3.1 failed, sigurg %d len %d OOB %c\n",
			signal_recvd, len, oob);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 4:
	 * verify that a single byte
	 * oob message is delivered.
	 * set non blocking mode and
	 * check proper error is
	 * returned and sigurg is
	 * received and correct
	 * oob is read.
	 */
	set_filemode(pfd, 0);	/* non-blocking */
	wait_for_data(pfd, POLLIN | POLLPRI);
	len = read_data(pfd, buf, 1024);
	if ((len == -1) && (errno == 11))	/* EAGAIN: only the OOB byte arrived */
		len = 0;
	read_oob(pfd, &oob);
	if (!signal_recvd || len != 0 || oob != '@') {
		fprintf(stderr, "Test 4 failed, sigurg %d len %d OOB %c\n",
			signal_recvd, len, oob);
		die(1);
	}
	set_filemode(pfd, 1);	/* back to blocking */

	/* Inline Testing */
	on = 1;
	if (setsockopt(pfd, SOL_SOCKET, SO_OOBINLINE, &on, sizeof(on))) {
		perror("SO_OOBINLINE");
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 1 -- Inline:
	 * Check that SIGURG is
	 * delivered and 63 bytes are
	 * read and oob is '@'
	 */
	wait_for_data(pfd, POLLIN | POLLPRI);
	len = read_data(pfd, buf, 1024);
	if (!signal_recvd || len != 63) {
		fprintf(stderr, "Test 1 Inline failed, sigurg %d len %d\n",
			signal_recvd, len);
		die(1);
	}

	len = read_data(pfd, buf, 1024);
	if (len != 1) {
		fprintf(stderr,
			"Test 1.1 Inline failed, sigurg %d len %d oob %c\n",
			signal_recvd, len, oob);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 2 -- Inline:
	 * Verify that the first OOB is over written by
	 * the 2nd one and read breaks correctly on
	 * 2nd OOB boundary with the first OOB returned as
	 * part of the read, and sigurg is delivered and
	 * siocatmark returns true.
	 * next read returns one byte, the oob byte
	 * and siocatmark returns false.
	 */
	len = 0;
	wait_for_data(pfd, POLLIN | POLLPRI);
	while (len < 70)
		len = recv(pfd, buf, 1024, MSG_PEEK);
	len = read_data(pfd, buf, 1024);
	atmark = is_sioctatmark(pfd);
	if (len != 127 || atmark != 1 || !signal_recvd) {
		fprintf(stderr, "Test 2 Inline failed, len %d atmark %d\n",
			len, atmark);
		die(1);
	}

	len = read_data(pfd, buf, 1024);
	atmark = is_sioctatmark(pfd);
	if (len != 1 || buf[0] != '#' || atmark == 1) {
		fprintf(stderr, "Test 2.1 Inline failed, len %d data %c atmark %d\n",
			len, buf[0], atmark);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 3 -- Inline:
	 * verify that 2nd oob over writes
	 * the first one and read breaks at
	 * oob boundary returning 127 bytes
	 * and sigurg is received and siocatmark
	 * is true after the read.
	 * subsequent read returns 65 bytes
	 * because of oob which should be '%'.
	 */
	len = 0;
	wait_for_data(pfd, POLLIN | POLLPRI);
	while (len < 126)
		len = recv(pfd, buf, 1024, MSG_PEEK);
	len = read_data(pfd, buf, 1024);
	atmark = is_sioctatmark(pfd);
	if (!signal_recvd || len != 127 || !atmark) {
		fprintf(stderr,
			"Test 3 Inline failed, sigurg %d len %d data %c\n",
			signal_recvd, len, buf[0]);
		die(1);
	}

	len = read_data(pfd, buf, 1024);
	atmark = is_sioctatmark(pfd);
	if (len != 65 || buf[0] != '%' || atmark != 0) {
		fprintf(stderr,
			"Test 3.1 Inline failed, len %d oob %c atmark %d\n",
			len, buf[0], atmark);
		die(1);
	}

	signal_recvd = 0;
	signal_producer(pipefd[1]);

	/* Test 4 -- Inline:
	 * verify that a single
	 * byte oob message is delivered
	 * and read returns one byte, the oob
	 * byte and sigurg is received
	 */
	wait_for_data(pfd, POLLIN | POLLPRI);
	len = read_data(pfd, buf, 1024);
	if (!signal_recvd || len != 1 || buf[0] != '@') {
		fprintf(stderr,
			"Test 4 Inline failed, signal %d len %d data %c\n",
			signal_recvd, len, buf[0]);
		die(1);
	}
	die(0);
}
| linux-master | tools/testing/selftests/net/af_unix/test_unix_oob.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "../../kselftest_harness.h"
/* Per-test state: one listening server socket and one client socket. */
FIXTURE(unix_connect)
{
	int server, client;
	int family;
};

/*
 * Test matrix: socket type (stream/dgram) x address style
 * (pathname/abstract) x whether the client connects from a freshly
 * unshared network namespace.  @err is the errno expected from
 * connect(), 0 for success.  @len includes the trailing NUL for
 * pathnames and the leading NUL for abstract names.
 */
FIXTURE_VARIANT(unix_connect)
{
	int type;
	char sun_path[8];
	int len;
	int flags;
	int err;
};

FIXTURE_VARIANT_ADD(unix_connect, stream_pathname)
{
	.type = SOCK_STREAM,
	.sun_path = "test",
	.len = 4 + 1,
	.flags = 0,
	.err = 0,
};

FIXTURE_VARIANT_ADD(unix_connect, stream_abstract)
{
	.type = SOCK_STREAM,
	.sun_path = "\0test",
	.len = 5,
	.flags = 0,
	.err = 0,
};

/* pathname sockets are filesystem objects, so a new netns can still
 * reach them: connect() is expected to succeed */
FIXTURE_VARIANT_ADD(unix_connect, stream_pathname_netns)
{
	.type = SOCK_STREAM,
	.sun_path = "test",
	.len = 4 + 1,
	.flags = CLONE_NEWNET,
	.err = 0,
};

/* abstract names are per-netns: connect() from a new netns must fail */
FIXTURE_VARIANT_ADD(unix_connect, stream_abstract_netns)
{
	.type = SOCK_STREAM,
	.sun_path = "\0test",
	.len = 5,
	.flags = CLONE_NEWNET,
	.err = ECONNREFUSED,
};

FIXTURE_VARIANT_ADD(unix_connect, dgram_pathname)
{
	.type = SOCK_DGRAM,
	.sun_path = "test",
	.len = 4 + 1,
	.flags = 0,
	.err = 0,
};

FIXTURE_VARIANT_ADD(unix_connect, dgram_abstract)
{
	.type = SOCK_DGRAM,
	.sun_path = "\0test",
	.len = 5,
	.flags = 0,
	.err = 0,
};

FIXTURE_VARIANT_ADD(unix_connect, dgram_pathname_netns)
{
	.type = SOCK_DGRAM,
	.sun_path = "test",
	.len = 4 + 1,
	.flags = CLONE_NEWNET,
	.err = 0,
};

FIXTURE_VARIANT_ADD(unix_connect, dgram_abstract_netns)
{
	.type = SOCK_DGRAM,
	.sun_path = "\0test",
	.len = 5,
	.flags = CLONE_NEWNET,
	.err = ECONNREFUSED,
};
FIXTURE_SETUP(unix_connect)
{
	self->family = AF_UNIX;
}

FIXTURE_TEARDOWN(unix_connect)
{
	close(self->server);
	close(self->client);

	/* only pathname sockets leave an inode behind; abstract names
	 * (leading '\0') vanish with the socket */
	if (variant->sun_path[0])
		remove("test");
}
/*
 * Bind and (for streams) listen a server, optionally move the client
 * into a fresh network namespace via unshare(), then verify connect()
 * succeeds or fails with exactly the errno the variant expects.
 */
TEST_F(unix_connect, test)
{
	socklen_t addrlen;
	struct sockaddr_un addr = {
		.sun_family = self->family,
	};
	int err;

	self->server = socket(self->family, variant->type, 0);
	ASSERT_NE(-1, self->server);

	/* variant->len already accounts for the NUL placement, so the
	 * bound length is exact for both pathname and abstract forms */
	addrlen = offsetof(struct sockaddr_un, sun_path) + variant->len;
	memcpy(&addr.sun_path, variant->sun_path, variant->len);

	err = bind(self->server, (struct sockaddr *)&addr, addrlen);
	ASSERT_EQ(0, err);

	if (variant->type == SOCK_STREAM) {
		err = listen(self->server, 32);
		ASSERT_EQ(0, err);
	}

	/* unshare(0) is a no-op, so non-netns variants pass through */
	err = unshare(variant->flags);
	ASSERT_EQ(0, err);

	self->client = socket(self->family, variant->type, 0);
	ASSERT_LT(0, self->client);

	err = connect(self->client, (struct sockaddr *)&addr, addrlen);
	ASSERT_EQ(variant->err, err == -1 ? errno : 0);
}

TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/net/af_unix/unix_connect.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
#define _GNU_SOURCE
#include <error.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/socket.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/un.h>
#include <sys/signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "../../kselftest_harness.h"
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) \
fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", __FILE__, __LINE__, \
clean_errno(), ##__VA_ARGS__)
#ifndef SCM_PIDFD
#define SCM_PIDFD 0x04
#endif
/*
 * Terminate the forked child with a failing status so the parent's
 * waitpid()/WEXITSTATUS check reports the failure.
 * ('(void)' gives this a proper prototype; an empty '()' parameter
 * list is an obsolescent unspecified-arguments declaration in C.)
 */
static void child_die(void)
{
	exit(1);
}
/*
 * Convert @numstr (decimal/hex/octal per strtol base 0) to an int with
 * full error checking.  Returns 0 on success and stores the value in
 * *converted; -ERANGE on overflow/underflow or a value outside int's
 * range; -EINVAL on an empty string, trailing garbage, or other
 * conversion failure.
 */
static int safe_int(const char *numstr, int *converted)
{
	char *endp = NULL;
	long val;

	errno = 0;
	val = strtol(numstr, &endp, 0);

	if (errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
		return -ERANGE;
	if (errno != 0 && val == 0)
		return -EINVAL;
	if (endp == numstr || *endp != '\0')
		return -EINVAL;
	if (val > INT_MAX || val < INT_MIN)
		return -ERANGE;

	*converted = (int)val;
	return 0;
}
/*
 * Return the index of the first character in @buffer that is not a
 * space or tab; 0 if the whole buffer (or an empty buffer) is blank.
 */
static int char_left_gc(const char *buffer, size_t len)
{
	size_t pos;

	for (pos = 0; pos < len; pos++)
		if (buffer[pos] != ' ' && buffer[pos] != '\t')
			return pos;

	return 0;
}
/*
 * Return one past the index of the last character in @buffer that is
 * not whitespace (space/tab/newline) or NUL; 0 if there is none.
 */
static int char_right_gc(const char *buffer, size_t len)
{
	int pos = (int)len;

	while (--pos >= 0) {
		char c = buffer[pos];

		if (c != ' ' && c != '\t' && c != '\n' && c != '\0')
			return pos + 1;
	}

	return 0;
}
/*
 * Strip leading and trailing whitespace from @buffer in place: advance
 * past the leading blanks and write a NUL after the last non-blank
 * character.  Returns a pointer into @buffer at the trimmed start.
 */
static char *trim_whitespace_in_place(char *buffer)
{
	char *start = buffer + char_left_gc(buffer, strlen(buffer));

	start[char_right_gc(start, strlen(start))] = '\0';
	return start;
}
/*
 * Parse /proc/self/fdinfo/<pidfd> and return the numeric value that
 * follows @key (e.g. "Pid:"), or -1 on any open/parse failure.
 * Borrowed (with all helpers) from pidfd/pidfd_open_test.c.
 *
 * Fix: skip @keylen bytes past the matched key instead of a hard-coded
 * 4, so keys of any length work (the parameter was previously unused
 * at that point).
 */
static pid_t get_pid_from_fdinfo_file(int pidfd, const char *key, size_t keylen)
{
	int ret;
	char path[512];
	FILE *f;
	size_t n = 0;
	pid_t result = -1;
	char *line = NULL;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);

	f = fopen(path, "re");
	if (!f)
		return -1;

	while (getline(&line, &n, f) != -1) {
		char *numstr;

		if (strncmp(line, key, keylen))
			continue;

		numstr = trim_whitespace_in_place(line + keylen);
		ret = safe_int(numstr, &result);
		if (ret < 0)
			goto out;

		break;
	}

out:
	free(line);
	fclose(f);
	return result;
}
/*
 * Receive one message on @fd and verify its ancillary data:
 *  - the payload byte is 'x' (what the parent sent),
 *  - an SCM_PIDFD cmsg is present and its pidfd resolves (via
 *    /proc/self/fdinfo) to the parent's PID,
 *  - an SCM_CREDENTIALS cmsg is present.
 * Returns 0 on success, 1 on any failure (logged via log_err).
 */
static int cmsg_check(int fd)
{
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct iovec iov;
	struct ucred *ucred = NULL;
	int data = 0;
	/* room for both expected cmsgs: credentials + pidfd */
	char control[CMSG_SPACE(sizeof(struct ucred)) +
		     CMSG_SPACE(sizeof(int))] = { 0 };
	int *pidfd = NULL;
	pid_t parent_pid;
	int err;

	iov.iov_base = &data;
	iov.iov_len = sizeof(data);

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	err = recvmsg(fd, &msg, 0);
	if (err < 0) {
		log_err("recvmsg");
		return 1;
	}

	/* either data or control truncation means the test is invalid */
	if (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
		log_err("recvmsg: truncated");
		return 1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_PIDFD) {
			if (cmsg->cmsg_len < sizeof(*pidfd)) {
				log_err("CMSG parse: SCM_PIDFD wrong len");
				return 1;
			}

			pidfd = (void *)CMSG_DATA(cmsg);
		}

		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			if (cmsg->cmsg_len < sizeof(*ucred)) {
				log_err("CMSG parse: SCM_CREDENTIALS wrong len");
				return 1;
			}

			ucred = (void *)CMSG_DATA(cmsg);
		}
	}

	/* send(pfd, "x", sizeof(char), 0) */
	if (data != 'x') {
		log_err("recvmsg: data corruption");
		return 1;
	}

	if (!pidfd) {
		log_err("CMSG parse: SCM_PIDFD not found");
		return 1;
	}

	if (!ucred) {
		log_err("CMSG parse: SCM_CREDENTIALS not found");
		return 1;
	}

	/* pidfd from SCM_PIDFD should point to the parent process PID */
	parent_pid =
		get_pid_from_fdinfo_file(*pidfd, "Pid:", sizeof("Pid:") - 1);
	if (parent_pid != getppid()) {
		log_err("wrong SCM_PIDFD %d != %d", parent_pid, getppid());
		return 1;
	}

	return 0;
}
/* An AF_UNIX address plus the name used to create (and later unlink) it. */
struct sock_addr {
	char sock_name[32];
	struct sockaddr_un listen_addr;
	socklen_t addrlen;
};

/*
 * Per-test state.  client_addr lives in a shared anonymous mapping so
 * the forked client can publish its bound datagram address back to the
 * parent (see FIXTURE_SETUP and client()).
 */
FIXTURE(scm_pidfd)
{
	int server;
	pid_t client_pid;
	int startup_pipe[2];
	struct sock_addr server_addr;
	struct sock_addr *client_addr;
};

/* Matrix: stream vs datagram x pathname vs abstract namespace. */
FIXTURE_VARIANT(scm_pidfd)
{
	int type;
	bool abstract;
};

FIXTURE_VARIANT_ADD(scm_pidfd, stream_pathname)
{
	.type = SOCK_STREAM,
	.abstract = 0,
};

FIXTURE_VARIANT_ADD(scm_pidfd, stream_abstract)
{
	.type = SOCK_STREAM,
	.abstract = 1,
};

FIXTURE_VARIANT_ADD(scm_pidfd, dgram_pathname)
{
	.type = SOCK_DGRAM,
	.abstract = 0,
};

FIXTURE_VARIANT_ADD(scm_pidfd, dgram_abstract)
{
	.type = SOCK_DGRAM,
	.abstract = 1,
};
FIXTURE_SETUP(scm_pidfd)
{
	/* MAP_SHARED so the child's bind address is visible to the parent.
	 * NOTE(review): mmap()/PROT_*/MAP_* presumably come in through the
	 * harness header — <sys/mman.h> is not included here directly;
	 * confirm. */
	self->client_addr = mmap(NULL, sizeof(*self->client_addr), PROT_READ | PROT_WRITE,
				 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(MAP_FAILED, self->client_addr);
}

FIXTURE_TEARDOWN(scm_pidfd)
{
	close(self->server);
	/* the client may still be blocked in recvmsg(); reap it hard */
	kill(self->client_pid, SIGKILL);
	waitpid(self->client_pid, NULL, 0);

	/* only pathname sockets leave filesystem entries behind */
	if (!variant->abstract) {
		unlink(self->server_addr.sock_name);
		unlink(self->client_addr->sock_name);
	}
}
/*
 * Build a unique AF_UNIX address ("scm_pidfd_<pid>") in @addr: either a
 * pathname socket (any stale file is unlinked first) or, when @abstract
 * is set, an abstract-namespace name (leading NUL byte, no filesystem
 * entry, length counted explicitly in addrlen).
 */
static void fill_sockaddr(struct sock_addr *addr, bool abstract)
{
	char *path = (char *)&addr->listen_addr.sun_path;

	addr->listen_addr.sun_family = AF_UNIX;
	snprintf(addr->sock_name, sizeof(addr->sock_name),
		 "scm_pidfd_%d", getpid());
	addr->addrlen = offsetof(struct sockaddr_un, sun_path) +
			strlen(addr->sock_name);

	if (abstract) {
		/* abstract names start with '\0' and are length-delimited */
		*path++ = '\0';
		addr->addrlen++;
	} else {
		unlink(addr->sock_name);
	}

	memcpy(path, addr->sock_name, strlen(addr->sock_name));
}
/*
 * Forked client: connect to the server, enable SO_PASSCRED and
 * SO_PASSPIDFD, signal readiness by closing its end of the startup
 * pipe, then validate the received ancillary data via cmsg_check().
 * For stream sockets also cross-check SO_PEERCRED/SO_PEERPIDFD against
 * the parent's PID.  Any failure exits with status 1 via child_die().
 */
static void client(FIXTURE_DATA(scm_pidfd) *self,
		   const FIXTURE_VARIANT(scm_pidfd) *variant)
{
	int err;
	int cfd;
	socklen_t len;
	struct ucred peer_cred;
	int peer_pidfd;
	pid_t peer_pid;
	int on = 0;

	cfd = socket(AF_UNIX, variant->type, 0);
	if (cfd < 0) {
		log_err("socket");
		child_die();
	}

	if (variant->type == SOCK_DGRAM) {
		/* datagram sockets must bind so the server can sendto() us;
		 * the address lands in the shared mapping for the parent */
		fill_sockaddr(self->client_addr, variant->abstract);

		if (bind(cfd, (struct sockaddr *)&self->client_addr->listen_addr, self->client_addr->addrlen)) {
			log_err("bind");
			child_die();
		}
	}

	if (connect(cfd, (struct sockaddr *)&self->server_addr.listen_addr,
		    self->server_addr.addrlen) != 0) {
		log_err("connect");
		child_die();
	}

	on = 1;
	if (setsockopt(cfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on))) {
		log_err("Failed to set SO_PASSCRED");
		child_die();
	}

	if (setsockopt(cfd, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on))) {
		log_err("Failed to set SO_PASSPIDFD");
		child_die();
	}

	/* closing the write end unblocks the parent's read() checkpoint */
	close(self->startup_pipe[1]);

	if (cmsg_check(cfd)) {
		log_err("cmsg_check failed");
		child_die();
	}

	/* skip further for SOCK_DGRAM as it's not applicable */
	if (variant->type == SOCK_DGRAM)
		return;

	len = sizeof(peer_cred);
	if (getsockopt(cfd, SOL_SOCKET, SO_PEERCRED, &peer_cred, &len)) {
		log_err("Failed to get SO_PEERCRED");
		child_die();
	}

	len = sizeof(peer_pidfd);
	if (getsockopt(cfd, SOL_SOCKET, SO_PEERPIDFD, &peer_pidfd, &len)) {
		log_err("Failed to get SO_PEERPIDFD");
		child_die();
	}

	/* pid from SO_PEERCRED should point to the parent process PID */
	if (peer_cred.pid != getppid()) {
		log_err("peer_cred.pid != getppid(): %d != %d", peer_cred.pid, getppid());
		child_die();
	}

	/* and the pidfd from SO_PEERPIDFD must agree with that pid */
	peer_pid = get_pid_from_fdinfo_file(peer_pidfd,
					    "Pid:", sizeof("Pid:") - 1);
	if (peer_pid != peer_cred.pid) {
		log_err("peer_pid != peer_cred.pid: %d != %d", peer_pid, peer_cred.pid);
		child_die();
	}
}
/*
 * Parent side: create the server socket, fork the client, send one byte
 * ('x'), and let the child validate the SCM_PIDFD/SCM_CREDENTIALS
 * ancillary data (and, for streams, the SO_PEERPIDFD/SO_PEERCRED socket
 * options).  The child's exit status is the verdict.
 */
TEST_F(scm_pidfd, test)
{
	int err;
	int pfd;
	int child_status = 0;

	self->server = socket(AF_UNIX, variant->type, 0);
	ASSERT_NE(-1, self->server);

	fill_sockaddr(&self->server_addr, variant->abstract);

	err = bind(self->server, (struct sockaddr *)&self->server_addr.listen_addr, self->server_addr.addrlen);
	ASSERT_EQ(0, err);

	if (variant->type == SOCK_STREAM) {
		err = listen(self->server, 1);
		ASSERT_EQ(0, err);
	}

	err = pipe(self->startup_pipe);
	ASSERT_NE(-1, err);

	self->client_pid = fork();
	ASSERT_NE(-1, self->client_pid);

	if (self->client_pid == 0) {
		/* child keeps only its startup-pipe write end */
		close(self->server);
		close(self->startup_pipe[0]);
		client(self, variant);
		exit(0);
	}

	close(self->startup_pipe[1]);

	if (variant->type == SOCK_STREAM) {
		pfd = accept(self->server, NULL, NULL);
		ASSERT_NE(-1, pfd);
	} else {
		pfd = self->server;
	}

	/* wait until the child arrives at checkpoint */
	read(self->startup_pipe[0], &err, sizeof(int));
	close(self->startup_pipe[0]);

	if (variant->type == SOCK_DGRAM) {
		err = sendto(pfd, "x", sizeof(char), 0, (struct sockaddr *)&self->client_addr->listen_addr, self->client_addr->addrlen);
		ASSERT_NE(-1, err);
	} else {
		err = send(pfd, "x", sizeof(char), 0);
		ASSERT_NE(-1, err);
	}

	close(pfd);
	waitpid(self->client_pid, &child_status, 0);
	ASSERT_EQ(0, WIFEXITED(child_status) ? WEXITSTATUS(child_status) : 1);
}

TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/net/af_unix/scm_pidfd.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include "../../kselftest_harness.h"
/* Per-test state: a netlink sock-diag socket plus the AF_UNIX socket
 * whose inode and cookie we will query. */
FIXTURE(diag_uid)
{
	int netlink_fd;
	int unix_fd;
	__u32 inode;
	__u64 cookie;
};

/* Run the UNIX_DIAG_UID query both in the initial user namespace and
 * after unshare(CLONE_NEWUSER). */
FIXTURE_VARIANT(diag_uid)
{
	int unshare;
	int udiag_show;
};

FIXTURE_VARIANT_ADD(diag_uid, uid)
{
	.unshare = 0,
	.udiag_show = UDIAG_SHOW_UID
};

FIXTURE_VARIANT_ADD(diag_uid, uid_unshare)
{
	.unshare = CLONE_NEWUSER,
	.udiag_show = UDIAG_SHOW_UID
};
FIXTURE_SETUP(diag_uid)
{
	struct stat file_stat;
	socklen_t optlen;
	int ret;

	/* unshare first so the sockets are created in the new userns */
	if (variant->unshare)
		ASSERT_EQ(unshare(variant->unshare), 0);

	self->netlink_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	ASSERT_NE(self->netlink_fd, -1);

	self->unix_fd = socket(AF_UNIX, SOCK_STREAM, 0);
	ASSERT_NE(self->unix_fd, -1);

	/* the diag request identifies the socket by inode + SO_COOKIE */
	ret = fstat(self->unix_fd, &file_stat);
	ASSERT_EQ(ret, 0);
	self->inode = file_stat.st_ino;

	optlen = sizeof(self->cookie);
	ret = getsockopt(self->unix_fd, SOL_SOCKET, SO_COOKIE, &self->cookie, &optlen);
	ASSERT_EQ(ret, 0);
}

FIXTURE_TEARDOWN(diag_uid)
{
	close(self->netlink_fd);
	close(self->unix_fd);
}
/*
 * Send one SOCK_DIAG_BY_FAMILY request for the fixture's AF_UNIX socket
 * (matched by inode + cookie) over the netlink socket, asking for the
 * attributes in variant->udiag_show.  Returns sendmsg()'s result.
 */
int send_request(struct __test_metadata *_metadata,
		 FIXTURE_DATA(diag_uid) *self,
		 const FIXTURE_VARIANT(diag_uid) *variant)
{
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req udr;
	} req = {
		.nlh = {
			.nlmsg_len = sizeof(req),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST
		},
		.udr = {
			.sdiag_family = AF_UNIX,
			.udiag_ino = self->inode,
			/* 64-bit cookie is carried as two __u32 halves */
			.udiag_cookie = {
				(__u32)self->cookie,
				(__u32)(self->cookie >> 32)
			},
			.udiag_show = variant->udiag_show
		}
	};
	struct sockaddr_nl nladdr = {
		.nl_family = AF_NETLINK
	};
	struct iovec iov = {
		.iov_base = &req,
		.iov_len = sizeof(req)
	};
	struct msghdr msg = {
		.msg_name = &nladdr,
		.msg_namelen = sizeof(nladdr),
		.msg_iov = &iov,
		.msg_iovlen = 1
	};

	return sendmsg(self->netlink_fd, &msg, 0);
}
/*
 * Validate one SOCK_DIAG_BY_FAMILY reply payload: family must be
 * AF_UNIX and the first attribute must be UNIX_DIAG_UID carrying our
 * own UID.
 *
 * NOTE(review): the payload is parsed here as struct unix_diag_req;
 * the kernel's diag reply is normally described by struct
 * unix_diag_msg, whose size differs — confirm the intended type, since
 * it determines where the attributes start.
 */
void render_response(struct __test_metadata *_metadata,
		     struct unix_diag_req *udr, __u32 len)
{
	unsigned int rta_len = len - NLMSG_LENGTH(sizeof(*udr));
	struct rtattr *attr;
	uid_t uid;

	ASSERT_GT(len, sizeof(*udr));
	ASSERT_EQ(udr->sdiag_family, AF_UNIX);

	attr = (struct rtattr *)(udr + 1);
	ASSERT_NE(RTA_OK(attr, rta_len), 0);
	ASSERT_EQ(attr->rta_type, UNIX_DIAG_UID);

	uid = *(uid_t *)RTA_DATA(attr);
	ASSERT_EQ(uid, getuid());
}
/*
 * Receive the netlink reply, hand its payload to render_response(), and
 * check that exactly one message record was returned (the follow-up
 * NLMSG_OK must be false).
 */
void receive_response(struct __test_metadata *_metadata,
		      FIXTURE_DATA(diag_uid) *self)
{
	long buf[8192 / sizeof(long)];	/* long-aligned receive buffer */
	struct sockaddr_nl nladdr = {
		.nl_family = AF_NETLINK
	};
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = sizeof(buf)
	};
	struct msghdr msg = {
		.msg_name = &nladdr,
		.msg_namelen = sizeof(nladdr),
		.msg_iov = &iov,
		.msg_iovlen = 1
	};
	struct unix_diag_req *udr;
	struct nlmsghdr *nlh;
	int ret;

	ret = recvmsg(self->netlink_fd, &msg, 0);
	ASSERT_GT(ret, 0);

	nlh = (struct nlmsghdr *)buf;
	ASSERT_NE(NLMSG_OK(nlh, ret), 0);
	ASSERT_EQ(nlh->nlmsg_type, SOCK_DIAG_BY_FAMILY);

	render_response(_metadata, NLMSG_DATA(nlh), nlh->nlmsg_len);

	/* no second record expected for a single-socket query */
	nlh = NLMSG_NEXT(nlh, ret);
	ASSERT_EQ(NLMSG_OK(nlh, ret), 0);
}
/* End-to-end: send the diag request, then validate the reply. */
TEST_F(diag_uid, 1)
{
	int ret;

	ret = send_request(_metadata, self, variant);
	ASSERT_GT(ret, 0);

	receive_response(_metadata, self);
}

TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/net/af_unix/diag_uid.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <malloc.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/memfd.h>
#include <linux/udmabuf.h>
#define TEST_PREFIX "drivers/dma-buf/udmabuf"
#define NUM_PAGES 4
/*
 * Thin wrapper: older glibc did not expose memfd_create(2), so invoke
 * the raw syscall directly.
 */
static int memfd_create(const char *name, unsigned int flags)
{
	long fd = syscall(__NR_memfd_create, name, flags);

	return (int)fd;
}
/*
 * udmabuf selftest: exercise UDMABUF_CREATE with three invalid
 * parameter sets (unaligned offset, non-page-multiple size, non-memfd
 * fd) that must fail, followed by one valid request that must succeed.
 * Exits 77 (kselftest "skip") when the device or memfd support is
 * missing, 1 on failure, 0 on success.
 */
int main(int argc, char *argv[])
{
	struct udmabuf_create create;
	int devfd, memfd, buf, ret;
	off_t size;
	void *mem;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0) {
		printf("%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
		       TEST_PREFIX);
		exit(77);
	}

	memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	if (memfd < 0) {
		printf("%s: [skip,no-memfd]\n", TEST_PREFIX);
		exit(77);
	}

	/* udmabuf requires the backing memfd to be sealed against shrink */
	ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
	if (ret < 0) {
		printf("%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
		exit(77);
	}

	size = getpagesize() * NUM_PAGES;
	ret = ftruncate(memfd, size);
	if (ret == -1) {
		printf("%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
		exit(1);
	}

	memset(&create, 0, sizeof(create));

	/* should fail (offset not page aligned) */
	create.memfd = memfd;
	create.offset = getpagesize()/2;
	create.size = getpagesize();
	buf = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buf >= 0) {
		printf("%s: [FAIL,test-1]\n", TEST_PREFIX);
		exit(1);
	}

	/* should fail (size not multiple of page) */
	create.memfd = memfd;
	create.offset = 0;
	create.size = getpagesize()/2;
	buf = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buf >= 0) {
		printf("%s: [FAIL,test-2]\n", TEST_PREFIX);
		exit(1);
	}

	/* should fail (not memfd) */
	create.memfd = 0; /* stdin */
	create.offset = 0;
	create.size = size;
	buf = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buf >= 0) {
		printf("%s: [FAIL,test-3]\n", TEST_PREFIX);
		exit(1);
	}

	/* should work */
	create.memfd = memfd;
	create.offset = 0;
	create.size = size;
	buf = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buf < 0) {
		printf("%s: [FAIL,test-4]\n", TEST_PREFIX);
		exit(1);
	}

	fprintf(stderr, "%s: ok\n", TEST_PREFIX);
	close(buf);
	close(memfd);
	close(devfd);
	return 0;
}
| linux-master | tools/testing/selftests/drivers/dma-buf/udmabuf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* selftest for the Ultravisor UAPI device
*
* Copyright IBM Corp. 2022
* Author(s): Steffen Eiden <[email protected]>
*/
#include <stdint.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/uvdevice.h>
#include "../../../kselftest_harness.h"
#define UV_PATH "/dev/uv"
#define BUFFER_SIZE 0x200
/* Generic fixture: an open /dev/uv fd plus an ioctl control block that
 * points at a local argument buffer. */
FIXTURE(uvio_fixture) {
	int uv_fd;
	struct uvio_ioctl_cb uvio_ioctl;
	uint8_t buffer[BUFFER_SIZE];
	__u64 fault_page;
};

FIXTURE_VARIANT(uvio_fixture) {
	unsigned long ioctl_cmd;
	uint32_t arg_size;
};

/* attestation is the only UV ioctl exercised through this fixture */
FIXTURE_VARIANT_ADD(uvio_fixture, att) {
	.ioctl_cmd = UVIO_IOCTL_ATT,
	.arg_size = sizeof(struct uvio_attest),
};
FIXTURE_SETUP(uvio_fixture)
{
	/* O_ACCMODE (mode 3) is Linux's nonstandard "neither read nor
	 * write" open mode, sufficient for an ioctl-only device —
	 * presumably intentional; confirm. */
	self->uv_fd = open(UV_PATH, O_ACCMODE);

	self->uvio_ioctl.argument_addr = (__u64)self->buffer;
	self->uvio_ioctl.argument_len = variant->arg_size;
	/* NOTE(review): MAP_ANONYMOUS without MAP_PRIVATE/MAP_SHARED makes
	 * mmap() fail with EINVAL, so fault_page becomes MAP_FAILED (-1) —
	 * still an invalid address for the EFAULT tests, but MAP_PRIVATE
	 * was presumably intended; confirm. */
	self->fault_page =
		(__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0);
}

FIXTURE_TEARDOWN(uvio_fixture)
{
	if (self->uv_fd)
		close(self->uv_fd);
	/* harmless no-op when the setup mmap failed */
	munmap((void *)self->fault_page, (size_t)getpagesize());
}
/* Passing a NULL or unmapped pointer as the ioctl argument itself must
 * fail with EFAULT. */
TEST_F(uvio_fixture, fault_ioctl_arg)
{
	int rc, errno_cache;

	rc = ioctl(self->uv_fd, variant->ioctl_cmd, NULL);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);

	rc = ioctl(self->uv_fd, variant->ioctl_cmd, self->fault_page);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);
}
/* A valid control block whose argument_addr is NULL or unmapped must
 * fail with EFAULT. */
TEST_F(uvio_fixture, fault_uvio_arg)
{
	int rc, errno_cache;

	self->uvio_ioctl.argument_addr = 0;
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);

	self->uvio_ioctl.argument_addr = self->fault_page;
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);
}
/*
 * Test to verify that IOCTLs with invalid values in the ioctl_control block
 * are rejected.
 */
TEST_F(uvio_fixture, inval_ioctl_cb)
{
	int rc, errno_cache;

	/* zero-length argument */
	self->uvio_ioctl.argument_len = 0;
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);

	/* absurdly large argument length */
	self->uvio_ioctl.argument_len = (uint32_t)-1;
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);
	self->uvio_ioctl.argument_len = variant->arg_size;

	/* unsupported flags must be rejected */
	self->uvio_ioctl.flags = (uint32_t)-1;
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);
	self->uvio_ioctl.flags = 0;

	/* reserved bytes must be zero */
	memset(self->uvio_ioctl.reserved14, 0xff, sizeof(self->uvio_ioctl.reserved14));
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);

	/* fully garbage control block must also fail */
	memset(&self->uvio_ioctl, 0x11, sizeof(self->uvio_ioctl));
	rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
	ASSERT_EQ(rc, -1);
}
/* Variations of the ioctl command word (wrong type, wrong size, wrong
 * direction) must all be rejected with ENOTTY. */
TEST_F(uvio_fixture, inval_ioctl_cmd)
{
	int rc, errno_cache;
	uint8_t nr = _IOC_NR(variant->ioctl_cmd);
	unsigned long cmds[] = {
		_IOWR('a', nr, struct uvio_ioctl_cb),	/* wrong magic */
		_IOWR(UVIO_TYPE_UVC, nr, int),		/* wrong size */
		_IO(UVIO_TYPE_UVC, nr),			/* no direction */
		_IOR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb),	/* read only */
		_IOW(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb),	/* write only */
	};

	for (size_t i = 0; i < ARRAY_SIZE(cmds); i++) {
		rc = ioctl(self->uv_fd, cmds[i], &self->uvio_ioctl);
		errno_cache = errno;
		ASSERT_EQ(rc, -1);
		ASSERT_EQ(errno_cache, ENOTTY);
	}
}
/* Backing storage for the three attestation buffers: request control
 * block (ARCB), measurement output, and additional data output. */
struct test_attest_buffer {
	uint8_t arcb[0x180];
	uint8_t meas[64];
	uint8_t add[32];
};

FIXTURE(attest_fixture) {
	int uv_fd;
	struct uvio_ioctl_cb uvio_ioctl;
	struct uvio_attest uvio_attest;
	struct test_attest_buffer attest_buffer;
	__u64 fault_page;
};
FIXTURE_SETUP(attest_fixture)
{
	/* ioctl-only open mode (Linux access mode 3); see uvio_fixture */
	self->uv_fd = open(UV_PATH, O_ACCMODE);

	/* chain: ioctl cb -> uvio_attest -> the three attest buffers */
	self->uvio_ioctl.argument_addr = (__u64)&self->uvio_attest;
	self->uvio_ioctl.argument_len = sizeof(self->uvio_attest);

	self->uvio_attest.arcb_addr = (__u64)&self->attest_buffer.arcb;
	self->uvio_attest.arcb_len = sizeof(self->attest_buffer.arcb);

	self->uvio_attest.meas_addr = (__u64)&self->attest_buffer.meas;
	self->uvio_attest.meas_len = sizeof(self->attest_buffer.meas);

	self->uvio_attest.add_data_addr = (__u64)&self->attest_buffer.add;
	self->uvio_attest.add_data_len = sizeof(self->attest_buffer.add);
	/* NOTE(review): same MAP_ANONYMOUS-without-MAP_PRIVATE quirk as in
	 * the uvio_fixture setup — the result is MAP_FAILED; confirm. */
	self->fault_page =
		(__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0);
}

FIXTURE_TEARDOWN(attest_fixture)
{
	if (self->uv_fd)
		close(self->uv_fd);
	munmap((void *)self->fault_page, (size_t)getpagesize());
}
/*
 * Helper: temporarily set *size to an invalid value (0 when @test_zero
 * is set, and always max_size + 1), expect the attestation ioctl to
 * fail with EINVAL each time, then restore the original value.
 */
static void att_inval_sizes_test(uint32_t *size, uint32_t max_size, bool test_zero,
				 struct __test_metadata *_metadata,
				 FIXTURE_DATA(attest_fixture) *self)
{
	int rc, errno_cache;
	uint32_t tmp = *size;

	if (test_zero) {
		*size = 0;
		rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
		errno_cache = errno;
		ASSERT_EQ(rc, -1);
		ASSERT_EQ(errno_cache, EINVAL);
	}

	*size = max_size + 1;
	rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);

	*size = tmp;
}
/*
 * Test to verify that attestation IOCTLs with invalid values in the UVIO
 * attestation control block are rejected.
 */
TEST_F(attest_fixture, att_inval_request)
{
	int rc, errno_cache;

	/* zero additional data is legal, so only test the upper bound */
	att_inval_sizes_test(&self->uvio_attest.add_data_len, UVIO_ATT_ADDITIONAL_MAX_LEN,
			     false, _metadata, self);
	att_inval_sizes_test(&self->uvio_attest.meas_len, UVIO_ATT_MEASUREMENT_MAX_LEN,
			     true, _metadata, self);
	att_inval_sizes_test(&self->uvio_attest.arcb_len, UVIO_ATT_ARCB_MAX_LEN,
			     true, _metadata, self);

	/* reserved field must be zero */
	self->uvio_attest.reserved136 = (uint16_t)-1;
	rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EINVAL);

	/* fully garbage attest block must also fail */
	memset(&self->uvio_attest, 0x11, sizeof(self->uvio_attest));
	rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
	ASSERT_EQ(rc, -1);
}
/*
 * Helper: temporarily point *addr at NULL and at the unmapped fault
 * page, expect EFAULT from the attestation ioctl both times, then
 * restore the original address.
 */
static void att_inval_addr_test(__u64 *addr, struct __test_metadata *_metadata,
				FIXTURE_DATA(attest_fixture) *self)
{
	int rc, errno_cache;
	__u64 tmp = *addr;

	*addr = 0;
	rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);

	*addr = self->fault_page;
	rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
	errno_cache = errno;
	ASSERT_EQ(rc, -1);
	ASSERT_EQ(errno_cache, EFAULT);

	*addr = tmp;
}

/* Each of the three attestation buffer addresses must be validated. */
TEST_F(attest_fixture, att_inval_addr)
{
	att_inval_addr_test(&self->uvio_attest.arcb_addr, _metadata, self);
	att_inval_addr_test(&self->uvio_attest.add_data_addr, _metadata, self);
	att_inval_addr_test(&self->uvio_attest.meas_addr, _metadata, self);
}
/* Run fixture constructors in reverse registration order; hooks into
 * the kselftest harness's constructor-ordering mechanism. */
static void __attribute__((constructor)) __constructor_order_last(void)
{
	if (!__constructor_order)
		__constructor_order = _CONSTRUCTOR_ORDER_BACKWARD;
}

/* Skip the whole suite early when /dev/uv is absent or inaccessible. */
int main(int argc, char **argv)
{
	int fd = open(UV_PATH, O_ACCMODE);

	if (fd < 0)
		ksft_exit_skip("No uv-device or cannot access " UV_PATH "\n"
			       "Enable CONFIG_S390_UV_UAPI and check the access rights on "
			       UV_PATH ".\n");
	close(fd);
	return test_harness_run(argc, argv);
}
| linux-master | tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c |
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <drm/drm.h>
#define DEVPATH "/dev/dma_heap"
/* Return nonzero when @fd is a DRM device whose driver name is "vgem". */
static int check_vgem(int fd)
{
	char name[5];
	drm_version_t version = { 0 };

	version.name = name;
	version.name_len = 4;
	if (ioctl(fd, DRM_IOCTL_VERSION, &version) != 0)
		return 0;

	return strcmp(name, "vgem") == 0;
}
/*
 * Probe /dev/dri/card0 .. card15 and return an open fd for the first
 * vgem device found, or -1 when none is present.
 */
static int open_vgem(void)
{
	const char *drmstr = "/dev/dri/card";
	int idx;

	for (idx = 0; idx < 16; idx++) {
		char name[80];
		int fd;

		snprintf(name, 80, "%s%u", drmstr, idx);

		fd = open(name, O_RDWR);
		if (fd < 0)
			continue;

		if (check_vgem(fd))
			return fd;

		close(fd);
	}
	return -1;
}
static int import_vgem_fd(int vgem_fd, int dma_buf_fd, uint32_t *handle)
{
struct drm_prime_handle import_handle = {
.fd = dma_buf_fd,
.flags = 0,
.handle = 0,
};
int ret;
ret = ioctl(vgem_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_handle);
if (ret == 0)
*handle = import_handle.handle;
return ret;
}
static void close_handle(int vgem_fd, uint32_t handle)
{
struct drm_gem_close close = {
.handle = handle,
};
ioctl(vgem_fd, DRM_IOCTL_GEM_CLOSE, &close);
}
/*
 * Open the dma-heap character device for @name (e.g. "system") under
 * DEVPATH.  Returns an open fd, or a negative value on error.
 *
 * Fix: also reject snprintf truncation (ret >= sizeof(buf)); the
 * original only checked ret < 0, so an over-long heap name would have
 * silently opened a truncated path.
 */
static int dmabuf_heap_open(char *name)
{
	int ret, fd;
	char buf[256];

	ret = snprintf(buf, 256, "%s/%s", DEVPATH, name);
	if (ret < 0 || ret >= (int)sizeof(buf)) {
		printf("snprintf failed!\n");
		return ret < 0 ? ret : -1;
	}

	fd = open(buf, O_RDWR);
	if (fd < 0)
		printf("open %s failed!\n", buf);
	return fd;
}
/*
 * Allocate @len bytes from the dma-heap @fd via DMA_HEAP_IOCTL_ALLOC.
 * On success the new dma-buf fd is stored in *dmabuf_fd and the ioctl
 * result (>= 0) is returned; returns -EINVAL when dmabuf_fd is NULL
 * and the negative ioctl result on failure.
 */
static int dmabuf_heap_alloc_fdflags(int fd, size_t len, unsigned int fd_flags,
				     unsigned int heap_flags, int *dmabuf_fd)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd = 0,
		.fd_flags = fd_flags,
		.heap_flags = heap_flags,
	};
	int ret;

	if (!dmabuf_fd)
		return -EINVAL;

	ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0)
		return ret;
	*dmabuf_fd = (int)data.fd;
	return ret;
}

/* Convenience wrapper: allocate with the usual O_RDWR|O_CLOEXEC fd flags. */
static int dmabuf_heap_alloc(int fd, size_t len, unsigned int flags,
			     int *dmabuf_fd)
{
	return dmabuf_heap_alloc_fdflags(fd, len, O_RDWR | O_CLOEXEC, flags,
					 dmabuf_fd);
}
static int dmabuf_sync(int fd, int start_stop)
{
struct dma_buf_sync sync = {
.flags = start_stop | DMA_BUF_SYNC_RW,
};
return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
}
#define ONE_MEG (1024 * 1024)

/*
 * Allocate 1 MiB from @heap_name, mmap it and write a pattern with the
 * proper SYNC_START/SYNC_END bracketing, then (when a vgem device is
 * available) import the dma-buf into vgem via PRIME and write again
 * while imported.  Returns 0 on success, -1 on failure; missing vgem
 * downgrades the import step to a skip.
 */
static int test_alloc_and_import(char *heap_name)
{
	int heap_fd = -1, dmabuf_fd = -1, importer_fd = -1;
	uint32_t handle = 0;
	void *p = NULL;
	int ret;

	heap_fd = dmabuf_heap_open(heap_name);
	if (heap_fd < 0)
		return -1;

	printf("  Testing allocation and importing:  ");
	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (ret) {
		printf("FAIL (Allocation Failed!)\n");
		ret = -1;
		goto out;
	}

	/* mmap and write a simple pattern */
	p = mmap(NULL,
		 ONE_MEG,
		 PROT_READ | PROT_WRITE,
		 MAP_SHARED,
		 dmabuf_fd,
		 0);
	if (p == MAP_FAILED) {
		printf("FAIL (mmap() failed)\n");
		ret = -1;
		goto out;
	}

	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
	memset(p, 1, ONE_MEG / 2);
	memset((char *)p + ONE_MEG / 2, 0, ONE_MEG / 2);
	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);

	importer_fd = open_vgem();
	if (importer_fd < 0) {
		ret = importer_fd;
		printf("(Could not open vgem - skipping):  ");
	} else {
		ret = import_vgem_fd(importer_fd, dmabuf_fd, &handle);
		if (ret < 0) {
			printf("FAIL (Failed to import buffer)\n");
			goto out;
		}
	}

	/* write again while the buffer is imported into vgem */
	ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
	if (ret < 0) {
		printf("FAIL (DMA_BUF_SYNC_START failed!)\n");
		goto out;
	}

	memset(p, 0xff, ONE_MEG);
	ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
	if (ret < 0) {
		printf("FAIL (DMA_BUF_SYNC_END failed!)\n");
		goto out;
	}

	close_handle(importer_fd, handle);
	ret = 0;
	printf(" OK\n");

out:
	/* goto-cleanup: release everything acquired so far, in reverse */
	if (p)
		munmap(p, ONE_MEG);
	if (importer_fd >= 0)
		close(importer_fd);
	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	if (heap_fd >= 0)
		close(heap_fd);

	return ret;
}
/*
 * Allocates 32 buffers of @size bytes, fills them with 0xff, frees them,
 * then re-allocates 32 buffers and verifies the heap handed back zeroed
 * memory.  Returns 0 on success, negative on failure.
 */
static int test_alloc_zeroed(char *heap_name, size_t size)
{
	int heap_fd = -1, dmabuf_fd[32];
	int i, j, ret;
	void *p = NULL;
	char *c;

	/* %zu matches the size_t argument (the old %ld did not, portably). */
	printf(" Testing alloced %zuk buffers are zeroed: ", size / 1024);
	heap_fd = dmabuf_heap_open(heap_name);
	if (heap_fd < 0)
		return -1;

	/* Allocate and fill a bunch of buffers */
	for (i = 0; i < 32; i++) {
		ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
		if (ret < 0) {
			printf("FAIL (Allocation (%i) failed)\n", i);
			goto out;
		}
		/* mmap and fill with simple pattern */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
		if (p == MAP_FAILED) {
			printf("FAIL (mmap() failed!)\n");
			ret = -1;
			i++;	/* dmabuf_fd[i] is open: include it in cleanup. */
			goto out;
		}
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
		memset(p, 0xff, size);
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
		munmap(p, size);
	}
	/* close them all */
	for (i = 0; i < 32; i++)
		close(dmabuf_fd[i]);

	/* Allocate and validate all buffers are zeroed */
	for (i = 0; i < 32; i++) {
		ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
		if (ret < 0) {
			printf("FAIL (Allocation (%i) failed)\n", i);
			goto out;
		}
		/* mmap and validate everything is zero */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
		if (p == MAP_FAILED) {
			printf("FAIL (mmap() failed!)\n");
			ret = -1;
			i++;
			goto out;
		}
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
		c = (char *)p;
		for (j = 0; j < size; j++) {
			if (c[j] != 0) {
				printf("FAIL (Allocated buffer not zeroed @ %i)\n", j);
				/*
				 * Actually fail the test: the old code only
				 * printed FAIL and then returned success.
				 */
				dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
				munmap(p, size);
				ret = -1;
				i++;
				goto out;
			}
		}
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
		munmap(p, size);
	}
	/* close them all */
	for (i = 0; i < 32; i++)
		close(dmabuf_fd[i]);
	close(heap_fd);
	printf("OK\n");
	return 0;
out:
	/*
	 * Close exactly the successfully allocated FDs: indices [0, i).
	 * The old loop closed [1, i] — leaking dmabuf_fd[0] and closing the
	 * failed (invalid) dmabuf_fd[i].
	 */
	while (i > 0) {
		i--;
		close(dmabuf_fd[i]);
	}
	close(heap_fd);
	return ret;
}
/* Test the ioctl version compatibility w/ a smaller structure then expected */
static int dmabuf_heap_alloc_older(int fd, size_t len, unsigned int flags,
				   int *dmabuf_fd)
{
	int ret;
	unsigned int older_alloc_ioctl;
	/* Pre-heap_flags layout of struct dma_heap_allocation_data. */
	struct dma_heap_allocation_data_smaller {
		__u64 len;
		__u32 fd;
		__u32 fd_flags;
	} data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};

	if (!dmabuf_fd)
		return -EINVAL;

	/* Same ioctl number/magic, but with the shrunken argument size. */
	older_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
				  struct dma_heap_allocation_data_smaller);
	ret = ioctl(fd, older_alloc_ioctl, &data);
	if (ret >= 0)
		*dmabuf_fd = (int)data.fd;
	return ret;
}
/* Test the ioctl version compatibility w/ a larger structure then expected */
static int dmabuf_heap_alloc_newer(int fd, size_t len, unsigned int flags,
				   int *dmabuf_fd)
{
	int ret;
	unsigned int newer_alloc_ioctl;
	/* Hypothetical future layout with extra trailing fields. */
	struct dma_heap_allocation_data_bigger {
		__u64 len;
		__u32 fd;
		__u32 fd_flags;
		__u64 heap_flags;
		__u64 garbage1;
		__u64 garbage2;
		__u64 garbage3;
	} data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
		.heap_flags = flags,
		/* Non-zero padding the kernel must tolerate or reject. */
		.garbage1 = 0xffffffff,
		.garbage2 = 0x88888888,
		.garbage3 = 0x11111111,
	};

	if (!dmabuf_fd)
		return -EINVAL;

	/* Same ioctl number/magic, but with the grown argument size. */
	newer_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
				  struct dma_heap_allocation_data_bigger);
	ret = ioctl(fd, newer_alloc_ioctl, &data);
	if (ret >= 0)
		*dmabuf_fd = (int)data.fd;
	return ret;
}
/*
 * Checks that the allocation ioctl stays compatible with both a smaller
 * (older) and a bigger (newer) version of struct dma_heap_allocation_data.
 * Returns 0 on success.
 */
static int test_alloc_compat(char *heap_name)
{
	int heap_fd = -1, dmabuf_fd = -1;
	int ret;

	heap_fd = dmabuf_heap_open(heap_name);
	if (heap_fd < 0)
		return -1;

	printf(" Testing (theoretical)older alloc compat: ");
	ret = dmabuf_heap_alloc_older(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (ret) {
		printf("FAIL (Older compat allocation failed!)\n");
		ret = -1;
		goto out;
	}
	close(dmabuf_fd);
	/*
	 * Reset the FD so that, if the next allocation fails, the cleanup
	 * path does not close an already-closed (possibly reused) FD.
	 */
	dmabuf_fd = -1;
	printf("OK\n");

	printf(" Testing (theoretical)newer alloc compat: ");
	ret = dmabuf_heap_alloc_newer(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (ret) {
		printf("FAIL (Newer compat allocation failed!)\n");
		ret = -1;
		goto out;
	}
	printf("OK\n");
out:
	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	if (heap_fd >= 0)
		close(heap_fd);
	return ret;
}
/*
 * Exercises the allocation ioctl's expected failure modes: a non-heap FD,
 * undefined heap flags, and undefined FD flags.  Returns 0 only if every
 * case fails as expected.
 */
static int test_alloc_errors(char *heap_name)
{
	int heap_fd = -1, dmabuf_fd = -1;
	int ret;

	heap_fd = dmabuf_heap_open(heap_name);
	if (heap_fd < 0)
		return -1;

	printf(" Testing expected error cases: ");
	/* FD 0 (stdin) is not a dma-heap device: the ioctl must fail. */
	ret = dmabuf_heap_alloc(0, ONE_MEG, 0x111111, &dmabuf_fd);
	if (!ret) {
		printf("FAIL (Did not see expected error (invalid fd)!)\n");
		ret = -1;
		goto out;
	}

	/* 0x111111 is not a defined heap flag and must be rejected. */
	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0x111111, &dmabuf_fd);
	if (!ret) {
		printf("FAIL (Did not see expected error (invalid heap flags)!)\n");
		ret = -1;
		goto out;
	}

	/* Every bit outside O_RDWR | O_CLOEXEC is an invalid FD flag. */
	ret = dmabuf_heap_alloc_fdflags(heap_fd, ONE_MEG,
					~(O_RDWR | O_CLOEXEC), 0, &dmabuf_fd);
	if (!ret) {
		printf("FAIL (Did not see expected error (invalid fd flags)!)\n");
		ret = -1;
		goto out;
	}

	printf("OK\n");
	ret = 0;
out:
	/* An unexpected success above leaves a live FD: release it. */
	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	if (heap_fd >= 0)
		close(heap_fd);
	return ret;
}
/*
 * Runs the whole dma-heap test battery against every heap advertised under
 * DEVPATH.  Stops at the first failing heap and returns its error code.
 */
int main(void)
{
	struct dirent *dent;
	int ret = -1;
	DIR *dir = opendir(DEVPATH);

	if (!dir) {
		printf("No %s directory?\n", DEVPATH);
		return -1;
	}

	while ((dent = readdir(dir))) {
		/* Skip the "." and ".." entries. */
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;

		printf("Testing heap: %s\n", dent->d_name);
		printf("=======================================\n");
		ret = test_alloc_and_import(dent->d_name);
		if (!ret)
			ret = test_alloc_zeroed(dent->d_name, 4 * 1024);
		if (!ret)
			ret = test_alloc_zeroed(dent->d_name, ONE_MEG);
		if (!ret)
			ret = test_alloc_compat(dent->d_name);
		if (!ret)
			ret = test_alloc_errors(dent->d_name);
		if (ret)
			break;
	}
	closedir(dir);
	return ret;
}
| linux-master | tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Always succeeds.  Built as the "./true" helper binary (see BINARY_PATH in
 * the Landlock filesystem tests) used as a trivial exec target.
 */
int main(void)
{
	return 0;
}
| linux-master | tools/testing/selftests/landlock/true.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Landlock tests - Common user space base
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2019-2020 ANSSI
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/landlock.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "common.h"
#ifndef O_PATH
#define O_PATH 010000000
#endif
/*
 * Checks landlock_create_ruleset() error handling for inconsistent pointer
 * and size combinations: undersized attributes, NULL pointers, oversized
 * buffers, and trailing non-zero bytes.
 */
TEST(inconsistent_attr)
{
	const long page_size = sysconf(_SC_PAGESIZE);
	char *const buf = malloc(page_size + 1);
	struct landlock_ruleset_attr *const ruleset_attr = (void *)buf;

	ASSERT_NE(NULL, buf);

	/* Checks copy_from_user(). */
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 0, 0));
	/* The size is less than sizeof(struct landlock_attr_enforce). */
	ASSERT_EQ(EINVAL, errno);
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 1, 0));
	ASSERT_EQ(EINVAL, errno);
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 7, 0));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 1, 0));
	/* The size is less than sizeof(struct landlock_attr_enforce). */
	ASSERT_EQ(EFAULT, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(
			      NULL, sizeof(struct landlock_ruleset_attr), 0));
	ASSERT_EQ(EFAULT, errno);

	/* A size above one page is rejected before the content is read. */
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
	ASSERT_EQ(E2BIG, errno);

	/* Checks minimal valid attribute size. */
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 8, 0));
	/* ENOMSG: the (all-zero) attribute handles no access right. */
	ASSERT_EQ(ENOMSG, errno);
	ASSERT_EQ(-1, landlock_create_ruleset(
			      ruleset_attr,
			      sizeof(struct landlock_ruleset_attr), 0));
	ASSERT_EQ(ENOMSG, errno);
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
	ASSERT_EQ(ENOMSG, errno);

	/* Checks non-zero value. */
	buf[page_size - 2] = '.';
	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
	ASSERT_EQ(E2BIG, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
	ASSERT_EQ(E2BIG, errno);

	free(buf);
}
/*
 * Checks LANDLOCK_CREATE_RULESET_VERSION: it must be used with a NULL
 * attribute and a zero size, and cannot be combined with other flags.
 * The ABI version expected by this test suite is 3.
 */
TEST(abi_version)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
	};

	ASSERT_EQ(3, landlock_create_ruleset(NULL, 0,
					     LANDLOCK_CREATE_RULESET_VERSION));

	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
					      LANDLOCK_CREATE_RULESET_VERSION));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
					      LANDLOCK_CREATE_RULESET_VERSION));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1,
		  landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr),
					  LANDLOCK_CREATE_RULESET_VERSION));
	ASSERT_EQ(EINVAL, errno);

	/* NOTE(review): 1U << 31 would avoid shifting into the sign bit. */
	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0,
					      LANDLOCK_CREATE_RULESET_VERSION |
						      1 << 31));
	ASSERT_EQ(EINVAL, errno);
}
/* Tests ordering of syscall argument checks. */
TEST(create_ruleset_checks_ordering)
{
	const int last_flag = LANDLOCK_CREATE_RULESET_VERSION;
	/* One bit above the highest defined flag: guaranteed invalid. */
	const int invalid_flag = last_flag << 1;
	int ruleset_fd;
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
	};

	/* Checks priority for invalid flags. */
	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, invalid_flag));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, invalid_flag));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
					      invalid_flag));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1,
		  landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr),
					  invalid_flag));
	ASSERT_EQ(EINVAL, errno);

	/* Checks too big ruleset_attr size. */
	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, -1, 0));
	ASSERT_EQ(E2BIG, errno);

	/* Checks too small ruleset_attr size. */
	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, 0));
	ASSERT_EQ(EINVAL, errno);
	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 1, 0));
	ASSERT_EQ(EINVAL, errno);

	/* Checks valid call. */
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
}
/* Tests ordering of syscall argument checks. */
TEST(add_rule_checks_ordering)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
	};
	struct landlock_path_beneath_attr path_beneath_attr = {
		.allowed_access = LANDLOCK_ACCESS_FS_EXECUTE,
		.parent_fd = -1,
	};
	const int ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);

	ASSERT_LE(0, ruleset_fd);

	/* Checks invalid flags. */
	ASSERT_EQ(-1, landlock_add_rule(-1, 0, NULL, 1));
	ASSERT_EQ(EINVAL, errno);

	/* Checks invalid ruleset FD. */
	ASSERT_EQ(-1, landlock_add_rule(-1, 0, NULL, 0));
	ASSERT_EQ(EBADF, errno);

	/* Checks invalid rule type. */
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, 0, NULL, 0));
	ASSERT_EQ(EINVAL, errno);

	/* Checks invalid rule attr. */
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					NULL, 0));
	ASSERT_EQ(EFAULT, errno);

	/* Checks invalid path_beneath.parent_fd. */
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath_attr, 0));
	ASSERT_EQ(EBADF, errno);

	/* Checks valid call. */
	path_beneath_attr.parent_fd =
		open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath_attr.parent_fd);
	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
				       &path_beneath_attr, 0));
	ASSERT_EQ(0, close(path_beneath_attr.parent_fd));
	ASSERT_EQ(0, close(ruleset_fd));
}
/* Tests ordering of syscall argument and permission checks. */
TEST(restrict_self_checks_ordering)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
	};
	struct landlock_path_beneath_attr path_beneath_attr = {
		.allowed_access = LANDLOCK_ACCESS_FS_EXECUTE,
		.parent_fd = -1,
	};
	const int ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);

	ASSERT_LE(0, ruleset_fd);
	path_beneath_attr.parent_fd =
		open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath_attr.parent_fd);
	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
				       &path_beneath_attr, 0));
	ASSERT_EQ(0, close(path_beneath_attr.parent_fd));

	/* Checks unprivileged enforcement without no_new_privs. */
	drop_caps(_metadata);
	/* EPERM takes priority over flag and FD validation here. */
	ASSERT_EQ(-1, landlock_restrict_self(-1, -1));
	ASSERT_EQ(EPERM, errno);
	ASSERT_EQ(-1, landlock_restrict_self(-1, 0));
	ASSERT_EQ(EPERM, errno);
	ASSERT_EQ(-1, landlock_restrict_self(ruleset_fd, 0));
	ASSERT_EQ(EPERM, errno);

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));

	/* Checks invalid flags. */
	ASSERT_EQ(-1, landlock_restrict_self(-1, -1));
	ASSERT_EQ(EINVAL, errno);

	/* Checks invalid ruleset FD. */
	ASSERT_EQ(-1, landlock_restrict_self(-1, 0));
	ASSERT_EQ(EBADF, errno);

	/* Checks valid call. */
	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
	ASSERT_EQ(0, close(ruleset_fd));
}
/* Checks that a ruleset FD rejects plain read(2) and write(2). */
TEST(ruleset_fd_io)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
	};
	int ruleset_fd;
	char buf;

	drop_caps(_metadata);
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);

	ASSERT_EQ(-1, write(ruleset_fd, ".", 1));
	ASSERT_EQ(EINVAL, errno);
	ASSERT_EQ(-1, read(ruleset_fd, &buf, 1));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(0, close(ruleset_fd));
}
/* Tests enforcement of a ruleset FD transferred through a UNIX socket. */
TEST(ruleset_fd_transfer)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
	};
	struct landlock_path_beneath_attr path_beneath_attr = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd_tx, dir_fd;
	int socket_fds[2];
	pid_t child;
	int status;

	drop_caps(_metadata);

	/* Creates a test ruleset with a simple rule. */
	ruleset_fd_tx =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd_tx);
	path_beneath_attr.parent_fd =
		open("/tmp", O_PATH | O_NOFOLLOW | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath_attr.parent_fd);
	ASSERT_EQ(0,
		  landlock_add_rule(ruleset_fd_tx, LANDLOCK_RULE_PATH_BENEATH,
				    &path_beneath_attr, 0));
	ASSERT_EQ(0, close(path_beneath_attr.parent_fd));

	/* Sends the ruleset FD over a socketpair and then close it. */
	ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0,
				socket_fds));
	ASSERT_EQ(0, send_fd(socket_fds[0], ruleset_fd_tx));
	ASSERT_EQ(0, close(socket_fds[0]));
	ASSERT_EQ(0, close(ruleset_fd_tx));

	child = fork();
	ASSERT_LE(0, child);
	if (child == 0) {
		const int ruleset_fd_rx = recv_fd(socket_fds[1]);

		ASSERT_LE(0, ruleset_fd_rx);
		ASSERT_EQ(0, close(socket_fds[1]));

		/* Enforces the received ruleset on the child. */
		ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
		ASSERT_EQ(0, landlock_restrict_self(ruleset_fd_rx, 0));
		ASSERT_EQ(0, close(ruleset_fd_rx));

		/* Checks that the ruleset enforcement. */
		ASSERT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
		ASSERT_EQ(EACCES, errno);
		/* Only the allowed /tmp hierarchy stays readable. */
		dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
		ASSERT_LE(0, dir_fd);
		ASSERT_EQ(0, close(dir_fd));
		_exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
		return;
	}

	ASSERT_EQ(0, close(socket_fds[1]));

	/* Checks that the parent is unrestricted. */
	dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, dir_fd);
	ASSERT_EQ(0, close(dir_fd));
	dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, dir_fd);
	ASSERT_EQ(0, close(dir_fd));

	ASSERT_EQ(child, waitpid(child, &status, 0));
	ASSERT_EQ(1, WIFEXITED(status));
	ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/landlock/base_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Landlock tests - Ptrace
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2019-2020 ANSSI
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/landlock.h>
#include <signal.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "common.h"
/* Copied from security/yama/yama_lsm.c */
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
#define YAMA_SCOPE_CAPABILITY 2
#define YAMA_SCOPE_NO_ATTACH 3
/*
 * Puts the calling thread into a minimal Landlock domain (handling only
 * LANDLOCK_ACCESS_FS_MAKE_BLOCK) so that cross-domain ptrace restrictions
 * can be tested.  Uses EXPECT_*() so failures are recorded without aborting
 * the (possibly forked) caller.
 */
static void create_domain(struct __test_metadata *const _metadata)
{
	int ruleset_fd;
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_BLOCK,
	};

	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	EXPECT_LE(0, ruleset_fd)
	{
		TH_LOG("Failed to create a ruleset: %s", strerror(errno));
	}
	EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
	EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
	EXPECT_EQ(0, close(ruleset_fd));
}
/*
 * Probes PTRACE_MODE_READ access to @pid by opening and closing its
 * /proc/<pid>/environ file.  Returns 0 on success, or an errno value.
 */
static int test_ptrace_read(const pid_t pid)
{
	static const char path_template[] = "/proc/%d/environ";
	char path[sizeof(path_template) + 10];
	int written, fd;

	written = snprintf(path, sizeof(path), path_template, pid);
	if (written >= sizeof(path))
		return E2BIG;

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return errno;
	/*
	 * Mixing error codes from close(2) and open(2) should not lead to any
	 * (access type) confusion for this test.
	 */
	return close(fd) ? errno : 0;
}
/*
 * Returns the current Yama ptrace_scope sysctl value (0-3), 0 when Yama is
 * not available, or -1 when the sysctl file cannot be read.
 */
static int get_yama_ptrace_scope(void)
{
	char buf[2] = {};
	int fd, scope;

	fd = open("/proc/sys/kernel/yama/ptrace_scope", O_RDONLY);
	if (fd < 0)
		return 0;

	scope = (read(fd, buf, 1) < 0) ? -1 : atoi(buf);
	close(fd);
	return scope;
}
/* clang-format off */
FIXTURE(hierarchy) {};
/* clang-format on */

FIXTURE_VARIANT(hierarchy)
{
	/* Domain created before fork(), thus inherited by both processes. */
	const bool domain_both;
	/* Domain created only in the parent, after fork(). */
	const bool domain_parent;
	/* Domain created only in the child, after fork(). */
	const bool domain_child;
};

/*
 * Test multiple tracing combinations between a parent process P1 and a child
 * process P2.
 *
 * Yama's scoped ptrace is presumed disabled.  If enabled, this optional
 * restriction is enforced in addition to any Landlock check, which means that
 * all P2 requests to trace P1 would be denied.
 */

/*
 * No domain
 *
 * P1-.               P1 -> P2 : allow
 *     \              P2 -> P1 : allow
 *      'P2
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) {
	/* clang-format on */
	.domain_both = false,
	.domain_parent = false,
	.domain_child = false,
};

/*
 * Child domain
 *
 * P1--.              P1 -> P2 : allow
 *      \             P2 -> P1 : deny
 *      .'-----.
 *      |  P2  |
 *      '------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) {
	/* clang-format on */
	.domain_both = false,
	.domain_parent = false,
	.domain_child = true,
};

/*
 * Parent domain
 * .------.
 * |  P1  --.         P1 -> P2 : deny
 * '------'  \        P2 -> P1 : allow
 *            '
 *            P2
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) {
	/* clang-format on */
	.domain_both = false,
	.domain_parent = true,
	.domain_child = false,
};

/*
 * Parent + child domain (siblings)
 * .------.
 * |  P1  ---.        P1 -> P2 : deny
 * '------'   \       P2 -> P1 : deny
 *         .---'--.
 *         |  P2  |
 *         '------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) {
	/* clang-format on */
	.domain_both = false,
	.domain_parent = true,
	.domain_child = true,
};

/*
 * Same domain (inherited)
 * .-------------.
 * | P1----.     |    P1 -> P2 : allow
 * |        \    |    P2 -> P1 : allow
 * |         '   |
 * |         P2  |
 * '-------------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) {
	/* clang-format on */
	.domain_both = true,
	.domain_parent = false,
	.domain_child = false,
};

/*
 * Inherited + child domain
 * .-----------------.
 * |  P1----.        |    P1 -> P2 : allow
 * |         \       |    P2 -> P1 : deny
 * |        .-'----. |
 * |        |  P2  | |
 * |        '------' |
 * '-----------------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) {
	/* clang-format on */
	.domain_both = true,
	.domain_parent = false,
	.domain_child = true,
};

/*
 * Inherited + parent domain
 * .-----------------.
 * |.------.         |    P1 -> P2 : deny
 * ||  P1  ----.     |    P2 -> P1 : allow
 * |'------'    \    |
 * |             '   |
 * |             P2  |
 * '-----------------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) {
	/* clang-format on */
	.domain_both = true,
	.domain_parent = true,
	.domain_child = false,
};

/*
 * Inherited + parent and child domain (siblings)
 * .-----------------.
 * | .------.        |    P1 -> P2 : deny
 * | |  P1  .        |    P2 -> P1 : deny
 * | '------'\       |
 * |          \      |
 * |        .--'---. |
 * |        |  P2  | |
 * |        '------' |
 * '-----------------'
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(hierarchy, deny_with_forked_domain) {
	/* clang-format on */
	.domain_both = true,
	.domain_parent = true,
	.domain_child = true,
};

/* No shared fixture state: everything is driven by the variant. */
FIXTURE_SETUP(hierarchy)
{
}

FIXTURE_TEARDOWN(hierarchy)
{
}
/* Test PTRACE_TRACEME and PTRACE_ATTACH for parent and child. */
TEST_F(hierarchy, trace)
{
	pid_t child, parent;
	int status, err_proc_read;
	int pipe_child[2], pipe_parent[2];
	int yama_ptrace_scope;
	char buf_parent;
	long ret;
	bool can_read_child, can_trace_child, can_read_parent, can_trace_parent;

	yama_ptrace_scope = get_yama_ptrace_scope();
	ASSERT_LE(0, yama_ptrace_scope);

	if (yama_ptrace_scope > YAMA_SCOPE_DISABLED)
		TH_LOG("Incomplete tests due to Yama restrictions (scope %d)",
		       yama_ptrace_scope);

	/*
	 * can_read_child is true if a parent process can read its child
	 * process, which is only the case when the parent process is not
	 * isolated from the child with a dedicated Landlock domain.
	 */
	can_read_child = !variant->domain_parent;

	/*
	 * can_trace_child is true if a parent process can trace its child
	 * process.  This depends on two conditions:
	 * - The parent process is not isolated from the child with a dedicated
	 *   Landlock domain.
	 * - Yama allows tracing children (up to YAMA_SCOPE_RELATIONAL).
	 */
	can_trace_child = can_read_child &&
			  yama_ptrace_scope <= YAMA_SCOPE_RELATIONAL;

	/*
	 * can_read_parent is true if a child process can read its parent
	 * process, which is only the case when the child process is not
	 * isolated from the parent with a dedicated Landlock domain.
	 */
	can_read_parent = !variant->domain_child;

	/*
	 * can_trace_parent is true if a child process can trace its parent
	 * process.  This depends on two conditions:
	 * - The child process is not isolated from the parent with a dedicated
	 *   Landlock domain.
	 * - Yama is disabled (YAMA_SCOPE_DISABLED).
	 */
	can_trace_parent = can_read_parent &&
			   yama_ptrace_scope <= YAMA_SCOPE_DISABLED;

	/*
	 * Removes all effective and permitted capabilities to not interfere
	 * with cap_ptrace_access_check() in case of PTRACE_MODE_FSCREDS.
	 */
	drop_caps(_metadata);

	parent = getpid();
	/* The pipes synchronize domain creation across the fork. */
	ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
	ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
	if (variant->domain_both) {
		create_domain(_metadata);
		if (!_metadata->passed)
			/* Aborts before forking. */
			return;
	}

	child = fork();
	ASSERT_LE(0, child);
	if (child == 0) {
		char buf_child;

		ASSERT_EQ(0, close(pipe_parent[1]));
		ASSERT_EQ(0, close(pipe_child[0]));
		if (variant->domain_child)
			create_domain(_metadata);

		/* Waits for the parent to be in a domain, if any. */
		ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));

		/* Tests PTRACE_MODE_READ on the parent. */
		err_proc_read = test_ptrace_read(parent);
		if (can_read_parent) {
			EXPECT_EQ(0, err_proc_read);
		} else {
			EXPECT_EQ(EACCES, err_proc_read);
		}

		/* Tests PTRACE_ATTACH on the parent. */
		ret = ptrace(PTRACE_ATTACH, parent, NULL, 0);
		if (can_trace_parent) {
			EXPECT_EQ(0, ret);
		} else {
			EXPECT_EQ(-1, ret);
			EXPECT_EQ(EPERM, errno);
		}
		if (ret == 0) {
			ASSERT_EQ(parent, waitpid(parent, &status, 0));
			ASSERT_EQ(1, WIFSTOPPED(status));
			ASSERT_EQ(0, ptrace(PTRACE_DETACH, parent, NULL, 0));
		}

		/* Tests child PTRACE_TRACEME. */
		ret = ptrace(PTRACE_TRACEME);
		if (can_trace_child) {
			EXPECT_EQ(0, ret);
		} else {
			EXPECT_EQ(-1, ret);
			EXPECT_EQ(EPERM, errno);
		}

		/*
		 * Signals that the PTRACE_ATTACH test is done and the
		 * PTRACE_TRACEME test is ongoing.
		 */
		ASSERT_EQ(1, write(pipe_child[1], ".", 1));

		if (can_trace_child) {
			ASSERT_EQ(0, raise(SIGSTOP));
		}

		/* Waits for the parent PTRACE_ATTACH test. */
		ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
		_exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
		return;
	}

	ASSERT_EQ(0, close(pipe_child[1]));
	ASSERT_EQ(0, close(pipe_parent[0]));
	if (variant->domain_parent)
		create_domain(_metadata);

	/* Signals that the parent is in a domain, if any. */
	ASSERT_EQ(1, write(pipe_parent[1], ".", 1));

	/*
	 * Waits for the child to test PTRACE_ATTACH on the parent and start
	 * testing PTRACE_TRACEME.
	 */
	ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));

	/* Tests child PTRACE_TRACEME. */
	if (can_trace_child) {
		ASSERT_EQ(child, waitpid(child, &status, 0));
		ASSERT_EQ(1, WIFSTOPPED(status));
		ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
	} else {
		/* The child should not be traced by the parent. */
		EXPECT_EQ(-1, ptrace(PTRACE_DETACH, child, NULL, 0));
		EXPECT_EQ(ESRCH, errno);
	}

	/* Tests PTRACE_MODE_READ on the child. */
	err_proc_read = test_ptrace_read(child);
	if (can_read_child) {
		EXPECT_EQ(0, err_proc_read);
	} else {
		EXPECT_EQ(EACCES, err_proc_read);
	}

	/* Tests PTRACE_ATTACH on the child. */
	ret = ptrace(PTRACE_ATTACH, child, NULL, 0);
	if (can_trace_child) {
		EXPECT_EQ(0, ret);
	} else {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EPERM, errno);
	}
	if (ret == 0) {
		ASSERT_EQ(child, waitpid(child, &status, 0));
		ASSERT_EQ(1, WIFSTOPPED(status));
		ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
	}

	/* Signals that the parent PTRACE_ATTACH test is done. */
	ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
	ASSERT_EQ(child, waitpid(child, &status, 0));
	if (WIFSIGNALED(status) || !WIFEXITED(status) ||
	    WEXITSTATUS(status) != EXIT_SUCCESS)
		_metadata->passed = 0;
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/landlock/ptrace_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Landlock tests - Filesystem
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2020 ANSSI
* Copyright © 2020-2022 Microsoft Corporation
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <linux/magic.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/capability.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <unistd.h>
#include "common.h"
#ifndef renameat2
/* Fallback for libc versions that do not expose a renameat2(2) wrapper. */
int renameat2(int olddirfd, const char *oldpath, int newdirfd,
	      const char *newpath, unsigned int flags)
{
	return syscall(__NR_renameat2, olddirfd, oldpath, newdirfd, newpath,
		       flags);
}
#endif
#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif
#define TMP_DIR "tmp"
/* Trivial always-succeeding binary used as an exec target (see true.c). */
#define BINARY_PATH "./true"

/* Paths (sibling number and depth): sXdY = sibling X, depth Y; fN = file N. */
static const char dir_s1d1[] = TMP_DIR "/s1d1";
static const char file1_s1d1[] = TMP_DIR "/s1d1/f1";
static const char file2_s1d1[] = TMP_DIR "/s1d1/f2";
static const char dir_s1d2[] = TMP_DIR "/s1d1/s1d2";
static const char file1_s1d2[] = TMP_DIR "/s1d1/s1d2/f1";
static const char file2_s1d2[] = TMP_DIR "/s1d1/s1d2/f2";
static const char dir_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3";
static const char file1_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f1";
static const char file2_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f2";

static const char dir_s2d1[] = TMP_DIR "/s2d1";
static const char file1_s2d1[] = TMP_DIR "/s2d1/f1";
static const char dir_s2d2[] = TMP_DIR "/s2d1/s2d2";
static const char file1_s2d2[] = TMP_DIR "/s2d1/s2d2/f1";
static const char dir_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3";
static const char file1_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f1";
static const char file2_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f2";

static const char dir_s3d1[] = TMP_DIR "/s3d1";
static const char file1_s3d1[] = TMP_DIR "/s3d1/f1";
/* dir_s3d2 is a mount point. */
static const char dir_s3d2[] = TMP_DIR "/s3d1/s3d2";
static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3";

/*
 * layout1 hierarchy:
 *
 * tmp
 * ├── s1d1
 * │   ├── f1
 * │   ├── f2
 * │   └── s1d2
 * │       ├── f1
 * │       ├── f2
 * │       └── s1d3
 * │           ├── f1
 * │           └── f2
 * ├── s2d1
 * │   ├── f1
 * │   └── s2d2
 * │       ├── f1
 * │       └── s2d3
 * │           ├── f1
 * │           └── f2
 * └── s3d1
 *     ├── f1
 *     └── s3d2
 *         └── s3d3
 */
/*
 * Scans @inf line by line (in 31-byte chunks) and returns true if any chunk
 * starts with @str.  The stream position is left where the match was found.
 */
static bool fgrep(FILE *const inf, const char *const str)
{
	const size_t slen = strlen(str);
	char line[32];

	while (fgets(line, sizeof(line), inf)) {
		if (!strncmp(line, str, slen))
			return true;
	}
	return false;
}
/*
 * Returns true if @filesystem appears as a "nodev" entry in
 * /proc/filesystems, or conservatively true when the check cannot be
 * performed (NULL name, unreadable file, or over-long name).
 */
static bool supports_filesystem(const char *const filesystem)
{
	char expected[32];
	int written;
	bool found = true;
	FILE *const procfs = fopen("/proc/filesystems", "r");

	/*
	 * Consider that the filesystem is supported if we cannot get the
	 * supported ones.
	 */
	if (!procfs)
		return true;

	/* filesystem can be null for bind mounts. */
	if (!filesystem)
		goto out;

	written = snprintf(expected, sizeof(expected), "nodev\t%s\n",
			   filesystem);
	if (written >= sizeof(expected))
		/* Ignores too-long filesystem names. */
		goto out;

	found = fgrep(procfs, expected);
out:
	fclose(procfs);
	return found;
}
/*
 * Returns true if the current directory lives on a filesystem with magic
 * number @fs_magic; @fs_magic == 0 or a failed statfs(2) also count as a
 * match (conservative default).
 */
static bool cwd_matches_fs(unsigned int fs_magic)
{
	struct statfs sb;

	if (!fs_magic || statfs(".", &sb))
		return true;
	return sb.f_type == fs_magic;
}
/*
 * Creates every missing parent directory of @path with mode 0700.  The last
 * component of @path itself is NOT created.  Already-existing directories
 * (EEXIST) are not an error.
 */
static void mkdir_parents(struct __test_metadata *const _metadata,
			  const char *const path)
{
	char *walker;
	const char *parent;
	int i, err;

	ASSERT_NE(path[0], '\0');
	walker = strdup(path);
	ASSERT_NE(NULL, walker);
	parent = walker;
	for (i = 1; walker[i]; i++) {
		if (walker[i] != '/')
			continue;
		/* Temporarily terminates the copy at each '/' component. */
		walker[i] = '\0';
		err = mkdir(parent, 0700);
		ASSERT_FALSE(err && errno != EEXIST)
		{
			TH_LOG("Failed to create directory \"%s\": %s", parent,
			       strerror(errno));
		}
		walker[i] = '/';
	}
	free(walker);
}
/* Creates @path as a directory (mode 0700), including missing parents. */
static void create_directory(struct __test_metadata *const _metadata,
			     const char *const path)
{
	mkdir_parents(_metadata, path);
	ASSERT_EQ(0, mkdir(path, 0700))
	{
		TH_LOG("Failed to create directory \"%s\": %s", path,
		       strerror(errno));
	}
}
/* Creates @path as an empty regular file (mode 0700), including parents. */
static void create_file(struct __test_metadata *const _metadata,
			const char *const path)
{
	mkdir_parents(_metadata, path);
	ASSERT_EQ(0, mknod(path, S_IFREG | 0700, 0))
	{
		TH_LOG("Failed to create file \"%s\": %s", path,
		       strerror(errno));
	}
}
/*
 * Removes @path (file or directory) and then every now-empty parent
 * directory up to, but not including, TMP_DIR.  Missing paths and non-empty
 * or busy parents are not errors.  Returns 0 or an errno value.
 */
static int remove_path(const char *const path)
{
	char *walker = strdup(path);
	int i, err = 0;

	if (!walker)
		return ENOMEM;

	if (unlink(path) && rmdir(path)) {
		if (errno != ENOENT && errno != ENOTDIR)
			err = errno;
		goto out;
	}
	/* Walks back through each '/' and prunes empty parents. */
	for (i = strlen(walker); i > 0; i--) {
		if (walker[i] != '/')
			continue;
		walker[i] = '\0';
		if (rmdir(walker)) {
			if (errno != ENOTEMPTY && errno != EBUSY)
				err = errno;
			goto out;
		}
		if (!strcmp(walker, TMP_DIR))
			goto out;
	}
out:
	free(walker);
	return err;
}
/* Mount parameters consumed by mount_opt(). */
struct mnt_opt {
	const char *const source;	/* NULL: @type doubles as the source. */
	const char *const type;
	const unsigned long flags;
	const char *const data;
};

/* Default test filesystem: a small private tmpfs. */
const struct mnt_opt mnt_tmp = {
	.type = "tmpfs",
	.data = "size=4m,mode=700",
};
/* Mounts the filesystem described by @mnt onto @target; returns mount(2)'s result. */
static int mount_opt(const struct mnt_opt *const mnt, const char *const target)
{
	const char *source = mnt->source ? mnt->source : mnt->type;

	return mount(source, target, mnt->type, mnt->flags, mnt->data);
}
/*
 * Creates TMP_DIR and mounts the filesystem described by @mnt on it inside
 * a fresh private mount (and cgroup) namespace, so the tests never leak
 * mounts into the host.
 */
static void prepare_layout_opt(struct __test_metadata *const _metadata,
			       const struct mnt_opt *const mnt)
{
	disable_caps(_metadata);
	umask(0077);
	create_directory(_metadata, TMP_DIR);

	/*
	 * Do not pollute the rest of the system: creates a private mount point
	 * for tests relying on pivot_root(2) and move_mount(2).
	 */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, unshare(CLONE_NEWNS | CLONE_NEWCGROUP));
	ASSERT_EQ(0, mount_opt(mnt, TMP_DIR))
	{
		TH_LOG("Failed to mount the %s filesystem: %s", mnt->type,
		       strerror(errno));
		/*
		 * FIXTURE_TEARDOWN() is not called when FIXTURE_SETUP()
		 * failed, so we need to explicitly do a minimal cleanup to
		 * avoid cascading errors with other tests that don't depend on
		 * the same filesystem.
		 */
		remove_path(TMP_DIR);
	}
	ASSERT_EQ(0, mount(NULL, TMP_DIR, NULL, MS_PRIVATE | MS_REC, NULL));
	clear_cap(_metadata, CAP_SYS_ADMIN);
}
/* Prepares the default layout backed by the mnt_tmp tmpfs. */
static void prepare_layout(struct __test_metadata *const _metadata)
{
	prepare_layout_opt(_metadata, &mnt_tmp);
}
/* Unmounts and removes TMP_DIR; counterpart of prepare_layout(). */
static void cleanup_layout(struct __test_metadata *const _metadata)
{
	set_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, umount(TMP_DIR));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, remove_path(TMP_DIR));
}
/* layout0: bare fixture with only the TMP_DIR tmpfs mount, no files. */
/* clang-format off */
FIXTURE(layout0) {};
/* clang-format on */
FIXTURE_SETUP(layout0)
{
	prepare_layout(_metadata);
}
FIXTURE_TEARDOWN(layout0)
{
	cleanup_layout(_metadata);
}
/*
 * Populates the layout1 file hierarchy (fileN_sXdY / dir_sXdY constants
 * are defined earlier in this file) and turns dir_s3d2 into a tmpfs
 * mount point containing dir_s3d3.
 */
static void create_layout1(struct __test_metadata *const _metadata)
{
	create_file(_metadata, file1_s1d1);
	create_file(_metadata, file1_s1d2);
	create_file(_metadata, file1_s1d3);
	create_file(_metadata, file2_s1d1);
	create_file(_metadata, file2_s1d2);
	create_file(_metadata, file2_s1d3);
	create_file(_metadata, file1_s2d1);
	create_file(_metadata, file1_s2d2);
	create_file(_metadata, file1_s2d3);
	create_file(_metadata, file2_s2d3);
	create_file(_metadata, file1_s3d1);
	create_directory(_metadata, dir_s3d2);
	/* dir_s3d2 becomes a mount point for tests crossing mounts. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, mount_opt(&mnt_tmp, dir_s3d2));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, mkdir(dir_s3d3, 0700));
}
/* Tears down the layout1 hierarchy; counterpart of create_layout1(). */
static void remove_layout1(struct __test_metadata *const _metadata)
{
	EXPECT_EQ(0, remove_path(file2_s1d3));
	EXPECT_EQ(0, remove_path(file2_s1d2));
	EXPECT_EQ(0, remove_path(file2_s1d1));
	EXPECT_EQ(0, remove_path(file1_s1d3));
	EXPECT_EQ(0, remove_path(file1_s1d2));
	EXPECT_EQ(0, remove_path(file1_s1d1));
	EXPECT_EQ(0, remove_path(dir_s1d3));
	EXPECT_EQ(0, remove_path(file2_s2d3));
	EXPECT_EQ(0, remove_path(file1_s2d3));
	EXPECT_EQ(0, remove_path(file1_s2d2));
	EXPECT_EQ(0, remove_path(file1_s2d1));
	EXPECT_EQ(0, remove_path(dir_s2d2));
	EXPECT_EQ(0, remove_path(file1_s3d1));
	EXPECT_EQ(0, remove_path(dir_s3d3));
	set_cap(_metadata, CAP_SYS_ADMIN);
	/* Best-effort: umount() result is deliberately not checked here. */
	umount(dir_s3d2);
	clear_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, remove_path(dir_s3d2));
}
/* layout1: full fixture with the sXdY file/directory tree on tmpfs. */
/* clang-format off */
FIXTURE(layout1) {};
/* clang-format on */
FIXTURE_SETUP(layout1)
{
	prepare_layout(_metadata);
	create_layout1(_metadata);
}
FIXTURE_TEARDOWN(layout1)
{
	remove_layout1(_metadata);
	cleanup_layout(_metadata);
}
/*
* This helper enables to use the ASSERT_* macros and print the line number
* pointing to the test caller.
*/
/*
 * Opens @path relative to @dirfd and immediately closes it.  Returns 0 on
 * success, or the errno value of the failing call otherwise.
 */
static int test_open_rel(const int dirfd, const char *const path,
			 const int flags)
{
	/* Works with file and directories. */
	const int fd = openat(dirfd, path, flags | O_CLOEXEC);

	if (fd < 0)
		return errno;
	/*
	 * Mixing error codes from close(2) and open(2) should not lead to any
	 * (access type) confusion for this test.
	 */
	return close(fd) ? errno : 0;
}
/* Variant of test_open_rel() resolving @path from the current directory. */
static int test_open(const char *const path, const int flags)
{
	return test_open_rel(AT_FDCWD, path, flags);
}
/* Baseline: without any enforced ruleset, every path must be readable. */
TEST_F_FORK(layout1, no_restriction)
{
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file2_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file2_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s2d3, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
}
/* Checks error codes returned for invalid landlock_add_rule(2) usages. */
TEST_F_FORK(layout1, inval)
{
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		.parent_fd = -1,
	};
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	int ruleset_fd;

	path_beneath.parent_fd =
		open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath.parent_fd);

	/* Tests with an O_PATH FD that is not a ruleset FD. */
	ruleset_fd = open(dir_s1d1, O_PATH | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	/* Returns EBADF because ruleset_fd is not a landlock-ruleset FD. */
	ASSERT_EQ(EBADF, errno);
	ASSERT_EQ(0, close(ruleset_fd));

	ruleset_fd = open(dir_s1d1, O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	/* Returns EBADFD because ruleset_fd is not a valid ruleset. */
	ASSERT_EQ(EBADFD, errno);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Gets a real ruleset. */
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
				       &path_beneath, 0));
	ASSERT_EQ(0, close(path_beneath.parent_fd));

	/* Tests without O_PATH. */
	path_beneath.parent_fd = open(dir_s1d2, O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath.parent_fd);
	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
				       &path_beneath, 0));
	ASSERT_EQ(0, close(path_beneath.parent_fd));

	/* Tests with a ruleset FD used as the parent FD. */
	path_beneath.parent_fd = ruleset_fd;
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(EBADFD, errno);

	/* Checks unhandled allowed_access. */
	path_beneath.parent_fd =
		open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath.parent_fd);

	/* Tests with a valid access right that the ruleset does not handle. */
	path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_EXECUTE;
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(EINVAL, errno);
	path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE;

	/* Tests with denied-by-default access right. */
	path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_REFER;
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(EINVAL, errno);
	path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_REFER;

	/* Tests with an unknown (64-bits) access value. */
	path_beneath.allowed_access |= (1ULL << 60);
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(EINVAL, errno);
	path_beneath.allowed_access &= ~(1ULL << 60);

	/* Tests with no access. */
	path_beneath.allowed_access = 0;
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(ENOMSG, errno);
	ASSERT_EQ(0, close(path_beneath.parent_fd));

	/* Enforces the ruleset. */
	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
	ASSERT_EQ(0, close(ruleset_fd));
}
/* clang-format off */
/* Access rights that can be tied to regular files. */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE | \
	LANDLOCK_ACCESS_FS_TRUNCATE)
/* Highest access-right bit iterated over by these tests. */
#define ACCESS_LAST LANDLOCK_ACCESS_FS_TRUNCATE
/* Every filesystem access right exercised by these tests. */
#define ACCESS_ALL ( \
	ACCESS_FILE | \
	LANDLOCK_ACCESS_FS_READ_DIR | \
	LANDLOCK_ACCESS_FS_REMOVE_DIR | \
	LANDLOCK_ACCESS_FS_REMOVE_FILE | \
	LANDLOCK_ACCESS_FS_MAKE_CHAR | \
	LANDLOCK_ACCESS_FS_MAKE_DIR | \
	LANDLOCK_ACCESS_FS_MAKE_REG | \
	LANDLOCK_ACCESS_FS_MAKE_SOCK | \
	LANDLOCK_ACCESS_FS_MAKE_FIFO | \
	LANDLOCK_ACCESS_FS_MAKE_BLOCK | \
	LANDLOCK_ACCESS_FS_MAKE_SYM | \
	LANDLOCK_ACCESS_FS_REFER)
/* clang-format on */
/*
 * Checks that directory rules accept every access right, while rules on a
 * regular file only accept the file-compatible subset (ACCESS_FILE).
 */
TEST_F_FORK(layout1, file_and_dir_access_rights)
{
	__u64 access;
	int err;
	struct landlock_path_beneath_attr path_beneath_file = {},
					  path_beneath_dir = {};
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = ACCESS_ALL,
	};
	const int ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	/* Tests access rights for files. */
	path_beneath_file.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC);
	ASSERT_LE(0, path_beneath_file.parent_fd);
	/* Tests access rights for directories. */
	path_beneath_dir.parent_fd =
		open(dir_s1d2, O_PATH | O_DIRECTORY | O_CLOEXEC);
	ASSERT_LE(0, path_beneath_dir.parent_fd);
	/* Iterates over each single access-right bit. */
	for (access = 1; access <= ACCESS_LAST; access <<= 1) {
		path_beneath_dir.allowed_access = access;
		ASSERT_EQ(0, landlock_add_rule(ruleset_fd,
					       LANDLOCK_RULE_PATH_BENEATH,
					       &path_beneath_dir, 0));
		path_beneath_file.allowed_access = access;
		err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath_file, 0);
		if (access & ACCESS_FILE) {
			ASSERT_EQ(0, err);
		} else {
			/* Directory-only rights are rejected for files. */
			ASSERT_EQ(-1, err);
			ASSERT_EQ(EINVAL, errno);
		}
	}
	ASSERT_EQ(0, close(path_beneath_file.parent_fd));
	ASSERT_EQ(0, close(path_beneath_dir.parent_fd));
	ASSERT_EQ(0, close(ruleset_fd));
}
/* Checks that every access bit above ACCESS_LAST is rejected with EINVAL. */
TEST_F_FORK(layout0, unknown_access_rights)
{
	__u64 access_mask;
	/* Scans single bits from bit 63 down to (excluding) ACCESS_LAST. */
	for (access_mask = 1ULL << 63; access_mask != ACCESS_LAST;
	     access_mask >>= 1) {
		struct landlock_ruleset_attr ruleset_attr = {
			.handled_access_fs = access_mask,
		};
		ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
						      sizeof(ruleset_attr), 0));
		ASSERT_EQ(EINVAL, errno);
	}
}
/*
 * Adds one PATH_BENEATH rule granting @allowed_access below @path to the
 * ruleset referred to by @ruleset_fd.  Asserts on any failure.
 */
static void add_path_beneath(struct __test_metadata *const _metadata,
			     const int ruleset_fd, const __u64 allowed_access,
			     const char *const path)
{
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = allowed_access,
	};
	path_beneath.parent_fd = open(path, O_PATH | O_CLOEXEC);
	ASSERT_LE(0, path_beneath.parent_fd)
	{
		TH_LOG("Failed to open directory \"%s\": %s", path,
		       strerror(errno));
	}
	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
				       &path_beneath, 0))
	{
		TH_LOG("Failed to update the ruleset with \"%s\": %s", path,
		       strerror(errno));
	}
	ASSERT_EQ(0, close(path_beneath.parent_fd));
}
/* One entry of a NULL-path-terminated rule list, see create_ruleset(). */
struct rule {
	const char *path;
	__u64 access;
};
/* clang-format off */
/* Read-only access: file and directory reads. */
#define ACCESS_RO ( \
	LANDLOCK_ACCESS_FS_READ_FILE | \
	LANDLOCK_ACCESS_FS_READ_DIR)
/* Read-write access: ACCESS_RO plus file writes. */
#define ACCESS_RW ( \
	ACCESS_RO | \
	LANDLOCK_ACCESS_FS_WRITE_FILE)
/* clang-format on */
/*
 * Creates a ruleset handling @handled_access_fs and populates it with one
 * PATH_BENEATH rule per entry of @rules (terminated by a NULL .path).
 * Returns the new ruleset FD; the caller is responsible for closing it.
 */
static int create_ruleset(struct __test_metadata *const _metadata,
			  const __u64 handled_access_fs,
			  const struct rule rules[])
{
	int ruleset_fd, i;
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = handled_access_fs,
	};
	ASSERT_NE(NULL, rules)
	{
		TH_LOG("No rule list");
	}
	ASSERT_NE(NULL, rules[0].path)
	{
		TH_LOG("Empty rule list");
	}
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd)
	{
		TH_LOG("Failed to create a ruleset: %s", strerror(errno));
	}
	for (i = 0; rules[i].path; i++) {
		add_path_beneath(_metadata, ruleset_fd, rules[i].access,
				 rules[i].path);
	}
	return ruleset_fd;
}
/* Restricts the calling thread with @ruleset_fd (sets no_new_privs first). */
static void enforce_ruleset(struct __test_metadata *const _metadata,
			    const int ruleset_fd)
{
	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0))
	{
		TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
	}
}
/*
 * Checks that nsfs entries (e.g. /proc/self/ns/mnt) stay reachable as
 * disconnected paths under a restrictive policy, and that they cannot be
 * used as rule anchors.
 */
TEST_F_FORK(layout0, proc_nsfs)
{
	const struct rule rules[] = {
		{
			.path = "/dev/null",
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	struct landlock_path_beneath_attr path_beneath;
	const int ruleset_fd = create_ruleset(
		_metadata, rules[0].access | LANDLOCK_ACCESS_FS_READ_DIR,
		rules);

	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));

	enforce_ruleset(_metadata, ruleset_fd);

	/* Only /dev/null (and disconnected paths) remain readable. */
	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
	ASSERT_EQ(EACCES, test_open("/dev", O_RDONLY));
	ASSERT_EQ(0, test_open("/dev/null", O_RDONLY));
	ASSERT_EQ(EACCES, test_open("/dev/full", O_RDONLY));
	ASSERT_EQ(EACCES, test_open("/proc", O_RDONLY));
	ASSERT_EQ(EACCES, test_open("/proc/self", O_RDONLY));
	ASSERT_EQ(EACCES, test_open("/proc/self/ns", O_RDONLY));
	/*
	 * Because nsfs is an internal filesystem, /proc/self/ns/mnt is a
	 * disconnected path. Such path cannot be identified and must then be
	 * allowed.
	 */
	ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));

	/*
	 * Checks that it is not possible to add nsfs-like filesystem
	 * references to a ruleset.
	 */
	path_beneath.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				      LANDLOCK_ACCESS_FS_WRITE_FILE;
	path_beneath.parent_fd = open("/proc/self/ns/mnt", O_PATH | O_CLOEXEC);
	ASSERT_LE(0, path_beneath.parent_fd);
	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
					&path_beneath, 0));
	ASSERT_EQ(EBADFD, errno);
	ASSERT_EQ(0, close(path_beneath.parent_fd));
}
/*
 * Checks that landlock_restrict_self(2) fails with EPERM without
 * no_new_privs, then succeeds once enforce_ruleset() has set it.
 */
TEST_F_FORK(layout0, unpriv)
{
	const struct rule rules[] = {
		{
			.path = TMP_DIR,
			.access = ACCESS_RO,
		},
		{},
	};
	int ruleset_fd;
	drop_caps(_metadata);
	ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Without no_new_privs, self-restriction must be denied. */
	ASSERT_EQ(-1, landlock_restrict_self(ruleset_fd, 0));
	ASSERT_EQ(EPERM, errno);
	/* enforce_ruleset() calls prctl(no_new_privs). */
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
}
/*
 * Checks effective access rights on directories and files, including that
 * O_PATH opens bypass Landlock access checks.
 */
TEST_F_FORK(layout1, effective_access)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = ACCESS_RO,
		},
		{
			.path = file1_s2d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	char buf;
	int reg_fd;
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Tests on a directory (with or without O_PATH). */
	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
	ASSERT_EQ(0, test_open("/", O_RDONLY | O_PATH));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY | O_PATH));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY | O_PATH));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	/* Tests on a file (with or without O_PATH). */
	ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_PATH));
	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
	/* Checks effective read and write actions. */
	reg_fd = open(file1_s2d2, O_RDWR | O_CLOEXEC);
	ASSERT_LE(0, reg_fd);
	ASSERT_EQ(1, write(reg_fd, ".", 1));
	ASSERT_LE(0, lseek(reg_fd, 0, SEEK_SET));
	ASSERT_EQ(1, read(reg_fd, &buf, 1));
	ASSERT_EQ('.', buf);
	ASSERT_EQ(0, close(reg_fd));
	/* Just in case, double-checks effective actions. */
	reg_fd = open(file1_s2d2, O_RDONLY | O_CLOEXEC);
	ASSERT_LE(0, reg_fd);
	/* EBADF comes from the read-only FD, not from Landlock. */
	ASSERT_EQ(-1, write(reg_fd, &buf, 1));
	ASSERT_EQ(EBADF, errno);
	ASSERT_EQ(0, close(reg_fd));
}
/* Checks that access rights not handled by the ruleset stay allowed. */
TEST_F_FORK(layout1, unhandled_access)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = ACCESS_RO,
		},
		{},
	};
	/* Here, we only handle read accesses, not write accesses. */
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Because the policy does not handle LANDLOCK_ACCESS_FS_WRITE_FILE,
	 * opening for write-only should be allowed, but not read-write.
	 */
	ASSERT_EQ(0, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
}
/* Checks that two rules on the same path within one layer are ORed. */
TEST_F_FORK(layout1, ruleset_overlap)
{
	const struct rule rules[] = {
		/* These rules should be ORed among them. */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks s1d1 hierarchy. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d2 hierarchy: union of both rules applies. */
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* Checks s1d3 hierarchy: rights are inherited from dir_s1d2. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
}
/*
 * Checks that rule unions apply within a layer while stacked layers can
 * only further restrict (intersect) the effective access rights.
 */
TEST_F_FORK(layout1, layer_rule_unions)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const struct rule layer2[] = {
		/* Doesn't change anything from layer1. */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const struct rule layer3[] = {
		/* Only allows write (but not read) to dir_s1d3. */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks s1d1 hierarchy with layer1. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d2 hierarchy with layer1. */
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
	/* NOTE(review): re-tests dir_s1d1 (not dir_s1d2) here and in the
	 * sections below — presumably intentional repetition, confirm. */
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d3 hierarchy with layer1. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
	/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Doesn't change anything from layer1. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks s1d1 hierarchy with layer2. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d2 hierarchy with layer2. */
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d3 hierarchy with layer2. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
	/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Only allows write (but not read) to dir_s1d3. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks s1d1 hierarchy with layer3. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d2 hierarchy with layer3: read is now denied. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Checks s1d3 hierarchy with layer3. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
	/* dir_s1d3 should now deny READ_FILE and WRITE_FILE (O_RDWR). */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
}
/*
 * Checks that two stacked layers handling disjoint access rights each
 * keep restricting only their own access type.
 */
TEST_F_FORK(layout1, non_overlapping_accesses)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{},
	};
	const struct rule layer2[] = {
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{},
	};
	int ruleset_fd;
	/* Frees the paths reused below for mknod(2) checks. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ruleset_fd =
		create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* File creation is only allowed below dir_s1d2. */
	ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REMOVE_FILE,
				    layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Unchanged accesses for file creation. */
	ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0));
	/* Checks file removing. */
	ASSERT_EQ(-1, unlink(file1_s1d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, unlink(file1_s1d3));
}
TEST_F_FORK(layout1, interleaved_masked_accesses)
{
	/*
	 * Checks overly restrictive rules:
	 * layer 1: allows R   s1d1/s1d2/s1d3/file1
	 * layer 2: allows RW  s1d1/s1d2/s1d3
	 *          allows  W  s1d1/s1d2
	 *          denies  R  s1d1/s1d2
	 * layer 3: allows R   s1d1
	 * layer 4: allows R   s1d1/s1d2
	 *          denies  W  s1d1/s1d2
	 * layer 5: allows R   s1d1/s1d2
	 * layer 6: allows X   ----
	 * layer 7: allows  W  s1d1/s1d2
	 *          denies  R  s1d1/s1d2
	 */
	const struct rule layer1_read[] = {
		/* Allows read access to file1_s1d3 with the first layer. */
		{
			.path = file1_s1d3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	/* First rule with write restrictions. */
	const struct rule layer2_read_write[] = {
		/* Start by granting read-write access via its parent directory... */
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		/* ...but also denies read access via its grandparent directory. */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const struct rule layer3_read[] = {
		/* Allows read access via its great-grandparent directory. */
		{
			.path = dir_s1d1,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	const struct rule layer4_read_write[] = {
		/*
		 * Try to confuse the deny access by denying write (but not
		 * read) access via its grandparent directory.
		 */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	const struct rule layer5_read[] = {
		/*
		 * Try to override layer2's deny read access by explicitly
		 * allowing read access via file1_s1d3's grandparent.
		 */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	const struct rule layer6_execute[] = {
		/*
		 * Restricts an unrelated file hierarchy with a new access
		 * (non-overlapping) type.
		 */
		{
			.path = dir_s2d1,
			.access = LANDLOCK_ACCESS_FS_EXECUTE,
		},
		{},
	};
	const struct rule layer7_read_write[] = {
		/*
		 * Finally, denies read access to file1_s1d3 via its
		 * grandparent.
		 */
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	int ruleset_fd;
	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
				    layer1_read);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that read access is granted for file1_s1d3 with layer 1. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
	ruleset_fd = create_ruleset(_metadata,
				    LANDLOCK_ACCESS_FS_READ_FILE |
					    LANDLOCK_ACCESS_FS_WRITE_FILE,
				    layer2_read_write);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that previous access rights are unchanged with layer 2. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
				    layer3_read);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that previous access rights are unchanged with layer 3. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY));
	/* This time, denies write access for the file hierarchy. */
	ruleset_fd = create_ruleset(_metadata,
				    LANDLOCK_ACCESS_FS_READ_FILE |
					    LANDLOCK_ACCESS_FS_WRITE_FILE,
				    layer4_read_write);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Checks that the only change with layer 4 is that write access is
	 * denied.
	 */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
				    layer5_read);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that previous access rights are unchanged with layer 5. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_EXECUTE,
				    layer6_execute);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that previous access rights are unchanged with layer 6. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
	ruleset_fd = create_ruleset(_metadata,
				    LANDLOCK_ACCESS_FS_READ_FILE |
					    LANDLOCK_ACCESS_FS_WRITE_FILE,
				    layer7_read_write);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks read access is now denied with layer 7. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY));
}
/*
 * Checks that updating an already-enforced ruleset cannot grant the
 * current domain more rights than it already has (no privilege escalation).
 */
TEST_F_FORK(layout1, inherit_subset)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* Write access is forbidden. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	/* Readdir access is allowed. */
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* Write access is forbidden. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	/* Readdir access is allowed. */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
	/*
	 * Tests shared rule extension: the following rules should not grant
	 * any new access, only remove some. Once enforced, these rules are
	 * ANDed with the previous ones.
	 */
	add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE,
			 dir_s1d2);
	/*
	 * According to ruleset_fd, dir_s1d2 should now have the
	 * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE
	 * access rights (even if this directory is opened a second time).
	 * However, when enforcing this updated ruleset, the ruleset tied to
	 * the current process (i.e. its domain) will still only have the
	 * dir_s1d2 with LANDLOCK_ACCESS_FS_READ_FILE and
	 * LANDLOCK_ACCESS_FS_READ_DIR accesses, but
	 * LANDLOCK_ACCESS_FS_WRITE_FILE must not be allowed because it would
	 * be a privilege escalation.
	 */
	enforce_ruleset(_metadata, ruleset_fd);
	/* Same tests and results as above. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d2. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	/* Readdir access is still allowed. */
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d3. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	/* Readdir access is still allowed. */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
	/*
	 * Try to get more privileges by adding new access rights to the parent
	 * directory: dir_s1d1.
	 */
	add_path_beneath(_metadata, ruleset_fd, ACCESS_RW, dir_s1d1);
	enforce_ruleset(_metadata, ruleset_fd);
	/* Same tests and results as above. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d2. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	/* Readdir access is still allowed. */
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d3. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	/* Readdir access is still allowed. */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
	/*
	 * Now, dir_s1d3 get a new rule tied to it, only allowing
	 * LANDLOCK_ACCESS_FS_WRITE_FILE. The (kernel internal) difference is
	 * that there was no rule tied to it before.
	 */
	add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE,
			 dir_s1d3);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Same tests and results as above, except for open(dir_s1d3) which is
	 * now denied because the new rule mask the rule previously inherited
	 * from dir_s1d2.
	 */
	/* Same tests and results as above. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d2. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	/* Readdir access is still allowed. */
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* It is still forbidden to write in file1_s1d3. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	/*
	 * Readdir of dir_s1d3 is still allowed because of the OR policy inside
	 * the same layer.
	 */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
}
/*
 * Checks that adding a rule on a parent directory after enforcement does
 * not shrink nor extend the rights already granted below a child rule.
 */
TEST_F_FORK(layout1, inherit_superset)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d3,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	/* Readdir access is denied for dir_s1d2. */
	ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* Readdir access is allowed for dir_s1d3. */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
	/* File access is allowed for file1_s1d3. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	/* Now dir_s1d2, parent of dir_s1d3, gets a new rule tied to it. */
	add_path_beneath(_metadata, ruleset_fd,
			 LANDLOCK_ACCESS_FS_READ_FILE |
				 LANDLOCK_ACCESS_FS_READ_DIR,
			 dir_s1d2);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Readdir access is still denied for dir_s1d2. */
	ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	/* Readdir access is still allowed for dir_s1d3. */
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
	/* File access is still allowed for file1_s1d3. */
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
}
/*
 * Checks that a ruleset can be enforced 16 times (stacking 16 layers) and
 * that any further enforcement fails with E2BIG.
 */
TEST_F_FORK(layout0, max_layers)
{
	int i, err;
	const struct rule rules[] = {
		{
			.path = TMP_DIR,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Fills every available layer. */
	for (i = 0; i < 16; i++)
		enforce_ruleset(_metadata, ruleset_fd);
	/* One layer past the limit must consistently fail. */
	for (i = 0; i < 2; i++) {
		err = landlock_restrict_self(ruleset_fd, 0);
		ASSERT_EQ(-1, err);
		ASSERT_EQ(E2BIG, errno);
	}
	ASSERT_EQ(0, close(ruleset_fd));
}
/*
 * Checks that an empty handled_access_fs is rejected and that enforcing
 * the very same ruleset twice stacks as expected.
 */
TEST_F_FORK(layout1, empty_or_same_ruleset)
{
	struct landlock_ruleset_attr ruleset_attr = {};
	int ruleset_fd;

	/*
	 * Tests empty handled_access_fs: creation must fail with ENOMSG.
	 * The original ASSERT_LE(-1, ruleset_fd) was vacuously true for any
	 * FD value, so it could never catch an unexpected success.
	 */
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_EQ(-1, ruleset_fd);
	ASSERT_EQ(ENOMSG, errno);

	/* Enforces policy which deny read access to all files. */
	ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE;
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));

	/* Nests a policy which deny read access to all directories. */
	ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR;
	ruleset_fd =
		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));

	/* Enforces a second time with the same ruleset. */
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
}
/*
 * Checks that a rule tied directly to a mount point (dir_s3d2) grants access
 * to the mount point itself and everything beneath it, while siblings and the
 * parent of the mount point stay denied.
 */
TEST_F_FORK(layout1, rule_on_mountpoint)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d1,
			.access = ACCESS_RO,
		},
		{
			/* dir_s3d2 is a mount point. */
			.path = dir_s3d2,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	/* No rule covers the s2 tree. */
	ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY));
	/* The parent of the mount point is not covered by the rule. */
	ASSERT_EQ(EACCES, test_open(dir_s3d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
}
/*
 * Checks that a rule on a directory above a mount point (dir_s3d1, which
 * contains the dir_s3d2 mount point) also grants access through the mount
 * point to the mounted hierarchy below it.
 */
TEST_F_FORK(layout1, rule_over_mountpoint)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d1,
			.access = ACCESS_RO,
		},
		{
			/* dir_s3d1 is the parent of the dir_s3d2 mount point. */
			.path = dir_s3d1,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	/* No rule covers the s2 tree. */
	ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY));
	/* Access extends across the mount point. */
	ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
}
/*
 * This test verifies that we can apply a landlock rule on the root directory
 * (which might require special handling).
 */
TEST_F_FORK(layout1, rule_over_root_allow_then_deny)
{
	struct rule rules[] = {
		{
			.path = "/",
			.access = ACCESS_RO,
		},
		{},
	};
	int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks allowed access. */
	ASSERT_EQ(0, test_open("/", O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	/*
	 * Second layer: the root rule no longer grants READ_DIR, so directory
	 * reads must now be denied everywhere.
	 */
	rules[0].access = LANDLOCK_ACCESS_FS_READ_FILE;
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks denied access (on a directory). */
	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
}
/*
 * Checks that a root rule granting only READ_FILE (but handling READ_DIR via
 * the ruleset) denies opening any directory, including "/" itself.
 */
TEST_F_FORK(layout1, rule_over_root_deny)
{
	const struct rule rules[] = {
		{
			.path = "/",
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks denied access (on a directory). */
	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
}
/*
 * Checks that Landlock rules work inside a new root set up with pivot_root:
 * a rule on the (relative) "s3d3" path is effective, and the new root itself
 * stays denied because no rule covers it.
 */
TEST_F_FORK(layout1, rule_inside_mount_ns)
{
	const struct rule rules[] = {
		{
			.path = "s3d3",
			.access = ACCESS_RO,
		},
		{},
	};
	int ruleset_fd;
	/* pivot_root(2) requires CAP_SYS_ADMIN. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3))
	{
		TH_LOG("Failed to pivot root: %s", strerror(errno));
	};
	ASSERT_EQ(0, chdir("/"));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, test_open("s3d3", O_RDONLY));
	/* The new root is not covered by any rule. */
	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
}
/*
 * Checks that a Landlock-restricted task cannot change the filesystem
 * topology: mount(2) and pivot_root(2) must fail with EPERM even with
 * CAP_SYS_ADMIN.
 */
TEST_F_FORK(layout1, mount_and_pivot)
{
	const struct rule rules[] = {
		{
			.path = dir_s3d2,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	set_cap(_metadata, CAP_SYS_ADMIN);
	/* Denied by Landlock, not by missing capability. */
	ASSERT_EQ(-1, mount(NULL, dir_s3d2, NULL, MS_RDONLY, NULL));
	ASSERT_EQ(EPERM, errno);
	ASSERT_EQ(-1, syscall(__NR_pivot_root, dir_s3d2, dir_s3d3));
	ASSERT_EQ(EPERM, errno);
	clear_cap(_metadata, CAP_SYS_ADMIN);
}
/*
 * Checks that move_mount(2) works before Landlock enforcement (with
 * CAP_SYS_ADMIN) but is denied with EPERM once the ruleset is enforced.
 */
TEST_F_FORK(layout1, move_mount)
{
	const struct rule rules[] = {
		{
			.path = dir_s3d2,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	set_cap(_metadata, CAP_SYS_ADMIN);
	/* Not restricted yet: moving the mount back and forth succeeds. */
	ASSERT_EQ(0, syscall(__NR_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD,
			     dir_s1d2, 0))
	{
		TH_LOG("Failed to move mount: %s", strerror(errno));
	}
	ASSERT_EQ(0, syscall(__NR_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD,
			     dir_s3d2, 0));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	set_cap(_metadata, CAP_SYS_ADMIN);
	/* Restricted: topology changes are denied despite CAP_SYS_ADMIN. */
	ASSERT_EQ(-1, syscall(__NR_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD,
			      dir_s1d2, 0));
	ASSERT_EQ(EPERM, errno);
	clear_cap(_metadata, CAP_SYS_ADMIN);
}
/*
 * Checks that unmounting a hierarchy referenced by a not-yet-enforced
 * ruleset does not break enforcement: rules on the unmounted paths simply
 * stop matching anything.
 */
TEST_F_FORK(layout1, release_inodes)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d1,
			.access = ACCESS_RO,
		},
		{
			.path = dir_s3d2,
			.access = ACCESS_RO,
		},
		{
			.path = dir_s3d3,
			.access = ACCESS_RO,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Unmount a file hierarchy while it is being used by a ruleset. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, umount(dir_s3d2));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
	/* The rule was tied to the now-unmounted inode, not to this path. */
	ASSERT_EQ(EACCES, test_open(dir_s3d2, O_RDONLY));
	/* This dir_s3d3 would not be allowed and does not exist anyway. */
	ASSERT_EQ(ENOENT, test_open(dir_s3d3, O_RDONLY));
}
enum relative_access {
REL_OPEN,
REL_CHDIR,
REL_CHROOT_ONLY,
REL_CHROOT_CHDIR,
};
/*
 * Exercises relative path resolution under two Landlock layers, reaching the
 * allowed directories via an FD, chdir, or chroot depending on @rel.  Also
 * checks that chroot does not escape the common layer1 restriction (i.e. a
 * chroot is not a disconnected root directory).
 */
static void test_relative_path(struct __test_metadata *const _metadata,
			       const enum relative_access rel)
{
	/*
	 * Common layer to check that chroot doesn't ignore it (i.e. a chroot
	 * is not a disconnected root directory).
	 */
	const struct rule layer1_base[] = {
		{
			.path = TMP_DIR,
			.access = ACCESS_RO,
		},
		{},
	};
	const struct rule layer2_subs[] = {
		{
			.path = dir_s1d2,
			.access = ACCESS_RO,
		},
		{
			.path = dir_s2d2,
			.access = ACCESS_RO,
		},
		{},
	};
	int dirfd, ruleset_fd;
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_subs);
	ASSERT_LE(0, ruleset_fd);
	/* Positions the CWD before the second enforcement when needed. */
	switch (rel) {
	case REL_OPEN:
	case REL_CHDIR:
		break;
	case REL_CHROOT_ONLY:
		ASSERT_EQ(0, chdir(dir_s2d2));
		break;
	case REL_CHROOT_CHDIR:
		ASSERT_EQ(0, chdir(dir_s1d2));
		break;
	default:
		ASSERT_TRUE(false);
		return;
	}
	set_cap(_metadata, CAP_SYS_CHROOT);
	enforce_ruleset(_metadata, ruleset_fd);
	/* Selects how relative paths will be resolved below. */
	switch (rel) {
	case REL_OPEN:
		dirfd = open(dir_s1d2, O_DIRECTORY);
		ASSERT_LE(0, dirfd);
		break;
	case REL_CHDIR:
		ASSERT_EQ(0, chdir(dir_s1d2));
		dirfd = AT_FDCWD;
		break;
	case REL_CHROOT_ONLY:
		/* Do chroot into dir_s1d2 (relative to dir_s2d2). */
		ASSERT_EQ(0, chroot("../../s1d1/s1d2"))
		{
			TH_LOG("Failed to chroot: %s", strerror(errno));
		}
		dirfd = AT_FDCWD;
		break;
	case REL_CHROOT_CHDIR:
		/* Do chroot into dir_s1d2. */
		ASSERT_EQ(0, chroot("."))
		{
			TH_LOG("Failed to chroot: %s", strerror(errno));
		}
		dirfd = AT_FDCWD;
		break;
	}
	/* ".." escapes the allowed subtree except when chrooted into it. */
	ASSERT_EQ((rel == REL_CHROOT_CHDIR) ? 0 : EACCES,
		  test_open_rel(dirfd, "..", O_RDONLY));
	ASSERT_EQ(0, test_open_rel(dirfd, ".", O_RDONLY));
	if (rel == REL_CHROOT_ONLY) {
		/* The current directory is dir_s2d2. */
		ASSERT_EQ(0, test_open_rel(dirfd, "./s2d3", O_RDONLY));
	} else {
		/* The current directory is dir_s1d2. */
		ASSERT_EQ(0, test_open_rel(dirfd, "./s1d3", O_RDONLY));
	}
	if (rel == REL_CHROOT_ONLY || rel == REL_CHROOT_CHDIR) {
		/* Checks the root dir_s1d2. */
		ASSERT_EQ(0, test_open_rel(dirfd, "/..", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "/", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "/f1", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "/s1d3", O_RDONLY));
	}
	if (rel != REL_CHROOT_CHDIR) {
		/* Walks up and back down into both allowed subtrees. */
		ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s1d1", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2/s1d3",
					   O_RDONLY));
		ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s2d1", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2", O_RDONLY));
		ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2/s2d3",
					   O_RDONLY));
	}
	if (rel == REL_OPEN)
		ASSERT_EQ(0, close(dirfd));
	ASSERT_EQ(0, close(ruleset_fd));
}
/* One test per relative_access mode, sharing test_relative_path(). */
TEST_F_FORK(layout1, relative_open)
{
	test_relative_path(_metadata, REL_OPEN);
}
TEST_F_FORK(layout1, relative_chdir)
{
	test_relative_path(_metadata, REL_CHDIR);
}
TEST_F_FORK(layout1, relative_chroot_only)
{
	test_relative_path(_metadata, REL_CHROOT_ONLY);
}
TEST_F_FORK(layout1, relative_chroot_chdir)
{
	test_relative_path(_metadata, REL_CHROOT_CHDIR);
}
/*
 * Overwrites the pre-existing file at @dst_path with a copy of the test
 * binary (BINARY_PATH), so that it can later be execve'd.
 */
static void copy_binary(struct __test_metadata *const _metadata,
			const char *const dst_path)
{
	int dst_fd, src_fd;
	struct stat statbuf;
	/* No O_CREAT: the destination is created by the test fixture. */
	dst_fd = open(dst_path, O_WRONLY | O_TRUNC | O_CLOEXEC);
	ASSERT_LE(0, dst_fd)
	{
		TH_LOG("Failed to open \"%s\": %s", dst_path, strerror(errno));
	}
	src_fd = open(BINARY_PATH, O_RDONLY | O_CLOEXEC);
	ASSERT_LE(0, src_fd)
	{
		TH_LOG("Failed to open \"" BINARY_PATH "\": %s",
		       strerror(errno));
	}
	ASSERT_EQ(0, fstat(src_fd, &statbuf));
	/*
	 * NOTE(review): a single sendfile() call is assumed to copy the whole
	 * binary; a partial copy would fail the size assertion rather than be
	 * retried.
	 */
	ASSERT_EQ(statbuf.st_size,
		  sendfile(dst_fd, src_fd, 0, statbuf.st_size));
	ASSERT_EQ(0, close(src_fd));
	ASSERT_EQ(0, close(dst_fd));
}
/*
 * Forks and execve's @path, checking that the execution fails with @err
 * (or succeeds when @err is 0).  The child exits with 2 when its assertions
 * passed (i.e. execve failed as expected), 1 otherwise.
 */
static void test_execute(struct __test_metadata *const _metadata, const int err,
			 const char *const path)
{
	int status;
	char *const argv[] = { (char *)path, NULL };
	const pid_t child = fork();
	ASSERT_LE(0, child);
	if (child == 0) {
		ASSERT_EQ(err ? -1 : 0, execve(path, argv, NULL))
		{
			TH_LOG("Failed to execute \"%s\": %s", path,
			       strerror(errno));
		};
		ASSERT_EQ(err, errno);
		/* Only reached when execve failed (expectedly or not). */
		_exit(_metadata->passed ? 2 : 1);
		return;
	}
	ASSERT_EQ(child, waitpid(child, &status, 0));
	ASSERT_EQ(1, WIFEXITED(status));
	ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status))
	{
		/*
		 * NOTE(review): errno here is stale (waitpid succeeded), so
		 * this strerror() output may be misleading.
		 */
		TH_LOG("Unexpected return code for \"%s\": %s", path,
		       strerror(errno));
	};
}
/*
 * Checks that LANDLOCK_ACCESS_FS_EXECUTE on dir_s1d2 allows execution below
 * it (file1_s1d2, file1_s1d3) but not outside (file1_s1d1), while reading
 * those files stays allowed (EXECUTE is the only handled access).
 */
TEST_F_FORK(layout1, execute)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_EXECUTE,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Copies the test binary before enforcement (needs write access). */
	copy_binary(_metadata, file1_s1d1);
	copy_binary(_metadata, file1_s1d2);
	copy_binary(_metadata, file1_s1d3);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
	test_execute(_metadata, EACCES, file1_s1d1);
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	test_execute(_metadata, 0, file1_s1d2);
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	test_execute(_metadata, 0, file1_s1d3);
}
/*
 * Checks hard link creation under MAKE_REG: linking needs MAKE_REG on the
 * destination directory, links across rule boundaries are denied as
 * reparenting (EXDEV), and REMOVE_FILE is not required to create a link.
 */
TEST_F_FORK(layout1, link)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{},
	};
	const struct rule layer2[] = {
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{},
	};
	int ruleset_fd = create_ruleset(_metadata, layer1[0].access, layer1);
	ASSERT_LE(0, ruleset_fd);
	/* Frees the target names before enforcement. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Denies linking because of missing MAKE_REG on dir_s1d1. */
	ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Denies linking because of reparenting. */
	ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, link(file2_s1d3, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Same-directory links are allowed. */
	ASSERT_EQ(0, link(file2_s1d2, file1_s1d2));
	ASSERT_EQ(0, link(file2_s1d3, file1_s1d3));
	/* Prepares for next unlinks. */
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, unlink(file2_s1d3));
	ruleset_fd = create_ruleset(_metadata, layer2[0].access, layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks that linking doesn't require the ability to delete a file. */
	ASSERT_EQ(0, link(file1_s1d2, file2_s1d2));
	ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
}
/*
 * Renames @oldpath to @newpath and reports the outcome as an errno value:
 * 0 on success, otherwise the errno set by rename(2).
 */
static int test_rename(const char *const oldpath, const char *const newpath)
{
	const int ret = rename(oldpath, newpath);

	return ret ? errno : 0;
}
/*
 * Atomically exchanges @oldpath and @newpath with
 * renameat2(RENAME_EXCHANGE), reporting the outcome as an errno value:
 * 0 on success, otherwise the errno set by the syscall.
 */
static int test_exchange(const char *const oldpath, const char *const newpath)
{
	const int ret = renameat2(AT_FDCWD, oldpath, AT_FDCWD, newpath,
				  RENAME_EXCHANGE);

	return ret ? errno : 0;
}
/*
 * Checks rename(2)/renameat2(2) of files under REMOVE_FILE rules: replacing
 * a file requires REMOVE_FILE on its directory, cross-directory moves are
 * denied as reparenting (EXDEV), and same-parent renames/exchanges are
 * always allowed.
 */
TEST_F_FORK(layout1, rename_file)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	ASSERT_EQ(0, unlink(file1_s1d2));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Tries to replace a file, from a directory that allows file removal,
	 * but to a different directory (which also allows file removal).
	 */
	ASSERT_EQ(-1, rename(file1_s2d3, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Tries to replace a file, from a directory that denies file removal,
	 * to a different directory (which allows file removal).
	 */
	ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file1_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	/* Exchanges files and directories that partially allow removal. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s2d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks that file1_s2d1 cannot be removed (instead of ENOTDIR). */
	ASSERT_EQ(-1, rename(dir_s2d2, file1_s2d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, dir_s2d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks that file1_s1d1 cannot be removed (instead of EISDIR). */
	ASSERT_EQ(-1, rename(file1_s1d1, dir_s1d2));
	ASSERT_EQ(EACCES, errno);
	/* Renames files with different parents. */
	ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
	ASSERT_EQ(EACCES, errno);
	/* Exchanges and renames files with same parent. */
	ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, rename(file2_s2d3, file1_s2d3));
	/* Exchanges files and directories with same parent, twice. */
	ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3,
			       RENAME_EXCHANGE));
}
/*
 * Checks rename(2)/renameat2(2) of directories under REMOVE_DIR rules:
 * replacing a directory requires REMOVE_DIR on its parent, cross-parent
 * moves are denied as reparenting (EXDEV), and same-parent operations follow
 * the parent's REMOVE_DIR access.
 */
TEST_F_FORK(layout1, rename_dir)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
		},
		{
			.path = dir_s2d1,
			.access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Empties dir_s1d3 to allow renaming. */
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Exchanges and renames directory to a different parent. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s2d3, dir_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Exchanges directory to the same parent, which doesn't allow
	 * directory removal.
	 */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d1, AT_FDCWD, dir_s2d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks that dir_s1d2 cannot be removed (instead of ENOTDIR). */
	ASSERT_EQ(-1, rename(dir_s1d2, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s1d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks that dir_s1d2 cannot be removed (instead of EISDIR). */
	ASSERT_EQ(-1, rename(file1_s1d1, dir_s1d2));
	ASSERT_EQ(EACCES, errno);
	/*
	 * Exchanges and renames directory to the same parent, which allows
	 * directory removal.
	 */
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s1d2,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, unlink(dir_s1d3));
	ASSERT_EQ(0, mkdir(dir_s1d3, 0700));
	ASSERT_EQ(0, rename(file1_s1d2, dir_s1d3));
	ASSERT_EQ(0, rmdir(dir_s1d3));
}
/*
 * Checks LANDLOCK_ACCESS_FS_REFER: moving a file hierarchy to a different
 * parent is only allowed when both the source and the destination parent
 * directories are covered by a REFER rule.
 */
TEST_F_FORK(layout1, reparent_refer)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{},
	};
	int ruleset_fd =
		create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REFER, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Destination parents outside the REFER rules are denied. */
	ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d1));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d2));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s1d2, dir_s2d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d1));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d2));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving should only be allowed when the source and the destination
	 * parent directory have REFER.
	 */
	ASSERT_EQ(-1, rename(dir_s1d3, dir_s2d3));
	ASSERT_EQ(ENOTEMPTY, errno);
	/* Empties the destination so the rename can succeed. */
	ASSERT_EQ(0, unlink(file1_s2d3));
	ASSERT_EQ(0, unlink(file2_s2d3));
	ASSERT_EQ(0, rename(dir_s1d3, dir_s2d3));
}
/*
 * Checks renames beneath dir_s1d1.
 *
 * First enforces @layer1 alone and checks different-parent renames/links
 * against @layer1_err (0 when layer1 handles REFER, EXDEV otherwise), then
 * stacks @layer2 and checks that they are all denied with EXDEV: as soon as
 * one layer does not handle REFER, reparenting is denied by default.
 */
static void refer_denied_by_default(struct __test_metadata *const _metadata,
				    const struct rule layer1[],
				    const int layer1_err,
				    const struct rule layer2[])
{
	int ruleset_fd;
	ASSERT_EQ(0, unlink(file1_s1d2));
	ruleset_fd = create_ruleset(_metadata, layer1[0].access, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * If the first layer handles LANDLOCK_ACCESS_FS_REFER (according to
	 * layer1_err), then it allows some different-parent renames and links.
	 */
	ASSERT_EQ(layer1_err, test_rename(file1_s1d1, file1_s1d2));
	if (layer1_err == 0)
		/* Moves the file back so the next checks start identically. */
		ASSERT_EQ(layer1_err, test_rename(file1_s1d2, file1_s1d1));
	ASSERT_EQ(layer1_err, test_exchange(file2_s1d1, file2_s1d2));
	ASSERT_EQ(layer1_err, test_exchange(file2_s1d2, file2_s1d1));
	ruleset_fd = create_ruleset(_metadata, layer2[0].access, layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Now, either the first or the second layer does not handle
	 * LANDLOCK_ACCESS_FS_REFER, which means that any different-parent
	 * renames and links are denied, thus making the layer handling
	 * LANDLOCK_ACCESS_FS_REFER null and void.
	 */
	ASSERT_EQ(EXDEV, test_rename(file1_s1d1, file1_s1d2));
	ASSERT_EQ(EXDEV, test_exchange(file2_s1d1, file2_s1d2));
	ASSERT_EQ(EXDEV, test_exchange(file2_s1d2, file2_s1d1));
}
/* Layer handling REFER on dir_s1d1, a parent of the tested files. */
const struct rule layer_dir_s1d1_refer[] = {
	{
		.path = dir_s1d1,
		.access = LANDLOCK_ACCESS_FS_REFER,
	},
	{},
};
/* Layer that does NOT handle REFER; used to nullify a REFER layer. */
const struct rule layer_dir_s1d1_execute[] = {
	{
		/* Matches a parent directory. */
		.path = dir_s1d1,
		.access = LANDLOCK_ACCESS_FS_EXECUTE,
	},
	{},
};
/* Same as above but its rule does not cover the tested files. */
const struct rule layer_dir_s2d1_execute[] = {
	{
		/* Does not match a parent directory. */
		.path = dir_s2d1,
		.access = LANDLOCK_ACCESS_FS_EXECUTE,
	},
	{},
};
/*
 * Tests precedence over renames: denied by default for different parent
 * directories, *with* a rule matching a parent directory, but not directly
 * denying access (with MAKE_REG nor REMOVE).
 */
TEST_F_FORK(layout1, refer_denied_by_default1)
{
	/* layer1 handles REFER, so its renames succeed (err 0). */
	refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
				layer_dir_s1d1_execute);
}
/*
 * Same test but this time turning around the ABI version order: the first
 * layer does not handle LANDLOCK_ACCESS_FS_REFER.
 */
TEST_F_FORK(layout1, refer_denied_by_default2)
{
	/* layer1 does not handle REFER, so its renames fail with EXDEV. */
	refer_denied_by_default(_metadata, layer_dir_s1d1_execute, EXDEV,
				layer_dir_s1d1_refer);
}
/*
 * Tests precedence over renames: denied by default for different parent
 * directories, *without* a rule matching a parent directory, but not directly
 * denying access (with MAKE_REG nor REMOVE).
 */
TEST_F_FORK(layout1, refer_denied_by_default3)
{
	/* The second layer's rule does not cover the renamed files. */
	refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
				layer_dir_s2d1_execute);
}
/*
 * Same test but this time turning around the ABI version order: the first
 * layer does not handle LANDLOCK_ACCESS_FS_REFER.
 */
TEST_F_FORK(layout1, refer_denied_by_default4)
{
	refer_denied_by_default(_metadata, layer_dir_s2d1_execute, EXDEV,
				layer_dir_s1d1_refer);
}
/*
 * Checks hard links with mixed REFER and MAKE_REG rules: a link needs
 * MAKE_REG on the destination directory, and REFER on both the source and
 * the destination directories when they differ.
 */
TEST_F_FORK(layout1, reparent_link)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Frees the target names. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	/* Denies linking because of missing MAKE_REG. */
	ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Denies linking because of missing source and destination REFER. */
	ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Denies linking because of missing source REFER. */
	ASSERT_EQ(-1, link(file1_s2d1, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	/* Denies linking because of missing MAKE_REG. */
	ASSERT_EQ(-1, link(file1_s2d2, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Denies linking because of missing destination REFER. */
	ASSERT_EQ(-1, link(file1_s2d2, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Allows linking because of REFER and MAKE_REG. */
	ASSERT_EQ(0, link(file1_s2d2, file1_s1d3));
	ASSERT_EQ(0, unlink(file1_s2d2));
	/* Reverse linking denied because of missing MAKE_REG. */
	ASSERT_EQ(-1, link(file1_s1d3, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, unlink(file1_s2d3));
	/* Checks reverse linking. */
	ASSERT_EQ(0, link(file1_s1d3, file1_s2d3));
	ASSERT_EQ(0, unlink(file1_s1d3));
	/*
	 * This is OK for a file link, but it should not be allowed for a
	 * directory rename (because of the superset of access rights).
	 */
	ASSERT_EQ(0, link(file1_s2d3, file1_s1d3));
	ASSERT_EQ(0, unlink(file1_s1d3));
	/* Cross-directory links without REFER on both sides are denied. */
	ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, link(file2_s1d3, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Same-directory links are allowed. */
	ASSERT_EQ(0, link(file2_s1d2, file1_s1d2));
	ASSERT_EQ(0, link(file2_s1d3, file1_s1d3));
}
/*
 * Checks renames and exchanges with mixed REFER and MAKE_REG rules (same
 * layout as reparent_link): replacing a file needs MAKE_REG on its
 * directory, different-parent moves need REFER on both sides, and moved
 * directories must not gain access rights beyond their current superset.
 */
TEST_F_FORK(layout1, reparent_rename)
{
	/* Same rules as for reparent_link. */
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	/* Denies renaming because of missing MAKE_REG. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file1_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file2_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Even denies same file exchange. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file2_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Denies renaming because of missing source and destination REFER. */
	ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Denies renaming because of missing MAKE_REG, source and destination
	 * REFER.
	 */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file2_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file2_s1d1, AT_FDCWD, file1_s2d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Denies renaming because of missing source REFER. */
	ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	/* Denies renaming because of missing MAKE_REG. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file2_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Denies renaming because of missing MAKE_REG. */
	ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Denies renaming because of missing destination REFER. */
	ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Denies exchange because of one missing MAKE_REG. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, file2_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Allows renaming because of REFER and MAKE_REG. */
	ASSERT_EQ(0, rename(file1_s2d2, file1_s1d3));
	/* Reverse renaming denied because of missing MAKE_REG. */
	ASSERT_EQ(-1, rename(file1_s1d3, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, unlink(file1_s2d3));
	ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3));
	/* Tests reverse renaming. */
	ASSERT_EQ(0, rename(file1_s2d3, file1_s1d3));
	ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s1d3,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3));
	/*
	 * This is OK for a file rename, but it should not be allowed for a
	 * directory rename (because of the superset of access rights).
	 */
	ASSERT_EQ(0, rename(file1_s2d3, file1_s1d3));
	ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3));
	/*
	 * Tests superset restrictions applied to directories. Not only the
	 * dir_s2d3's parent (dir_s2d2) should be taken into account but also
	 * access rights tied to dir_s2d3. dir_s2d2 is missing one access right
	 * compared to dir_s1d3/file1_s1d3 (MAKE_REG) but it is provided
	 * directly by the moved dir_s2d3.
	 */
	ASSERT_EQ(0, rename(dir_s2d3, file1_s1d3));
	ASSERT_EQ(0, rename(file1_s1d3, dir_s2d3));
	/*
	 * The first rename is allowed but not the exchange because dir_s1d3's
	 * parent (dir_s1d2) doesn't have REFER.
	 */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(file1_s2d3, dir_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(file2_s1d2, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(file2_s1d3, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	/* Renaming in the same directory is always allowed. */
	ASSERT_EQ(0, rename(file2_s1d2, file1_s1d2));
	ASSERT_EQ(0, rename(file2_s1d3, file1_s1d3));
	ASSERT_EQ(0, unlink(file1_s1d2));
	/* Denies because of missing source MAKE_REG and destination REFER. */
	ASSERT_EQ(-1, rename(dir_s2d3, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(0, unlink(file1_s1d3));
	/* Denies because of missing source MAKE_REG and REFER. */
	ASSERT_EQ(-1, rename(dir_s2d2, file1_s1d3));
	ASSERT_EQ(EXDEV, errno);
}
/*
 * Enforces the first layer used by the reparent_exdev_layers_* tests:
 * REFER on dir_s1d2 and dir_s2d2, MAKE_REG on dir_s1d3 and dir_s2d3.
 */
static void
reparent_exdev_layers_enforce1(struct __test_metadata *const _metadata)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			/* Interesting for the layer2 tests. */
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = dir_s2d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_REG,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_MAKE_REG | LANDLOCK_ACCESS_FS_REFER, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
}
/*
 * Enforces the second layer used by the reparent_exdev_layers_* tests:
 * only MAKE_DIR on dir_s2d3, without handling REFER.
 */
static void
reparent_exdev_layers_enforce2(struct __test_metadata *const _metadata)
{
	const struct rule layer2[] = {
		{
			.path = dir_s2d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_DIR,
		},
		{},
	};
	/*
	 * Same checks as before but with a second layer and a new MAKE_DIR
	 * rule (and no explicit handling of REFER).
	 */
	const int ruleset_fd =
		create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_DIR, layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
}
/*
 * Checks that stacking a second layer restricts reparenting: directory moves
 * allowed under layer1 become denied once layer2 adds MAKE_DIR (inherited
 * rights would grow) or simply because layer2 does not handle REFER.
 */
TEST_F_FORK(layout1, reparent_exdev_layers_rename1)
{
	ASSERT_EQ(0, unlink(file1_s2d2));
	ASSERT_EQ(0, unlink(file1_s2d3));
	reparent_exdev_layers_enforce1(_metadata);
	/*
	 * Moving the dir_s1d3 directory below dir_s2d2 is allowed by Landlock
	 * because it doesn't inherit new access rights.
	 */
	ASSERT_EQ(0, rename(dir_s1d3, file1_s2d2));
	ASSERT_EQ(0, rename(file1_s2d2, dir_s1d3));
	/*
	 * Moving the dir_s1d3 directory below dir_s2d3 is allowed, even if it
	 * gets a new inherited access rights (MAKE_REG), because MAKE_REG is
	 * already allowed for dir_s1d3.
	 */
	ASSERT_EQ(0, rename(dir_s1d3, file1_s2d3));
	ASSERT_EQ(0, rename(file1_s2d3, dir_s1d3));
	/*
	 * However, moving the file1_s1d3 file below dir_s2d3 is allowed
	 * because it cannot inherit MAKE_REG right (which is dedicated to
	 * directories).
	 */
	ASSERT_EQ(0, rename(file1_s1d3, file1_s2d3));
	reparent_exdev_layers_enforce2(_metadata);
	/*
	 * Moving the dir_s1d3 directory below dir_s2d2 is now denied because
	 * MAKE_DIR is not tied to dir_s2d2.
	 */
	ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	/*
	 * Moving the dir_s1d3 directory below dir_s2d3 is forbidden because it
	 * would grants MAKE_REG and MAKE_DIR rights to it.
	 */
	ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving the file2_s1d3 file below dir_s2d3 is denied because the
	 * second layer does not handle REFER, which is always denied by
	 * default.
	 */
	ASSERT_EQ(-1, rename(file2_s1d3, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
}
/*
 * Checks error precedence for reparenting under one then two layers:
 * EACCES (missing access right) takes precedence over EXDEV (missing
 * REFER), both before and after stacking the second layer.
 */
TEST_F_FORK(layout1, reparent_exdev_layers_rename2)
{
	reparent_exdev_layers_enforce1(_metadata);
	/* Checks EACCES predominance over EXDEV. */
	ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/* Modify layout! */
	ASSERT_EQ(0, rename(file1_s1d2, file1_s2d3));
	/* Without REFER source. */
	ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d2));
	ASSERT_EQ(EXDEV, errno);
	reparent_exdev_layers_enforce2(_metadata);
	/* Checks EACCES predominance over EXDEV. */
	ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	/* Checks with actual file2_s1d2. */
	ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Modifying the layout is now denied because the second layer does not
	 * handle REFER, which is always denied by default.
	 */
	ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/* Without REFER source, EACCES wins over EXDEV. */
	ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d2));
	ASSERT_EQ(EACCES, errno);
}
/*
 * Checks renameat2(RENAME_EXCHANGE) across two hierarchies with stacked
 * layers: error precedence (EACCES vs. EXDEV), and consistency of
 * file<->directory exchanges.  Complemented by
 * layout1.reparent_exdev_layers_exchange2 and *_exchange3.
 */
TEST_F_FORK(layout1, reparent_exdev_layers_exchange1)
{
	/* Reuses file paths as directory names for the exchange tests. */
	const char *const dir_file1_s1d2 = file1_s1d2, *const dir_file2_s2d3 =
		file2_s2d3;
	/* Replaces two regular files with same-named directories. */
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, mkdir(file1_s1d2, 0700));
	ASSERT_EQ(0, unlink(file2_s2d3));
	ASSERT_EQ(0, mkdir(file2_s2d3, 0700));
	reparent_exdev_layers_enforce1(_metadata);
	/* Error predominance with file exchange: returns EXDEV and EACCES. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file1_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/*
	 * Checks with directories which creation could be allowed, but denied
	 * because of access rights that would be inherited.
	 */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD,
				dir_file2_s2d3, RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD,
				dir_file1_s1d2, RENAME_EXCHANGE));
	ASSERT_EQ(EXDEV, errno);
	/* Checks with same access rights. */
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, dir_s2d3,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3,
			       RENAME_EXCHANGE));
	/* Checks with different (child-only) access rights. */
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_file1_s1d2,
			       RENAME_EXCHANGE));
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, dir_s2d3,
			       RENAME_EXCHANGE));
	/*
	 * Checks that exchange between file and directory are consistent.
	 *
	 * Moving a file (file1_s2d2) to a directory which only grants more
	 * directory-related access rights is allowed, and at the same time
	 * moving a directory (dir_file2_s2d3) to another directory which
	 * grants less access rights is allowed too.
	 *
	 * See layout1.reparent_exdev_layers_exchange3 for inverted arguments.
	 */
	ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3,
			       RENAME_EXCHANGE));
	/*
	 * However, moving back the directory is denied because it would get
	 * more access rights than the current state and because file creation
	 * is forbidden (in dir_s2d2).
	 */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Enforces the second layer (no REFER handled). */
	reparent_exdev_layers_enforce2(_metadata);
	/* Error predominance with file exchange: returns EXDEV and EACCES. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, file1_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d1,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks with directories which creation is now denied. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD,
				dir_file2_s2d3, RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD,
				dir_file1_s1d2, RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks with different (child-only) access rights. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, dir_s2d3,
				RENAME_EXCHANGE));
	/* Denied because of MAKE_DIR. */
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Checks with different (child-only) access rights. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_file1_s1d2,
				RENAME_EXCHANGE));
	/* Denied because of MAKE_DIR. */
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file1_s1d2, AT_FDCWD, dir_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* See layout1.reparent_exdev_layers_exchange2 for complement. */
}
/*
 * Complement of layout1.reparent_exdev_layers_exchange1: with both layers
 * enforced up front, a file<->directory exchange is denied in both directions.
 */
TEST_F_FORK(layout1, reparent_exdev_layers_exchange2)
{
	const char *const dir_file2_s2d3 = file2_s2d3;
	/* Replaces file2_s2d3 with a same-named directory. */
	ASSERT_EQ(0, unlink(file2_s2d3));
	ASSERT_EQ(0, mkdir(file2_s2d3, 0700));
	reparent_exdev_layers_enforce1(_metadata);
	reparent_exdev_layers_enforce2(_metadata);
	/* Checks that exchange between file and directory are consistent. */
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
}
/*
 * Inverted-argument counterpart of layout1.reparent_exdev_layers_exchange1:
 * the first exchange (directory first) succeeds, after which neither ordering
 * can move things back.
 */
TEST_F_FORK(layout1, reparent_exdev_layers_exchange3)
{
	const char *const dir_file2_s2d3 = file2_s2d3;
	/* Replaces file2_s2d3 with a same-named directory. */
	ASSERT_EQ(0, unlink(file2_s2d3));
	ASSERT_EQ(0, mkdir(file2_s2d3, 0700));
	reparent_exdev_layers_enforce1(_metadata);
	/*
	 * Checks that exchange between file and directory are consistent,
	 * including with inverted arguments (see
	 * layout1.reparent_exdev_layers_exchange1).
	 */
	ASSERT_EQ(0, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2,
			       RENAME_EXCHANGE));
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_file2_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_file2_s2d3, AT_FDCWD, file1_s2d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
}
/*
 * Checks that reparenting with rename()/renameat2() also requires the
 * appropriate REMOVE_FILE/REMOVE_DIR right on the source parent, matching the
 * type (file vs. directory) of the moved object.
 */
TEST_F_FORK(layout1, reparent_remove)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d1,
			.access = LANDLOCK_ACCESS_FS_REFER |
				  LANDLOCK_ACCESS_FS_REMOVE_DIR,
		},
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{
			.path = dir_s2d1,
			.access = LANDLOCK_ACCESS_FS_REFER |
				  LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_REMOVE_DIR |
			LANDLOCK_ACCESS_FS_REMOVE_FILE,
		layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Access denied because of wrong/swapped remove file/dir. */
	ASSERT_EQ(-1, rename(file1_s1d1, dir_s2d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(dir_s2d2, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s2d2,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s2d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
	/* Access allowed thanks to the matching rights. */
	ASSERT_EQ(-1, rename(file1_s2d1, dir_s1d2));
	ASSERT_EQ(EISDIR, errno);
	ASSERT_EQ(-1, rename(dir_s1d2, file1_s2d1));
	ASSERT_EQ(ENOTDIR, errno);
	ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d1));
	ASSERT_EQ(ENOTDIR, errno);
	/* Empties dir_s1d3 so that it can be renamed over file1_s2d1. */
	ASSERT_EQ(0, unlink(file1_s2d1));
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	ASSERT_EQ(0, rename(dir_s1d3, file1_s2d1));
	/* Effectively removes a file and a directory by exchanging them. */
	ASSERT_EQ(0, mkdir(dir_s1d3, 0700));
	ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3,
			       RENAME_EXCHANGE));
	ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3,
				RENAME_EXCHANGE));
	ASSERT_EQ(EACCES, errno);
}
/*
 * Checks reparenting where destination directories grant a superset (or a
 * disjoint set) of the source's access rights: moves are allowed only if they
 * do not grant new rights to the moved object, which creates one-way
 * "sinkhole" moves.
 */
TEST_F_FORK(layout1, reparent_dom_superset)
{
	const struct rule layer1[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = file1_s1d2,
			.access = LANDLOCK_ACCESS_FS_EXECUTE,
		},
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_MAKE_SOCK |
				  LANDLOCK_ACCESS_FS_EXECUTE,
		},
		{
			.path = dir_s2d2,
			.access = LANDLOCK_ACCESS_FS_REFER |
				  LANDLOCK_ACCESS_FS_EXECUTE |
				  LANDLOCK_ACCESS_FS_MAKE_SOCK,
		},
		{
			.path = dir_s2d3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_MAKE_FIFO,
		},
		{},
	};
	int ruleset_fd = create_ruleset(_metadata,
					LANDLOCK_ACCESS_FS_REFER |
						LANDLOCK_ACCESS_FS_EXECUTE |
						LANDLOCK_ACCESS_FS_MAKE_SOCK |
						LANDLOCK_ACCESS_FS_READ_FILE |
						LANDLOCK_ACCESS_FS_MAKE_FIFO,
					layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* dir_s2d1 is not tied to any REFER rule for the destination. */
	ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d1));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving file1_s1d2 beneath dir_s2d3 would grant it the READ_FILE
	 * access right.
	 */
	ASSERT_EQ(-1, rename(file1_s1d2, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving file1_s1d2 should be allowed even if dir_s2d2 grants a
	 * superset of access rights compared to dir_s1d2, because file1_s1d2
	 * already has these access rights anyway.
	 */
	ASSERT_EQ(0, rename(file1_s1d2, file1_s2d2));
	ASSERT_EQ(0, rename(file1_s2d2, file1_s1d2));
	ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d1));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving dir_s1d3 beneath dir_s2d3 would grant it the MAKE_FIFO access
	 * right.
	 */
	ASSERT_EQ(-1, rename(dir_s1d3, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/*
	 * Moving dir_s1d3 should be allowed even if dir_s2d2 grants a superset
	 * of access rights compared to dir_s1d2, because dir_s1d3 already has
	 * these access rights anyway.
	 */
	ASSERT_EQ(0, rename(dir_s1d3, file1_s2d2));
	ASSERT_EQ(0, rename(file1_s2d2, dir_s1d3));
	/*
	 * Moving file1_s2d3 beneath dir_s1d2 is allowed, but moving it back
	 * will be denied because the new inherited access rights from dir_s1d2
	 * will be less than the destination (original) dir_s2d3. This is a
	 * sinkhole scenario where we cannot move back files or directories.
	 */
	ASSERT_EQ(0, rename(file1_s2d3, file2_s1d2));
	ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d3));
	ASSERT_EQ(EXDEV, errno);
	/* Empties dir_s2d3 for the directory one-way move below. */
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, unlink(file2_s2d3));
	/*
	 * Checks similar directory one-way move: dir_s2d3 loses EXECUTE and
	 * MAKE_SOCK which were inherited from dir_s1d3.
	 */
	ASSERT_EQ(0, rename(dir_s2d3, file2_s1d2));
	ASSERT_EQ(-1, rename(file2_s1d2, dir_s2d3));
	ASSERT_EQ(EXDEV, errno);
}
/*
 * Checks LANDLOCK_ACCESS_FS_REMOVE_DIR: directories below dir_s1d2 can be
 * removed, but dir_s1d2 itself and its ancestors cannot.
 */
TEST_F_FORK(layout1, remove_dir)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REMOVE_DIR,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Empties the directories before enforcement so rmdir can succeed. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Removal allowed below dir_s1d2, via both rmdir() and unlinkat(). */
	ASSERT_EQ(0, rmdir(dir_s1d3));
	ASSERT_EQ(0, mkdir(dir_s1d3, 0700));
	ASSERT_EQ(0, unlinkat(AT_FDCWD, dir_s1d3, AT_REMOVEDIR));
	/* dir_s1d2 itself cannot be removed. */
	ASSERT_EQ(-1, rmdir(dir_s1d2));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d2, AT_REMOVEDIR));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rmdir(dir_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d1, AT_REMOVEDIR));
	ASSERT_EQ(EACCES, errno);
}
/*
 * Checks LANDLOCK_ACCESS_FS_REMOVE_FILE: files below dir_s1d2 can be
 * unlinked, files outside that hierarchy cannot.
 */
TEST_F_FORK(layout1, remove_file)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_REMOVE_FILE,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* file1_s1d1 is outside the granted hierarchy. */
	ASSERT_EQ(-1, unlink(file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, unlinkat(AT_FDCWD, file1_s1d1, 0));
	ASSERT_EQ(EACCES, errno);
	/* Removal works below dir_s1d2, via both unlink() and unlinkat(). */
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlinkat(AT_FDCWD, file1_s1d3, 0));
}
/*
 * Helper for the make_* tests: grants @access on dir_s1d2, then checks that
 * mknod(), link() and rename() of a node with @mode/@dev are denied in s1d1
 * (outside the rule) and allowed in s1d2 and s1d3 (inside the rule).
 */
static void test_make_file(struct __test_metadata *const _metadata,
			   const __u64 access, const mode_t mode,
			   const dev_t dev)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = access,
		},
		{},
	};
	const int ruleset_fd = create_ruleset(_metadata, access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Pre-creates file2_s1d1 (before enforcement) as a link/rename source. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file2_s1d1));
	ASSERT_EQ(0, mknod(file2_s1d1, mode | 0400, dev))
	{
		TH_LOG("Failed to make file \"%s\": %s", file2_s1d1,
		       strerror(errno));
	};
	/* Frees the target paths used after enforcement. */
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Denied in s1d1: not covered by the rule. */
	ASSERT_EQ(-1, mknod(file1_s1d1, mode | 0400, dev));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Allowed in s1d2: directly covered by the rule. */
	ASSERT_EQ(0, mknod(file1_s1d2, mode | 0400, dev))
	{
		TH_LOG("Failed to make file \"%s\": %s", file1_s1d2,
		       strerror(errno));
	};
	ASSERT_EQ(0, link(file1_s1d2, file2_s1d2));
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2));
	/* Allowed in s1d3: inherits the rule from dir_s1d2. */
	ASSERT_EQ(0, mknod(file1_s1d3, mode | 0400, dev));
	ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3));
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_CHAR with a character device node. */
TEST_F_FORK(layout1, make_char)
{
	/* Creates a /dev/null device. */
	set_cap(_metadata, CAP_MKNOD);
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_CHAR, S_IFCHR,
		       makedev(1, 3));
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_BLOCK with a block device node. */
TEST_F_FORK(layout1, make_block)
{
	/* Creates a /dev/loop0 device. */
	set_cap(_metadata, CAP_MKNOD);
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_BLOCK, S_IFBLK,
		       makedev(7, 0));
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_REG with an explicit S_IFREG mode. */
TEST_F_FORK(layout1, make_reg_1)
{
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, S_IFREG, 0);
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_REG with mode 0 (mknod defaults to regular). */
TEST_F_FORK(layout1, make_reg_2)
{
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, 0, 0);
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_SOCK with a UNIX socket node. */
TEST_F_FORK(layout1, make_sock)
{
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_SOCK, S_IFSOCK, 0);
}
/* Checks LANDLOCK_ACCESS_FS_MAKE_FIFO with a named pipe node. */
TEST_F_FORK(layout1, make_fifo)
{
	test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_FIFO, S_IFIFO, 0);
}
/*
 * Checks LANDLOCK_ACCESS_FS_MAKE_SYM: symlink creation (and related link and
 * rename operations) is only allowed below dir_s1d2.  Symlinks cannot be
 * created with mknod(), hence this dedicated test instead of test_make_file().
 */
TEST_F_FORK(layout1, make_sym)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_SYM,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Pre-creates a symlink source in s1d1 before enforcement. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file2_s1d1));
	ASSERT_EQ(0, symlink("none", file2_s1d1));
	/* Frees the target paths used after enforcement. */
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Denied in s1d1: not covered by the rule. */
	ASSERT_EQ(-1, symlink("none", file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1));
	ASSERT_EQ(EACCES, errno);
	/* Allowed in s1d2: directly covered by the rule. */
	ASSERT_EQ(0, symlink("none", file1_s1d2));
	ASSERT_EQ(0, link(file1_s1d2, file2_s1d2));
	ASSERT_EQ(0, unlink(file2_s1d2));
	ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2));
	/* Allowed in s1d3: inherits the rule from dir_s1d2. */
	ASSERT_EQ(0, symlink("none", file1_s1d3));
	ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
	ASSERT_EQ(0, unlink(file2_s1d3));
	ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3));
}
/*
 * Checks LANDLOCK_ACCESS_FS_MAKE_DIR: mkdir() is only allowed below dir_s1d2.
 */
TEST_F_FORK(layout1, make_dir)
{
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_MAKE_DIR,
		},
		{},
	};
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	/* Frees the file paths that are reused as directory names below. */
	ASSERT_EQ(0, unlink(file1_s1d1));
	ASSERT_EQ(0, unlink(file1_s1d2));
	ASSERT_EQ(0, unlink(file1_s1d3));
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Uses file_* as directory names. */
	ASSERT_EQ(-1, mkdir(file1_s1d1, 0700));
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, mkdir(file1_s1d2, 0700));
	ASSERT_EQ(0, mkdir(file1_s1d3, 0700));
}
/*
 * Re-opens @fd through its /proc/self/fd/<fd> magic link with @open_flags.
 * Returns the new file descriptor, or -1 with errno set by open().
 */
static int open_proc_fd(struct __test_metadata *const _metadata, const int fd,
			const int open_flags)
{
	static const char path_template[] = "/proc/self/fd/%d";
	/* +10 leaves room for the decimal representation of any int fd. */
	char procfd_path[sizeof(path_template) + 10];
	const int procfd_path_size =
		snprintf(procfd_path, sizeof(procfd_path), path_template, fd);
	/* Guards against (unexpected) truncation by snprintf(). */
	ASSERT_LT(procfd_path_size, sizeof(procfd_path));
	return open(procfd_path, open_flags);
}
/*
 * Checks that Landlock restrictions still apply when re-opening an unlinked
 * file through its /proc/self/fd magic link: read-only re-open is allowed,
 * read-write re-open is denied.
 */
TEST_F_FORK(layout1, proc_unlinked_file)
{
	const struct rule rules[] = {
		{
			.path = file1_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	int reg_fd, proc_fd;
	const int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE,
		rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* file1_s1d2 is readable but not writable. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	reg_fd = open(file1_s1d2, O_RDONLY | O_CLOEXEC);
	ASSERT_LE(0, reg_fd);
	/* Unlinks the file; it remains reachable via /proc/self/fd. */
	ASSERT_EQ(0, unlink(file1_s1d2));
	proc_fd = open_proc_fd(_metadata, reg_fd, O_RDONLY | O_CLOEXEC);
	ASSERT_LE(0, proc_fd);
	ASSERT_EQ(0, close(proc_fd));
	/* Re-opening read-write must still be denied by the ruleset. */
	proc_fd = open_proc_fd(_metadata, reg_fd, O_RDWR | O_CLOEXEC);
	ASSERT_EQ(-1, proc_fd)
	{
		TH_LOG("Successfully opened /proc/self/fd/%d: %s", reg_fd,
		       strerror(errno));
	}
	ASSERT_EQ(EACCES, errno);
	ASSERT_EQ(0, close(reg_fd));
}
/*
 * Checks that pipe FDs re-opened through /proc/self/fd are not subject to
 * filesystem read/write restrictions: pipes are not tied to the filesystem,
 * so both write and read through the magic links must succeed even though
 * regular-file access is restricted to dir_s1d2.
 */
TEST_F_FORK(layout1, proc_pipe)
{
	int proc_fd;
	int pipe_fds[2];
	char buf = '\0';
	const struct rule rules[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	/* Limits read and write access to files tied to the filesystem. */
	const int ruleset_fd =
		create_ruleset(_metadata, rules[0].access, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks enforcement for normal files. */
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
	/* Checks access to pipes through FD. */
	ASSERT_EQ(0, pipe2(pipe_fds, O_CLOEXEC));
	ASSERT_EQ(1, write(pipe_fds[1], ".", 1))
	{
		TH_LOG("Failed to write in pipe: %s", strerror(errno));
	}
	ASSERT_EQ(1, read(pipe_fds[0], &buf, 1));
	ASSERT_EQ('.', buf);
	/* Checks write access to pipe through /proc/self/fd . */
	proc_fd = open_proc_fd(_metadata, pipe_fds[1], O_WRONLY | O_CLOEXEC);
	ASSERT_LE(0, proc_fd);
	ASSERT_EQ(1, write(proc_fd, ".", 1))
	{
		TH_LOG("Failed to write through /proc/self/fd/%d: %s",
		       pipe_fds[1], strerror(errno));
	}
	ASSERT_EQ(0, close(proc_fd));
	/* Checks read access to pipe through /proc/self/fd . */
	proc_fd = open_proc_fd(_metadata, pipe_fds[0], O_RDONLY | O_CLOEXEC);
	ASSERT_LE(0, proc_fd);
	buf = '\0';
	ASSERT_EQ(1, read(proc_fd, &buf, 1))
	{
		/* Fix: logs the read end (pipe_fds[0]), not the write end. */
		TH_LOG("Failed to read through /proc/self/fd/%d: %s",
		       pipe_fds[0], strerror(errno));
	}
	ASSERT_EQ(0, close(proc_fd));
	ASSERT_EQ(0, close(pipe_fds[0]));
	ASSERT_EQ(0, close(pipe_fds[1]));
}
/* Invokes truncate(2) and returns its errno or 0. */
static int test_truncate(const char *const path)
{
	const int ret = truncate(path, 10);

	return (ret < 0) ? errno : 0;
}
/*
 * Invokes creat(2) and returns its errno or 0.
 * Closes the opened file descriptor on success.
 */
static int test_creat(const char *const path)
{
	const int fd = creat(path, 0600);

	if (fd < 0)
		return errno;
	/*
	 * Mixing error codes from close(2) and creat(2) should not lead to any
	 * (access type) confusion for this test.
	 */
	return (close(fd) < 0) ? errno : 0;
}
/*
 * Exercises file truncation when it's not restricted,
 * as it was the case before LANDLOCK_ACCESS_FS_TRUNCATE existed.
 */
TEST_F_FORK(layout1, truncate_unhandled)
{
	/* One file per granted right: read-only, write-only, none. */
	const char *const file_r = file1_s1d1;
	const char *const file_w = file2_s1d1;
	const char *const file_none = file1_s1d2;
	const struct rule rules[] = {
		{
			.path = file_r,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = file_w,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		/* Implicitly: No rights for file_none. */
		{},
	};
	/* TRUNCATE is deliberately NOT handled by this ruleset. */
	const __u64 handled = LANDLOCK_ACCESS_FS_READ_FILE |
			      LANDLOCK_ACCESS_FS_WRITE_FILE;
	int ruleset_fd;
	/* Enable Landlock. */
	ruleset_fd = create_ruleset(_metadata, handled, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/*
	 * Checks read right: truncate and open with O_TRUNC work, unless the
	 * file is attempted to be opened for writing.
	 */
	EXPECT_EQ(0, test_truncate(file_r));
	EXPECT_EQ(0, test_open(file_r, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_r, O_WRONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_creat(file_r));
	/*
	 * Checks write right: truncate and open with O_TRUNC work, unless the
	 * file is attempted to be opened for reading.
	 */
	EXPECT_EQ(0, test_truncate(file_w));
	EXPECT_EQ(EACCES, test_open(file_w, O_RDONLY | O_TRUNC));
	EXPECT_EQ(0, test_open(file_w, O_WRONLY | O_TRUNC));
	EXPECT_EQ(0, test_creat(file_w));
	/*
	 * Checks "no rights" case: truncate works but all open attempts fail,
	 * including creat.
	 */
	EXPECT_EQ(0, test_truncate(file_none));
	EXPECT_EQ(EACCES, test_open(file_none, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_none, O_WRONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_creat(file_none));
}
/*
 * Checks LANDLOCK_ACCESS_FS_TRUNCATE combinations on files and directories:
 * truncate(2) needs the TRUNCATE right, open(O_TRUNC) additionally needs the
 * matching READ/WRITE right, and creat() over an existing file needs TRUNCATE.
 */
TEST_F_FORK(layout1, truncate)
{
	/* One file per combination of granted rights. */
	const char *const file_rwt = file1_s1d1;
	const char *const file_rw = file2_s1d1;
	const char *const file_rt = file1_s1d2;
	const char *const file_t = file2_s1d2;
	const char *const file_none = file1_s1d3;
	const char *const dir_t = dir_s2d1;
	const char *const file_in_dir_t = file1_s2d1;
	const char *const dir_w = dir_s3d1;
	const char *const file_in_dir_w = file1_s3d1;
	const struct rule rules[] = {
		{
			.path = file_rwt,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE |
				  LANDLOCK_ACCESS_FS_TRUNCATE,
		},
		{
			.path = file_rw,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = file_rt,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_TRUNCATE,
		},
		{
			.path = file_t,
			.access = LANDLOCK_ACCESS_FS_TRUNCATE,
		},
		/* Implicitly: No access rights for file_none. */
		{
			.path = dir_t,
			.access = LANDLOCK_ACCESS_FS_TRUNCATE,
		},
		{
			.path = dir_w,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const __u64 handled = LANDLOCK_ACCESS_FS_READ_FILE |
			      LANDLOCK_ACCESS_FS_WRITE_FILE |
			      LANDLOCK_ACCESS_FS_TRUNCATE;
	int ruleset_fd;
	/* Enable Landlock. */
	ruleset_fd = create_ruleset(_metadata, handled, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	/* Checks read, write and truncate rights: truncation works. */
	EXPECT_EQ(0, test_truncate(file_rwt));
	EXPECT_EQ(0, test_open(file_rwt, O_RDONLY | O_TRUNC));
	EXPECT_EQ(0, test_open(file_rwt, O_WRONLY | O_TRUNC));
	/* Checks read and write rights: no truncate variant works. */
	EXPECT_EQ(EACCES, test_truncate(file_rw));
	EXPECT_EQ(EACCES, test_open(file_rw, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_rw, O_WRONLY | O_TRUNC));
	/*
	 * Checks read and truncate rights: truncation works.
	 *
	 * Note: Files can get truncated using open() even with O_RDONLY.
	 */
	EXPECT_EQ(0, test_truncate(file_rt));
	EXPECT_EQ(0, test_open(file_rt, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_rt, O_WRONLY | O_TRUNC));
	/* Checks truncate right: truncate works, but can't open file. */
	EXPECT_EQ(0, test_truncate(file_t));
	EXPECT_EQ(EACCES, test_open(file_t, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_t, O_WRONLY | O_TRUNC));
	/* Checks "no rights" case: No form of truncation works. */
	EXPECT_EQ(EACCES, test_truncate(file_none));
	EXPECT_EQ(EACCES, test_open(file_none, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_none, O_WRONLY | O_TRUNC));
	/*
	 * Checks truncate right on directory: truncate works on contained
	 * files.
	 */
	EXPECT_EQ(0, test_truncate(file_in_dir_t));
	EXPECT_EQ(EACCES, test_open(file_in_dir_t, O_RDONLY | O_TRUNC));
	EXPECT_EQ(EACCES, test_open(file_in_dir_t, O_WRONLY | O_TRUNC));
	/*
	 * Checks creat in dir_w: This requires the truncate right when
	 * overwriting an existing file, but does not require it when the file
	 * is new.
	 */
	EXPECT_EQ(EACCES, test_creat(file_in_dir_w));
	ASSERT_EQ(0, unlink(file_in_dir_w));
	EXPECT_EQ(0, test_creat(file_in_dir_w));
}
/* Invokes ftruncate(2) and returns its errno or 0. */
static int test_ftruncate(int fd)
{
	return (ftruncate(fd, 10) < 0) ? errno : 0;
}
TEST_F_FORK(layout1, ftruncate)
{
	/*
	 * This test opens a new file descriptor at different stages of
	 * Landlock restriction:
	 *
	 * without restriction: ftruncate works
	 * something else but truncate restricted: ftruncate works
	 * truncate restricted and permitted: ftruncate works
	 * truncate restricted and not permitted: ftruncate fails
	 *
	 * Whether this works or not is expected to depend on the time when the
	 * FD was opened, not to depend on the time when ftruncate() was
	 * called.
	 */
	const char *const path = file1_s1d1;
	/* Layer 1: handles read/write only; grants write on path. */
	const __u64 handled1 = LANDLOCK_ACCESS_FS_READ_FILE |
			       LANDLOCK_ACCESS_FS_WRITE_FILE;
	const struct rule layer1[] = {
		{
			.path = path,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	/* Layer 2: handles truncate and grants it on path. */
	const __u64 handled2 = LANDLOCK_ACCESS_FS_TRUNCATE;
	const struct rule layer2[] = {
		{
			.path = path,
			.access = LANDLOCK_ACCESS_FS_TRUNCATE,
		},
		{},
	};
	/* Layer 3: handles truncate but only grants write on path. */
	const __u64 handled3 = LANDLOCK_ACCESS_FS_TRUNCATE |
			       LANDLOCK_ACCESS_FS_WRITE_FILE;
	const struct rule layer3[] = {
		{
			.path = path,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	int fd_layer0, fd_layer1, fd_layer2, fd_layer3, ruleset_fd;
	/* FD opened before any restriction: always truncatable. */
	fd_layer0 = open(path, O_WRONLY);
	EXPECT_EQ(0, test_ftruncate(fd_layer0));
	ruleset_fd = create_ruleset(_metadata, handled1, layer1);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	fd_layer1 = open(path, O_WRONLY);
	EXPECT_EQ(0, test_ftruncate(fd_layer0));
	EXPECT_EQ(0, test_ftruncate(fd_layer1));
	ruleset_fd = create_ruleset(_metadata, handled2, layer2);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	fd_layer2 = open(path, O_WRONLY);
	EXPECT_EQ(0, test_ftruncate(fd_layer0));
	EXPECT_EQ(0, test_ftruncate(fd_layer1));
	EXPECT_EQ(0, test_ftruncate(fd_layer2));
	ruleset_fd = create_ruleset(_metadata, handled3, layer3);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	fd_layer3 = open(path, O_WRONLY);
	/* Earlier FDs keep their rights; only the newest one is restricted. */
	EXPECT_EQ(0, test_ftruncate(fd_layer0));
	EXPECT_EQ(0, test_ftruncate(fd_layer1));
	EXPECT_EQ(0, test_ftruncate(fd_layer2));
	EXPECT_EQ(EACCES, test_ftruncate(fd_layer3));
	ASSERT_EQ(0, close(fd_layer0));
	ASSERT_EQ(0, close(fd_layer1));
	ASSERT_EQ(0, close(fd_layer2));
	ASSERT_EQ(0, close(fd_layer3));
}
/* Fixture for the ftruncate variant tests below; carries no per-test state. */
/* clang-format off */
FIXTURE(ftruncate) {};
/* clang-format on */
/* Prepares the test layout and the single file that each variant truncates. */
FIXTURE_SETUP(ftruncate)
{
	prepare_layout(_metadata);
	create_file(_metadata, file1_s1d1);
}
/* Removes the test file and tears down the layout. */
FIXTURE_TEARDOWN(ftruncate)
{
	EXPECT_EQ(0, remove_path(file1_s1d1));
	cleanup_layout(_metadata);
}
FIXTURE_VARIANT(ftruncate)
{
	/* Access rights handled (i.e. restricted) by the enforced ruleset. */
	const __u64 handled;
	/* Access rights granted on file1_s1d1 by the rule. */
	const __u64 permitted;
	/* Expected errno (or 0) for open(file1_s1d1, O_WRONLY). */
	const int expected_open_result;
	/* Expected errno (or 0) for ftruncate() on the opened FD. */
	const int expected_ftruncate_result;
};
/* WRITE_FILE handled and granted: both open and ftruncate succeed. */
/* clang-format off */
FIXTURE_VARIANT_ADD(ftruncate, w_w) {
	/* clang-format on */
	.handled = LANDLOCK_ACCESS_FS_WRITE_FILE,
	.permitted = LANDLOCK_ACCESS_FS_WRITE_FILE,
	.expected_open_result = 0,
	.expected_ftruncate_result = 0,
};
/* TRUNCATE handled and granted: both open and ftruncate succeed. */
/* clang-format off */
FIXTURE_VARIANT_ADD(ftruncate, t_t) {
	/* clang-format on */
	.handled = LANDLOCK_ACCESS_FS_TRUNCATE,
	.permitted = LANDLOCK_ACCESS_FS_TRUNCATE,
	.expected_open_result = 0,
	.expected_ftruncate_result = 0,
};
/* WRITE+TRUNCATE handled, only WRITE granted: open works, ftruncate denied. */
/* clang-format off */
FIXTURE_VARIANT_ADD(ftruncate, wt_w) {
	/* clang-format on */
	.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
	.permitted = LANDLOCK_ACCESS_FS_WRITE_FILE,
	.expected_open_result = 0,
	.expected_ftruncate_result = EACCES,
};
/* WRITE+TRUNCATE handled and both granted: everything succeeds. */
/* clang-format off */
FIXTURE_VARIANT_ADD(ftruncate, wt_wt) {
	/* clang-format on */
	.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
	.permitted = LANDLOCK_ACCESS_FS_WRITE_FILE |
		     LANDLOCK_ACCESS_FS_TRUNCATE,
	.expected_open_result = 0,
	.expected_ftruncate_result = 0,
};
/*
 * WRITE+TRUNCATE handled, only TRUNCATE granted: open(O_WRONLY) is denied, so
 * expected_ftruncate_result is intentionally left out (never checked).
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(ftruncate, wt_t) {
	/* clang-format on */
	.handled = LANDLOCK_ACCESS_FS_WRITE_FILE | LANDLOCK_ACCESS_FS_TRUNCATE,
	.permitted = LANDLOCK_ACCESS_FS_TRUNCATE,
	.expected_open_result = EACCES,
};
/*
 * Opens file1_s1d1 and calls ftruncate() in the same restricted process,
 * checking both results against the variant's expectations.
 */
TEST_F_FORK(ftruncate, open_and_ftruncate)
{
	const char *const path = file1_s1d1;
	const struct rule rules[] = {
		{
			.path = path,
			.access = variant->permitted,
		},
		{},
	};
	int fd, ruleset_fd;
	/* Enable Landlock. */
	ruleset_fd = create_ruleset(_metadata, variant->handled, rules);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));
	fd = open(path, O_WRONLY);
	EXPECT_EQ(variant->expected_open_result, (fd < 0 ? errno : 0));
	if (fd >= 0) {
		EXPECT_EQ(variant->expected_ftruncate_result,
			  test_ftruncate(fd));
		ASSERT_EQ(0, close(fd));
	}
}
/*
 * Checks that ftruncate() rights stick to the file description: a landlocked
 * child opens the FD and passes it over a UNIX socket to the non-landlocked
 * parent, where ftruncate() must still behave per the child's restrictions.
 */
TEST_F_FORK(ftruncate, open_and_ftruncate_in_different_processes)
{
	int child, fd, status;
	int socket_fds[2];
	ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0,
				socket_fds));
	child = fork();
	ASSERT_LE(0, child);
	if (child == 0) {
		/*
		 * Enables Landlock in the child process, open a file descriptor
		 * where truncation is forbidden and send it to the
		 * non-landlocked parent process.
		 */
		const char *const path = file1_s1d1;
		const struct rule rules[] = {
			{
				.path = path,
				.access = variant->permitted,
			},
			{},
		};
		int fd, ruleset_fd;
		ruleset_fd = create_ruleset(_metadata, variant->handled, rules);
		ASSERT_LE(0, ruleset_fd);
		enforce_ruleset(_metadata, ruleset_fd);
		ASSERT_EQ(0, close(ruleset_fd));
		fd = open(path, O_WRONLY);
		ASSERT_EQ(variant->expected_open_result, (fd < 0 ? errno : 0));
		if (fd >= 0) {
			ASSERT_EQ(0, send_fd(socket_fds[0], fd));
			ASSERT_EQ(0, close(fd));
		}
		ASSERT_EQ(0, close(socket_fds[0]));
		_exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
		return;
	}
	/* Parent: receives the FD (if the child could open it) and checks. */
	if (variant->expected_open_result == 0) {
		fd = recv_fd(socket_fds[1]);
		ASSERT_LE(0, fd);
		EXPECT_EQ(variant->expected_ftruncate_result,
			  test_ftruncate(fd));
		ASSERT_EQ(0, close(fd));
	}
	ASSERT_EQ(child, waitpid(child, &status, 0));
	ASSERT_EQ(1, WIFEXITED(status));
	ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
	ASSERT_EQ(0, close(socket_fds[0]));
	ASSERT_EQ(0, close(socket_fds[1]));
}
TEST(memfd_ftruncate)
{
	int fd;
	fd = memfd_create("name", MFD_CLOEXEC);
	ASSERT_LE(0, fd);
	/*
	 * Checks that ftruncate is permitted on file descriptors that are
	 * created in ways other than open(2).
	 */
	EXPECT_EQ(0, test_ftruncate(fd));
	ASSERT_EQ(0, close(fd));
}
/* Fixture with dir_s1d2 bind-mounted onto dir_s2d2; no per-test state. */
/* clang-format off */
FIXTURE(layout1_bind) {};
/* clang-format on */
/* Creates the layout1 tree, then bind-mounts dir_s1d2 onto dir_s2d2. */
FIXTURE_SETUP(layout1_bind)
{
	prepare_layout(_metadata);
	create_layout1(_metadata);
	/* mount() requires CAP_SYS_ADMIN. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, mount(dir_s1d2, dir_s2d2, NULL, MS_BIND, NULL));
	clear_cap(_metadata, CAP_SYS_ADMIN);
}
/* Unmounts the bind mount before removing the layout1 tree. */
FIXTURE_TEARDOWN(layout1_bind)
{
	set_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, umount(dir_s2d2));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	remove_layout1(_metadata);
	cleanup_layout(_metadata);
}
/* Paths to dir_s1d3 and file1_s1d3 as seen through the bind mount on s2d2. */
static const char bind_dir_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3";
static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
/*
* layout1_bind hierarchy:
*
* tmp
* ├── s1d1
* │ ├── f1
* │ ├── f2
* │ └── s1d2
* │ ├── f1
* │ ├── f2
* │ └── s1d3
* │ ├── f1
* │ └── f2
* ├── s2d1
* │ ├── f1
* │ └── s2d2
* │ ├── f1
* │ ├── f2
* │ └── s1d3
* │ ├── f1
* │ └── f2
* └── s3d1
* └── s3d2
* └── s3d3
*/
/*
 * Baseline without any Landlock ruleset: everything in the layout1_bind
 * hierarchy is reachable, except the s2d2 content shadowed by the bind mount
 * (dir_s2d3/file1_s2d3 now resolve to ENOENT).
 */
TEST_F_FORK(layout1_bind, no_restriction)
{
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
	/* Hidden by the bind mount of dir_s1d2 onto dir_s2d2. */
	ASSERT_EQ(ENOENT, test_open(dir_s2d3, O_RDONLY));
	ASSERT_EQ(ENOENT, test_open(file1_s2d3, O_RDONLY));
	ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY));
	ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
}
/*
 * Checks that rules added through a bind-mounted path and through its
 * source path apply to the same underlying objects: each enforced layer
 * restricts both views consistently, and the resulting access is the
 * intersection of all layers.
 */
TEST_F_FORK(layout1_bind, same_content_same_file)
{
	/*
	 * Sets access right on parent directories of both source and
	 * destination mount points.
	 */
	const struct rule layer1_parent[] = {
		{
			.path = dir_s1d1,
			.access = ACCESS_RO,
		},
		{
			.path = dir_s2d1,
			.access = ACCESS_RW,
		},
		{},
	};
	/*
	 * Sets access rights on the same bind-mounted directories. The result
	 * should be ACCESS_RW for both directories, but not both hierarchies
	 * because of the first layer.
	 */
	const struct rule layer2_mount_point[] = {
		{
			.path = dir_s1d2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = dir_s2d2,
			.access = ACCESS_RW,
		},
		{},
	};
	/* Only allow read-access to the s1d3 hierarchies. */
	const struct rule layer3_source[] = {
		{
			.path = dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	/* Removes all access rights. */
	const struct rule layer4_destination[] = {
		{
			.path = bind_file1_s1d3,
			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	int ruleset_fd;

	/* Sets rules for the parent directories. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_parent);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks source hierarchy. */
	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));

	/* Checks destination hierarchy. */
	ASSERT_EQ(0, test_open(file1_s2d1, O_RDWR));
	ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR));
	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));

	/* Sets rules for the mount points. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_mount_point);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks source hierarchy: parents are now denied by layer2. */
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));

	/* Checks destination hierarchy. */
	ASSERT_EQ(EACCES, test_open(file1_s2d1, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s2d1, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR));
	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY));

	/* Sets a (shared) rule only on the source. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_source);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks source hierarchy. */
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));

	/* Checks destination hierarchy: the source rule applies here too. */
	ASSERT_EQ(EACCES, test_open(file1_s2d2, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s2d2, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY));
	ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY));
	ASSERT_EQ(EACCES, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY));

	/* Sets a (shared) rule only on the destination. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_destination);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks source hierarchy: the destination rule applies here too. */
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));

	/* Checks destination hierarchy. */
	ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_RDONLY));
	ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY));
}
/*
 * Checks LANDLOCK_ACCESS_FS_REFER interactions with bind mounts: renames
 * across mount points or towards a more-permissive hierarchy fail with
 * EXDEV, whereas a rename that only drops access rights succeeds.
 */
TEST_F_FORK(layout1_bind, reparent_cross_mount)
{
	const struct rule layer1[] = {
		{
			/* dir_s2d1 is beneath the dir_s2d2 mount point. */
			.path = dir_s2d1,
			.access = LANDLOCK_ACCESS_FS_REFER,
		},
		{
			.path = bind_dir_s1d3,
			.access = LANDLOCK_ACCESS_FS_EXECUTE,
		},
		{},
	};
	int ruleset_fd = create_ruleset(
		_metadata,
		LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_EXECUTE, layer1);

	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks basic denied move. */
	ASSERT_EQ(-1, rename(file1_s1d1, file1_s1d2));
	ASSERT_EQ(EXDEV, errno);

	/* Checks real cross-mount move (Landlock is not involved). */
	ASSERT_EQ(-1, rename(file1_s2d1, file1_s2d2));
	ASSERT_EQ(EXDEV, errno);

	/* Checks move that will give more accesses. */
	ASSERT_EQ(-1, rename(file1_s2d2, bind_file1_s1d3));
	ASSERT_EQ(EXDEV, errno);

	/* Checks legitimate downgrade move. */
	ASSERT_EQ(0, rename(bind_file1_s1d3, file1_s2d2));
}
/* Lower overlayfs layer: read-only files/directories (fl*/dl*) plus
 * entries shadowed by the upper layer (fo*/do*). */
#define LOWER_BASE TMP_DIR "/lower"
#define LOWER_DATA LOWER_BASE "/data"
static const char lower_fl1[] = LOWER_DATA "/fl1";
static const char lower_dl1[] = LOWER_DATA "/dl1";
static const char lower_dl1_fl2[] = LOWER_DATA "/dl1/fl2";
static const char lower_fo1[] = LOWER_DATA "/fo1";
static const char lower_do1[] = LOWER_DATA "/do1";
static const char lower_do1_fo2[] = LOWER_DATA "/do1/fo2";
static const char lower_do1_fl3[] = LOWER_DATA "/do1/fl3";

/* NULL-terminated path lists, iterated with for_each_path(). */
static const char (*lower_base_files[])[] = {
	&lower_fl1,
	&lower_fo1,
	NULL,
};
static const char (*lower_base_directories[])[] = {
	&lower_dl1,
	&lower_do1,
	NULL,
};
static const char (*lower_sub_files[])[] = {
	&lower_dl1_fl2,
	&lower_do1_fo2,
	&lower_do1_fl3,
	NULL,
};

/* Upper overlayfs layer (fu*/du*) and its copies of shared entries. */
#define UPPER_BASE TMP_DIR "/upper"
#define UPPER_DATA UPPER_BASE "/data"
#define UPPER_WORK UPPER_BASE "/work"
static const char upper_fu1[] = UPPER_DATA "/fu1";
static const char upper_du1[] = UPPER_DATA "/du1";
static const char upper_du1_fu2[] = UPPER_DATA "/du1/fu2";
static const char upper_fo1[] = UPPER_DATA "/fo1";
static const char upper_do1[] = UPPER_DATA "/do1";
static const char upper_do1_fo2[] = UPPER_DATA "/do1/fo2";
static const char upper_do1_fu3[] = UPPER_DATA "/do1/fu3";

static const char (*upper_base_files[])[] = {
	&upper_fu1,
	&upper_fo1,
	NULL,
};
static const char (*upper_base_directories[])[] = {
	&upper_du1,
	&upper_do1,
	NULL,
};
static const char (*upper_sub_files[])[] = {
	&upper_du1_fu2,
	&upper_do1_fo2,
	&upper_do1_fu3,
	NULL,
};

/* Merged overlayfs view combining the lower and upper layers. */
#define MERGE_BASE TMP_DIR "/merge"
#define MERGE_DATA MERGE_BASE "/data"
static const char merge_fl1[] = MERGE_DATA "/fl1";
static const char merge_dl1[] = MERGE_DATA "/dl1";
static const char merge_dl1_fl2[] = MERGE_DATA "/dl1/fl2";
static const char merge_fu1[] = MERGE_DATA "/fu1";
static const char merge_du1[] = MERGE_DATA "/du1";
static const char merge_du1_fu2[] = MERGE_DATA "/du1/fu2";
static const char merge_fo1[] = MERGE_DATA "/fo1";
static const char merge_do1[] = MERGE_DATA "/do1";
static const char merge_do1_fo2[] = MERGE_DATA "/do1/fo2";
static const char merge_do1_fl3[] = MERGE_DATA "/do1/fl3";
static const char merge_do1_fu3[] = MERGE_DATA "/do1/fu3";

static const char (*merge_base_files[])[] = {
	&merge_fl1,
	&merge_fu1,
	&merge_fo1,
	NULL,
};
static const char (*merge_base_directories[])[] = {
	&merge_dl1,
	&merge_du1,
	&merge_do1,
	NULL,
};
static const char (*merge_sub_files[])[] = {
	&merge_dl1_fl2, &merge_du1_fu2, &merge_do1_fo2,
	&merge_do1_fl3, &merge_do1_fu3, NULL,
};
/*
* layout2_overlay hierarchy:
*
* tmp
* ├── lower
* │ └── data
* │ ├── dl1
* │ │ └── fl2
* │ ├── do1
* │ │ ├── fl3
* │ │ └── fo2
* │ ├── fl1
* │ └── fo1
* ├── merge
* │ └── data
* │ ├── dl1
* │ │ └── fl2
* │ ├── do1
* │ │ ├── fl3
* │ │ ├── fo2
* │ │ └── fu3
* │ ├── du1
* │ │ └── fu2
* │ ├── fl1
* │ ├── fo1
* │ └── fu1
* └── upper
* ├── data
* │ ├── do1
* │ │ ├── fo2
* │ │ └── fu3
* │ ├── du1
* │ │ └── fu2
* │ ├── fo1
* │ └── fu1
* └── work
* └── work
*/
/* Per-test state: remembers whether overlayfs is unavailable. */
FIXTURE(layout2_overlay)
{
	/* Set by FIXTURE_SETUP when overlayfs is not supported. */
	bool skip_test;
};
/*
 * Builds the lower/upper/merge hierarchy: two tmpfs mounts populated with
 * the lower and upper files, then an overlayfs mount combining them.
 * Privileged steps are wrapped in set_cap()/clear_cap().
 */
FIXTURE_SETUP(layout2_overlay)
{
	if (!supports_filesystem("overlay")) {
		self->skip_test = true;
		SKIP(return, "overlayfs is not supported (setup)");
	}

	prepare_layout(_metadata);

	create_directory(_metadata, LOWER_BASE);
	set_cap(_metadata, CAP_SYS_ADMIN);
	/* Creates tmpfs mount points to get deterministic overlayfs. */
	ASSERT_EQ(0, mount_opt(&mnt_tmp, LOWER_BASE));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	create_file(_metadata, lower_fl1);
	create_file(_metadata, lower_dl1_fl2);
	create_file(_metadata, lower_fo1);
	create_file(_metadata, lower_do1_fo2);
	create_file(_metadata, lower_do1_fl3);

	create_directory(_metadata, UPPER_BASE);
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, mount_opt(&mnt_tmp, UPPER_BASE));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	create_file(_metadata, upper_fu1);
	create_file(_metadata, upper_du1_fu2);
	create_file(_metadata, upper_fo1);
	create_file(_metadata, upper_do1_fo2);
	create_file(_metadata, upper_do1_fu3);
	/* overlayfs requires a workdir on the same filesystem as upperdir. */
	ASSERT_EQ(0, mkdir(UPPER_WORK, 0700));

	create_directory(_metadata, MERGE_DATA);
	set_cap(_metadata, CAP_SYS_ADMIN);
	set_cap(_metadata, CAP_DAC_OVERRIDE);
	ASSERT_EQ(0, mount("overlay", MERGE_DATA, "overlay", 0,
			   "lowerdir=" LOWER_DATA ",upperdir=" UPPER_DATA
			   ",workdir=" UPPER_WORK));
	clear_cap(_metadata, CAP_DAC_OVERRIDE);
	clear_cap(_metadata, CAP_SYS_ADMIN);
}
/*
 * Unwinds FIXTURE_SETUP in reverse: removes the created files, unmounts
 * the two tmpfs layers and the overlay, then removes the mount points.
 */
FIXTURE_TEARDOWN(layout2_overlay)
{
	if (self->skip_test)
		SKIP(return, "overlayfs is not supported (teardown)");

	EXPECT_EQ(0, remove_path(lower_do1_fl3));
	EXPECT_EQ(0, remove_path(lower_dl1_fl2));
	EXPECT_EQ(0, remove_path(lower_fl1));
	EXPECT_EQ(0, remove_path(lower_do1_fo2));
	EXPECT_EQ(0, remove_path(lower_fo1));
	set_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, umount(LOWER_BASE));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, remove_path(LOWER_BASE));

	EXPECT_EQ(0, remove_path(upper_do1_fu3));
	EXPECT_EQ(0, remove_path(upper_du1_fu2));
	EXPECT_EQ(0, remove_path(upper_fu1));
	EXPECT_EQ(0, remove_path(upper_do1_fo2));
	EXPECT_EQ(0, remove_path(upper_fo1));
	EXPECT_EQ(0, remove_path(UPPER_WORK "/work"));
	set_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, umount(UPPER_BASE));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, remove_path(UPPER_BASE));

	set_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, umount(MERGE_DATA));
	clear_cap(_metadata, CAP_SYS_ADMIN);
	EXPECT_EQ(0, remove_path(MERGE_DATA));

	cleanup_layout(_metadata);
}
/*
 * Checks that, without any enforced ruleset, every path in the lower,
 * upper and merged views can be opened for reading.
 */
TEST_F_FORK(layout2_overlay, no_restriction)
{
	if (self->skip_test)
		SKIP(return, "overlayfs is not supported (test)");

	ASSERT_EQ(0, test_open(lower_fl1, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_dl1, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_dl1_fl2, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_fo1, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_do1, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_do1_fo2, O_RDONLY));
	ASSERT_EQ(0, test_open(lower_do1_fl3, O_RDONLY));

	ASSERT_EQ(0, test_open(upper_fu1, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_du1, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_du1_fu2, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_fo1, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_do1, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_do1_fo2, O_RDONLY));
	ASSERT_EQ(0, test_open(upper_do1_fu3, O_RDONLY));

	ASSERT_EQ(0, test_open(merge_fl1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_dl1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_dl1_fl2, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_fu1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_du1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_du1_fu2, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_fo1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_do1, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_do1_fo2, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_do1_fl3, O_RDONLY));
	ASSERT_EQ(0, test_open(merge_do1_fu3, O_RDONLY));
}
/*
 * Iterates over a NULL-terminated array of pointers to path strings,
 * setting path_entry to each string in turn (i is the caller's index).
 */
#define for_each_path(path_list, path_entry, i)               \
	for (i = 0, path_entry = *path_list[i]; path_list[i]; \
	     path_entry = *path_list[++i])
/*
 * Checks that the merged overlayfs view is evaluated independently from
 * the underlying lower and upper layers: rules on the layers do not
 * restrict the merge point and vice versa, across five stacked rulesets
 * of increasing specificity (base dirs, data dirs, subdirs, files, then
 * merge-only).
 */
TEST_F_FORK(layout2_overlay, same_content_different_file)
{
	/* Sets access right on parent directories of both layers. */
	const struct rule layer1_base[] = {
		{
			.path = LOWER_BASE,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = UPPER_BASE,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = MERGE_BASE,
			.access = ACCESS_RW,
		},
		{},
	};
	const struct rule layer2_data[] = {
		{
			.path = LOWER_DATA,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = UPPER_DATA,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = MERGE_DATA,
			.access = ACCESS_RW,
		},
		{},
	};
	/* Sets access right on directories inside both layers. */
	const struct rule layer3_subdirs[] = {
		{
			.path = lower_dl1,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = lower_do1,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = upper_du1,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = upper_do1,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = merge_dl1,
			.access = ACCESS_RW,
		},
		{
			.path = merge_du1,
			.access = ACCESS_RW,
		},
		{
			.path = merge_do1,
			.access = ACCESS_RW,
		},
		{},
	};
	/* Tighten access rights to the files. */
	const struct rule layer4_files[] = {
		{
			.path = lower_dl1_fl2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = lower_do1_fo2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = lower_do1_fl3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = upper_du1_fu2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = upper_do1_fo2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = upper_do1_fu3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{
			.path = merge_dl1_fl2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = merge_du1_fu2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = merge_do1_fo2,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = merge_do1_fl3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{
			.path = merge_do1_fu3,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	const struct rule layer5_merge_only[] = {
		{
			.path = MERGE_DATA,
			.access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_WRITE_FILE,
		},
		{},
	};
	int ruleset_fd;
	size_t i;
	const char *path_entry;

	if (self->skip_test)
		SKIP(return, "overlayfs is not supported (test)");

	/* Sets rules on base directories (i.e. outside overlay scope). */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks lower layer. */
	for_each_path(lower_base_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	for_each_path(lower_base_directories, path_entry, i) {
		ASSERT_EQ(EACCES,
			  test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(lower_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	/* Checks upper layer. */
	for_each_path(upper_base_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	for_each_path(upper_base_directories, path_entry, i) {
		ASSERT_EQ(EACCES,
			  test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(upper_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	/*
	 * Checks that access rights are independent from the lower and upper
	 * layers: write access to upper files viewed through the merge point
	 * is still allowed, and write access to lower file viewed (and copied)
	 * through the merge point is still allowed.
	 */
	for_each_path(merge_base_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}
	for_each_path(merge_base_directories, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(merge_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}

	/* Sets rules on data directories (i.e. inside overlay scope). */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_data);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks merge. */
	for_each_path(merge_base_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}
	for_each_path(merge_base_directories, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(merge_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}

	/* Same checks with tighter rules. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_subdirs);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks changes for lower layer. */
	for_each_path(lower_base_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
	}
	/* Checks changes for upper layer. */
	for_each_path(upper_base_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
	}
	/* Checks all merge accesses. */
	for_each_path(merge_base_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
	}
	for_each_path(merge_base_directories, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(merge_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}

	/* Sets rules directly on overlayed files. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_files);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks unchanged accesses on lower layer. */
	for_each_path(lower_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	/* Checks unchanged accesses on upper layer. */
	for_each_path(upper_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDONLY));
		ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY));
	}
	/* Checks all merge accesses. */
	for_each_path(merge_base_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
	}
	for_each_path(merge_base_directories, path_entry, i) {
		ASSERT_EQ(EACCES,
			  test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(merge_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}

	/* Only allows access to the merge hierarchy. */
	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer5_merge_only);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/* Checks new accesses on lower layer. */
	for_each_path(lower_sub_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
	}
	/* Checks new accesses on upper layer. */
	for_each_path(upper_sub_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
	}
	/* Checks all merge accesses. */
	for_each_path(merge_base_files, path_entry, i) {
		ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
	}
	for_each_path(merge_base_directories, path_entry, i) {
		ASSERT_EQ(EACCES,
			  test_open(path_entry, O_RDONLY | O_DIRECTORY));
	}
	for_each_path(merge_sub_files, path_entry, i) {
		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
	}
}
/* Per-test state for filesystem-specific tests. */
FIXTURE(layout3_fs)
{
	/* true when setup had to create the parent directory. */
	bool has_created_dir;
	/* true when setup had to create the test file. */
	bool has_created_file;
	/* Heap-allocated parent directory of variant->file_path. */
	char *dir_path;
	/* Set when the variant's filesystem is not supported. */
	bool skip_test;
};

/* One variant per filesystem type under test. */
FIXTURE_VARIANT(layout3_fs)
{
	const struct mnt_opt mnt;
	const char *const file_path;
	/* Expected filesystem magic of the CWD (0 means "don't care"). */
	unsigned int cwd_fs_magic;
};
/* clang-format off */
FIXTURE_VARIANT_ADD(layout3_fs, tmpfs) {
	/* clang-format on */
	.mnt = mnt_tmp,
	.file_path = file1_s1d1,
};

FIXTURE_VARIANT_ADD(layout3_fs, ramfs) {
	.mnt = {
		.type = "ramfs",
		.data = "mode=700",
	},
	.file_path = TMP_DIR "/dir/file",
};

FIXTURE_VARIANT_ADD(layout3_fs, cgroup2) {
	.mnt = {
		.type = "cgroup2",
	},
	.file_path = TMP_DIR "/test/cgroup.procs",
};

FIXTURE_VARIANT_ADD(layout3_fs, proc) {
	.mnt = {
		.type = "proc",
	},
	.file_path = TMP_DIR "/self/status",
};

FIXTURE_VARIANT_ADD(layout3_fs, sysfs) {
	.mnt = {
		.type = "sysfs",
	},
	.file_path = TMP_DIR "/kernel/notes",
};

/* Bind mount of TMP_DIR, only relevant when running under hostfs (UML). */
FIXTURE_VARIANT_ADD(layout3_fs, hostfs) {
	.mnt = {
		.source = TMP_DIR,
		.flags = MS_BIND,
	},
	.file_path = TMP_DIR "/dir/file",
	.cwd_fs_magic = HOSTFS_SUPER_MAGIC,
};
/*
 * Prepares the layout for one filesystem variant: skips unsupported
 * filesystems, derives the parent directory of variant->file_path into
 * self->dir_path, mounts the variant's filesystem, and creates the
 * directory and file when they do not already exist.
 */
FIXTURE_SETUP(layout3_fs)
{
	struct stat statbuf;
	const char *slash;
	size_t dir_len;

	if (!supports_filesystem(variant->mnt.type) ||
	    !cwd_matches_fs(variant->cwd_fs_magic)) {
		self->skip_test = true;
		SKIP(return, "this filesystem is not supported (setup)");
	}

	slash = strrchr(variant->file_path, '/');
	ASSERT_NE(slash, NULL);
	dir_len = (size_t)slash - (size_t)variant->file_path;
	ASSERT_LT(0, dir_len);
	self->dir_path = malloc(dir_len + 1);
	/* Aborts instead of dereferencing NULL on allocation failure. */
	ASSERT_NE(NULL, self->dir_path);
	self->dir_path[dir_len] = '\0';
	strncpy(self->dir_path, variant->file_path, dir_len);

	prepare_layout_opt(_metadata, &variant->mnt);

	/* Creates directory when required. */
	if (stat(self->dir_path, &statbuf)) {
		set_cap(_metadata, CAP_DAC_OVERRIDE);
		EXPECT_EQ(0, mkdir(self->dir_path, 0700))
		{
			TH_LOG("Failed to create directory \"%s\": %s",
			       self->dir_path, strerror(errno));
			free(self->dir_path);
			self->dir_path = NULL;
		}
		self->has_created_dir = true;
		clear_cap(_metadata, CAP_DAC_OVERRIDE);
	}

	/* Creates file when required. */
	if (stat(variant->file_path, &statbuf)) {
		int fd;

		set_cap(_metadata, CAP_DAC_OVERRIDE);
		fd = creat(variant->file_path, 0600);
		EXPECT_LE(0, fd)
		{
			TH_LOG("Failed to create file \"%s\": %s",
			       variant->file_path, strerror(errno));
		}
		EXPECT_EQ(0, close(fd));
		self->has_created_file = true;
		clear_cap(_metadata, CAP_DAC_OVERRIDE);
	}
}
/*
 * Removes the file/directory created by setup (best effort) and frees
 * self->dir_path before cleaning up the layout.
 */
FIXTURE_TEARDOWN(layout3_fs)
{
	if (self->skip_test)
		SKIP(return, "this filesystem is not supported (teardown)");

	if (self->has_created_file) {
		set_cap(_metadata, CAP_DAC_OVERRIDE);
		/*
		 * Don't check for error because the file might already
		 * have been removed (cf. release_inode test).
		 */
		unlink(variant->file_path);
		clear_cap(_metadata, CAP_DAC_OVERRIDE);
	}

	if (self->has_created_dir) {
		set_cap(_metadata, CAP_DAC_OVERRIDE);
		/*
		 * Don't check for error because the directory might already
		 * have been removed (cf. release_inode test).
		 */
		rmdir(self->dir_path);
		clear_cap(_metadata, CAP_DAC_OVERRIDE);
	}

	free(self->dir_path);
	self->dir_path = NULL;

	cleanup_layout(_metadata);
}
/*
 * Common helper: allows reading only rule_path with a first layer (which
 * must then deny /dev/null but still allow variant->file_path), and then
 * stacks a second layer handling read-file with no rules at all, after
 * which both opens must be denied.
 */
static void layer3_fs_tag_inode(struct __test_metadata *const _metadata,
				FIXTURE_DATA(layout3_fs) * self,
				const FIXTURE_VARIANT(layout3_fs) * variant,
				const char *const rule_path)
{
	const struct rule layer1_allow_read_file[] = {
		{
			.path = rule_path,
			.access = LANDLOCK_ACCESS_FS_READ_FILE,
		},
		{},
	};
	const struct landlock_ruleset_attr layer2_deny_everything_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
	};
	const char *const dev_null_path = "/dev/null";
	int ruleset_fd;

	if (self->skip_test)
		SKIP(return, "this filesystem is not supported (test)");

	/* Checks without Landlock. */
	EXPECT_EQ(0, test_open(dev_null_path, O_RDONLY | O_CLOEXEC));
	EXPECT_EQ(0, test_open(variant->file_path, O_RDONLY | O_CLOEXEC));

	ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
				    layer1_allow_read_file);
	EXPECT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	EXPECT_EQ(0, close(ruleset_fd));

	EXPECT_EQ(EACCES, test_open(dev_null_path, O_RDONLY | O_CLOEXEC));
	EXPECT_EQ(0, test_open(variant->file_path, O_RDONLY | O_CLOEXEC));

	/* Forbids directory reading. */
	ruleset_fd =
		landlock_create_ruleset(&layer2_deny_everything_attr,
					sizeof(layer2_deny_everything_attr), 0);
	EXPECT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
	EXPECT_EQ(0, close(ruleset_fd));

	/* Checks with Landlock and forbidden access. */
	EXPECT_EQ(EACCES, test_open(dev_null_path, O_RDONLY | O_CLOEXEC));
	EXPECT_EQ(EACCES, test_open(variant->file_path, O_RDONLY | O_CLOEXEC));
}
/* Matrix of tests to check file hierarchy evaluation. */

TEST_F_FORK(layout3_fs, tag_inode_dir_parent)
{
	/* The current directory must not be the root for this test. */
	layer3_fs_tag_inode(_metadata, self, variant, ".");
}

TEST_F_FORK(layout3_fs, tag_inode_dir_mnt)
{
	/* Rule on the mount point itself. */
	layer3_fs_tag_inode(_metadata, self, variant, TMP_DIR);
}

TEST_F_FORK(layout3_fs, tag_inode_dir_child)
{
	/* Rule on the parent directory of the test file. */
	layer3_fs_tag_inode(_metadata, self, variant, self->dir_path);
}

TEST_F_FORK(layout3_fs, tag_inode_file)
{
	/* Rule directly on the test file. */
	layer3_fs_tag_inode(_metadata, self, variant, variant->file_path);
}
/* Light version of layout1.release_inodes */
TEST_F_FORK(layout3_fs, release_inodes)
{
	const struct rule layer1[] = {
		{
			.path = TMP_DIR,
			.access = LANDLOCK_ACCESS_FS_READ_DIR,
		},
		{},
	};
	int ruleset_fd;

	if (self->skip_test)
		SKIP(return, "this filesystem is not supported (test)");

	/* Clean up for the teardown to not fail. */
	if (self->has_created_file)
		EXPECT_EQ(0, remove_path(variant->file_path));

	if (self->has_created_dir)
		/* Don't check for error because of cgroup specificities. */
		remove_path(self->dir_path);

	ruleset_fd =
		create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_DIR, layer1);
	ASSERT_LE(0, ruleset_fd);

	/* Unmount the filesystem while it is being used by a ruleset. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, umount(TMP_DIR));
	clear_cap(_metadata, CAP_SYS_ADMIN);

	/* Replaces with a new mount point to simplify FIXTURE_TEARDOWN. */
	set_cap(_metadata, CAP_SYS_ADMIN);
	ASSERT_EQ(0, mount_opt(&mnt_tmp, TMP_DIR));
	clear_cap(_metadata, CAP_SYS_ADMIN);

	enforce_ruleset(_metadata, ruleset_fd);
	ASSERT_EQ(0, close(ruleset_fd));

	/*
	 * Checks that access to the new mount point is denied: the rule was
	 * tied to the now-released inode, not to the path.
	 */
	ASSERT_EQ(EACCES, test_open(TMP_DIR, O_RDONLY));
}
TEST_HARNESS_MAIN
/* linux-master | tools/testing/selftests/landlock/fs_test.c */
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <linux/kernel.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <unistd.h>
#include <sys/resource.h>
#include "../kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
#ifndef __NR_close_range
#if defined __alpha__
#define __NR_close_range 546
#elif defined _MIPS_SIM
#if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
#define __NR_close_range (436 + 4000)
#endif
#if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
#define __NR_close_range (436 + 6000)
#endif
#if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
#define __NR_close_range (436 + 5000)
#endif
#elif defined __ia64__
#define __NR_close_range (436 + 1024)
#else
#define __NR_close_range 436
#endif
#endif
#ifndef CLOSE_RANGE_UNSHARE
#define CLOSE_RANGE_UNSHARE (1U << 1)
#endif
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif
/*
 * Thin wrapper around the raw close_range(2) syscall: returns 0 on
 * success, -1 on failure with errno set by the kernel.
 */
static inline int sys_close_range(unsigned int fd, unsigned int max_fd,
				  unsigned int flags)
{
	const long ret = syscall(__NR_close_range, fd, max_fd, flags);

	return (int)ret;
}
/*
 * Checks basic close_range() behavior: closing a contiguous range, a
 * range containing gaps, and a single fd, verifying that closed fds fail
 * fcntl(F_GETFL) while the remaining ones stay open.
 */
TEST(core_close_range)
{
	int i;
	int open_fds[101];

	for (i = 0; i < ARRAY_SIZE(open_fds); i++) {
		int fd;

		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
		ASSERT_GE(fd, 0) {
			if (errno == ENOENT)
				SKIP(return, "Skipping test since /dev/null does not exist");
		}

		open_fds[i] = fd;
	}

	/* An invalid flags argument must be rejected. */
	EXPECT_EQ(-1, sys_close_range(open_fds[0], open_fds[100], -1)) {
		if (errno == ENOSYS)
			SKIP(return, "close_range() syscall not supported");
	}

	EXPECT_EQ(0, sys_close_range(open_fds[0], open_fds[50], 0));

	for (i = 0; i <= 50; i++)
		EXPECT_EQ(-1, fcntl(open_fds[i], F_GETFL));

	for (i = 51; i <= 100; i++)
		EXPECT_GT(fcntl(open_fds[i], F_GETFL), -1);

	/* create a couple of gaps */
	close(57);
	close(78);
	close(81);
	close(82);
	close(84);
	close(90);

	EXPECT_EQ(0, sys_close_range(open_fds[51], open_fds[92], 0));

	for (i = 51; i <= 92; i++)
		EXPECT_EQ(-1, fcntl(open_fds[i], F_GETFL));

	for (i = 93; i <= 100; i++)
		EXPECT_GT(fcntl(open_fds[i], F_GETFL), -1);

	/* test that the kernel caps and still closes all fds */
	EXPECT_EQ(0, sys_close_range(open_fds[93], open_fds[99], 0));

	for (i = 93; i <= 99; i++)
		EXPECT_EQ(-1, fcntl(open_fds[i], F_GETFL));

	/* i == 100 after the loop: the last fd must still be open. */
	EXPECT_GT(fcntl(open_fds[i], F_GETFL), -1);

	EXPECT_EQ(0, sys_close_range(open_fds[100], open_fds[100], 0));

	EXPECT_EQ(-1, fcntl(open_fds[100], F_GETFL));
}
/*
 * Checks CLOSE_RANGE_UNSHARE from a child sharing its fd table
 * (CLONE_FILES): the child's close_range() must unshare the table first,
 * so closes are only visible in the child. All checks run in the child,
 * which reports success/failure through its exit status.
 */
TEST(close_range_unshare)
{
	int i, ret, status;
	pid_t pid;
	int open_fds[101];
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	for (i = 0; i < ARRAY_SIZE(open_fds); i++) {
		int fd;

		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
		ASSERT_GE(fd, 0) {
			if (errno == ENOENT)
				SKIP(return, "Skipping test since /dev/null does not exist");
		}

		open_fds[i] = fd;
	}

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* Child: same scenario as core_close_range, but unshared. */
		ret = sys_close_range(open_fds[0], open_fds[50],
				      CLOSE_RANGE_UNSHARE);
		if (ret)
			exit(EXIT_FAILURE);

		for (i = 0; i <= 50; i++)
			if (fcntl(open_fds[i], F_GETFL) != -1)
				exit(EXIT_FAILURE);

		for (i = 51; i <= 100; i++)
			if (fcntl(open_fds[i], F_GETFL) == -1)
				exit(EXIT_FAILURE);

		/* create a couple of gaps */
		close(57);
		close(78);
		close(81);
		close(82);
		close(84);
		close(90);

		ret = sys_close_range(open_fds[51], open_fds[92],
				      CLOSE_RANGE_UNSHARE);
		if (ret)
			exit(EXIT_FAILURE);

		for (i = 51; i <= 92; i++)
			if (fcntl(open_fds[i], F_GETFL) != -1)
				exit(EXIT_FAILURE);

		for (i = 93; i <= 100; i++)
			if (fcntl(open_fds[i], F_GETFL) == -1)
				exit(EXIT_FAILURE);

		/* test that the kernel caps and still closes all fds */
		ret = sys_close_range(open_fds[93], open_fds[99],
				      CLOSE_RANGE_UNSHARE);
		if (ret)
			exit(EXIT_FAILURE);

		for (i = 93; i <= 99; i++)
			if (fcntl(open_fds[i], F_GETFL) != -1)
				exit(EXIT_FAILURE);

		if (fcntl(open_fds[100], F_GETFL) == -1)
			exit(EXIT_FAILURE);

		ret = sys_close_range(open_fds[100], open_fds[100],
				      CLOSE_RANGE_UNSHARE);
		if (ret)
			exit(EXIT_FAILURE);

		if (fcntl(open_fds[100], F_GETFL) != -1)
			exit(EXIT_FAILURE);

		exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * Checks that CLOSE_RANGE_UNSHARE with max_fd == UINT_MAX is capped by the
 * kernel to the highest open fd and still closes everything in range.
 */
TEST(close_range_unshare_capped)
{
	int i, ret, status;
	pid_t pid;
	int open_fds[101];
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	for (i = 0; i < ARRAY_SIZE(open_fds); i++) {
		int fd;

		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
		ASSERT_GE(fd, 0) {
			if (errno == ENOENT)
				SKIP(return, "Skipping test since /dev/null does not exist");
		}

		open_fds[i] = fd;
	}

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		ret = sys_close_range(open_fds[0], UINT_MAX,
				      CLOSE_RANGE_UNSHARE);
		if (ret)
			exit(EXIT_FAILURE);

		for (i = 0; i <= 100; i++)
			if (fcntl(open_fds[i], F_GETFL) != -1)
				exit(EXIT_FAILURE);

		exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * Checks CLOSE_RANGE_CLOEXEC: marks two fd ranges close-on-exec (leaving
 * the gap untouched), then marks everything from fd 3 upward, verifying
 * FD_CLOEXEC via fcntl(F_GETFD). A lowered RLIMIT_NOFILE must not prevent
 * flagging already-open fds above the limit.
 */
TEST(close_range_cloexec)
{
	int i, ret;
	int open_fds[101];
	struct rlimit rlimit;

	for (i = 0; i < ARRAY_SIZE(open_fds); i++) {
		int fd;

		fd = open("/dev/null", O_RDONLY);
		ASSERT_GE(fd, 0) {
			if (errno == ENOENT)
				SKIP(return, "Skipping test since /dev/null does not exist");
		}

		open_fds[i] = fd;
	}

	ret = sys_close_range(1000, 1000, CLOSE_RANGE_CLOEXEC);
	if (ret < 0) {
		if (errno == ENOSYS)
			SKIP(return, "close_range() syscall not supported");
		if (errno == EINVAL)
			SKIP(return, "close_range() doesn't support CLOSE_RANGE_CLOEXEC");
	}

	/* Ensure the FD_CLOEXEC bit is set also with a resource limit in place. */
	ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlimit));
	rlimit.rlim_cur = 25;
	ASSERT_EQ(0, setrlimit(RLIMIT_NOFILE, &rlimit));

	/* Set close-on-exec for two ranges: [0-50] and [75-100]. */
	ret = sys_close_range(open_fds[0], open_fds[50], CLOSE_RANGE_CLOEXEC);
	ASSERT_EQ(0, ret);
	ret = sys_close_range(open_fds[75], open_fds[100], CLOSE_RANGE_CLOEXEC);
	ASSERT_EQ(0, ret);

	for (i = 0; i <= 50; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}

	/* The gap between the two ranges must be left untouched. */
	for (i = 51; i <= 74; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, 0);
	}

	for (i = 75; i <= 100; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}

	/* Test a common pattern. */
	ret = sys_close_range(3, UINT_MAX, CLOSE_RANGE_CLOEXEC);
	/* Previously the return value was silently ignored. */
	EXPECT_EQ(0, ret);

	for (i = 0; i <= 100; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}
}
/*
 * Same scenario as close_range_cloexec, but combining CLOSE_RANGE_CLOEXEC
 * with CLOSE_RANGE_UNSHARE: the fd table copy must carry the new
 * FD_CLOEXEC bits while the gap stays untouched.
 */
TEST(close_range_cloexec_unshare)
{
	int i, ret;
	int open_fds[101];
	struct rlimit rlimit;

	for (i = 0; i < ARRAY_SIZE(open_fds); i++) {
		int fd;

		fd = open("/dev/null", O_RDONLY);
		ASSERT_GE(fd, 0) {
			if (errno == ENOENT)
				SKIP(return, "Skipping test since /dev/null does not exist");
		}

		open_fds[i] = fd;
	}

	ret = sys_close_range(1000, 1000, CLOSE_RANGE_CLOEXEC);
	if (ret < 0) {
		if (errno == ENOSYS)
			SKIP(return, "close_range() syscall not supported");
		if (errno == EINVAL)
			SKIP(return, "close_range() doesn't support CLOSE_RANGE_CLOEXEC");
	}

	/* Ensure the FD_CLOEXEC bit is set also with a resource limit in place. */
	ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlimit));
	rlimit.rlim_cur = 25;
	ASSERT_EQ(0, setrlimit(RLIMIT_NOFILE, &rlimit));

	/* Set close-on-exec for two ranges: [0-50] and [75-100]. */
	ret = sys_close_range(open_fds[0], open_fds[50],
			      CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE);
	ASSERT_EQ(0, ret);
	ret = sys_close_range(open_fds[75], open_fds[100],
			      CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE);
	ASSERT_EQ(0, ret);

	for (i = 0; i <= 50; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}

	/* The gap between the two ranges must be left untouched. */
	for (i = 51; i <= 74; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, 0);
	}

	for (i = 75; i <= 100; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}

	/* Test a common pattern. */
	ret = sys_close_range(3, UINT_MAX,
			      CLOSE_RANGE_CLOEXEC | CLOSE_RANGE_UNSHARE);
	/* Previously the return value was silently ignored. */
	EXPECT_EQ(0, ret);

	for (i = 0; i <= 100; i++) {
		int flags = fcntl(open_fds[i], F_GETFD);

		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	}
}
/*
 * Regression test for [email protected]
 *
 * A child that shares our fd table (CLONE_FILES) applies
 * CLOSE_RANGE_CLOEXEC over a sparse table with a big gap; both the
 * child and the parent must observe the flag on all affected fds.
 */
TEST(close_range_cloexec_syzbot)
{
	int fd1, fd2, fd3, flags, ret, status;
	pid_t pid;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	/* Create a huge gap in the fd table. */
	fd1 = open("/dev/null", O_RDWR);
	EXPECT_GT(fd1, 0);
	fd2 = dup2(fd1, 1000);
	EXPECT_GT(fd2, 0);

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* No CLOSE_RANGE_UNSHARE: this acts on the shared table. */
		ret = sys_close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
		if (ret)
			exit(EXIT_FAILURE);

		/*
		 * The fd table is still shared with the parent; all our
		 * open fds should still be open but are now marked
		 * close-on-exec.
		 */
		flags = fcntl(fd1, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
		flags = fcntl(fd2, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

		fd3 = dup2(fd1, 42);
		EXPECT_GT(fd3, 0);

		/*
		 * Duplicating the file descriptor must remove the
		 * FD_CLOEXEC flag.
		 */
		flags = fcntl(fd3, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, 0);

		exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/*
	 * We had a shared file descriptor table with the child while it
	 * requested close-on-exec, so the original fds must be
	 * close-on-exec in the parent too (the assertions below check
	 * FD_CLOEXEC is set).
	 */
	flags = fcntl(fd1, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
	flags = fcntl(fd2, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

	/* dup2() clears the flag on the new descriptor. */
	fd3 = dup2(fd1, 42);
	EXPECT_GT(fd3, 0);

	flags = fcntl(fd3, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	EXPECT_EQ(close(fd1), 0);
	EXPECT_EQ(close(fd2), 0);
	EXPECT_EQ(close(fd3), 0);
}
/*
 * Regression test for [email protected]
 */
TEST(close_range_cloexec_unshare_syzbot)
{
	int i, fd1, fd2, fd3, flags, ret, status;
	pid_t pid;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	/*
	 * Create a huge gap in the fd table. When we now call
	 * CLOSE_RANGE_UNSHARE with a shared fd table and with ~0U as upper
	 * bound the kernel will only copy up to fd1 file descriptors into the
	 * new fd table. If the kernel is buggy and doesn't handle
	 * CLOSE_RANGE_CLOEXEC correctly it will not have copied all file
	 * descriptors and we will oops!
	 *
	 * On a buggy kernel this should immediately oops. But let's loop just
	 * to be sure.
	 */
	fd1 = open("/dev/null", O_RDWR);
	EXPECT_GT(fd1, 0);
	fd2 = dup2(fd1, 1000);
	EXPECT_GT(fd2, 0);

	for (i = 0; i < 100; i++) {
		pid = sys_clone3(&args, sizeof(args));
		ASSERT_GE(pid, 0);

		if (pid == 0) {
			ret = sys_close_range(3, ~0U, CLOSE_RANGE_UNSHARE |
						      CLOSE_RANGE_CLOEXEC);
			if (ret)
				exit(EXIT_FAILURE);

			/*
			 * We now have a private file descriptor table and all
			 * our open fds should still be open but made
			 * close-on-exec.
			 */
			flags = fcntl(fd1, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);
			flags = fcntl(fd2, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

			fd3 = dup2(fd1, 42);
			EXPECT_GT(fd3, 0);

			/*
			 * Duplicating the file descriptor must remove the
			 * FD_CLOEXEC flag.
			 */
			flags = fcntl(fd3, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, 0);

			EXPECT_EQ(close(fd1), 0);
			EXPECT_EQ(close(fd2), 0);
			EXPECT_EQ(close(fd3), 0);

			exit(EXIT_SUCCESS);
		}

		EXPECT_EQ(waitpid(pid, &status, 0), pid);
		EXPECT_EQ(true, WIFEXITED(status));
		EXPECT_EQ(0, WEXITSTATUS(status));
	}

	/*
	 * We created a private file descriptor table before along with
	 * requesting close-on-exec so the original fds must not be
	 * close-on-exec.
	 */
	flags = fcntl(fd1, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);
	flags = fcntl(fd2, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	fd3 = dup2(fd1, 42);
	EXPECT_GT(fd3, 0);

	flags = fcntl(fd3, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	EXPECT_EQ(close(fd1), 0);
	EXPECT_EQ(close(fd2), 0);
	EXPECT_EQ(close(fd3), 0);
}
TEST_HARNESS_MAIN
| linux-master | tools/testing/selftests/core/close_range_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <cap-ng.h>
#include <linux/capability.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/auxv.h>
#include "../kselftest.h"
#ifndef PR_CAP_AMBIENT
#define PR_CAP_AMBIENT 47
# define PR_CAP_AMBIENT_IS_SET 1
# define PR_CAP_AMBIENT_RAISE 2
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4
#endif
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 19)
# define HAVE_GETAUXVAL
#endif
/*
 * Interpret argv[i] as a boolean flag: "0" -> false, "1" -> true.
 * Any other value aborts the test with a failure message.
 */
static bool bool_arg(char **argv, int i)
{
	const char *arg = argv[i];

	if (strcmp(arg, "1") == 0)
		return true;

	if (strcmp(arg, "0") != 0) {
		/* ksft_exit_fail_msg() does not return. */
		ksft_exit_fail_msg("wrong argv[%d]\n", i);
	}

	return false;
}
/*
 * Helper executed by test_execve: argv[1..4] are "0"/"1" flags giving
 * the expected effective/permitted/inheritable/ambient state of
 * CAP_NET_BIND_SERVICE after the execve. Exits 0 iff all four match.
 */
int main(int argc, char **argv)
{
	const char *atsec = "";

	/*
	 * Be careful just in case a setgid or setcapped copy of this
	 * helper gets out.
	 */

	if (argc != 5)
		ksft_exit_fail_msg("wrong argc\n");

#ifdef HAVE_GETAUXVAL
	/* AT_SECURE tells us whether the kernel treated this exec as secure. */
	if (getauxval(AT_SECURE))
		atsec = " (AT_SECURE is set)";
	else
		atsec = " (AT_SECURE is not set)";
#endif

	capng_get_caps_process();

	if (capng_have_capability(CAPNG_EFFECTIVE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 1)) {
		ksft_print_msg("Wrong effective state%s\n", atsec);
		return 1;
	}

	if (capng_have_capability(CAPNG_PERMITTED, CAP_NET_BIND_SERVICE) != bool_arg(argv, 2)) {
		ksft_print_msg("Wrong permitted state%s\n", atsec);
		return 1;
	}

	if (capng_have_capability(CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 3)) {
		ksft_print_msg("Wrong inheritable state%s\n", atsec);
		return 1;
	}

	/* cap-ng has no ambient query; ask the kernel directly. */
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != bool_arg(argv, 4)) {
		ksft_print_msg("Wrong ambient state%s\n", atsec);
		return 1;
	}

	ksft_print_msg("%s: Capabilities after execve were correct\n",
			"validate_cap:");
	return 0;
}
| linux-master | tools/testing/selftests/capabilities/validate_cap.c |
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <cap-ng.h>
#include <linux/capability.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <stdarg.h>
#include <sched.h>
#include <sys/mount.h>
#include <limits.h>
#include <libgen.h>
#include <malloc.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include "../kselftest.h"
#ifndef PR_CAP_AMBIENT
#define PR_CAP_AMBIENT 47
# define PR_CAP_AMBIENT_IS_SET 1
# define PR_CAP_AMBIENT_RAISE 2
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4
#endif
static int nerrs;
static pid_t mpid; /* main() pid is used to avoid duplicate test counts */
/*
 * Format @fmt/@ap into a local buffer and write it to @filename.
 * Every failure is fatal to the test, except that when @enoent_ok is
 * true a missing file (ENOENT on open) is silently ignored — used for
 * optional proc files such as /proc/self/setgroups.
 */
static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
{
	char buf[4096];
	int fd;
	ssize_t written;
	int buf_len;

	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
	if (buf_len < 0)
		ksft_exit_fail_msg("vsnprintf failed - %s\n", strerror(errno));

	/* vsnprintf returns the would-be length, so >= size means truncation. */
	if (buf_len >= sizeof(buf))
		ksft_exit_fail_msg("vsnprintf output truncated\n");

	fd = open(filename, O_WRONLY);
	if (fd < 0) {
		if ((errno == ENOENT) && enoent_ok)
			return;
		ksft_exit_fail_msg("open of %s failed - %s\n",
				   filename, strerror(errno));
	}

	written = write(fd, buf, buf_len);
	if (written != buf_len) {
		if (written >= 0) {
			ksft_exit_fail_msg("short write to %s\n", filename);
		} else {
			ksft_exit_fail_msg("write to %s failed - %s\n",
					   filename, strerror(errno));
		}
	}

	/* proc files report write errors at close time too. */
	if (close(fd) != 0) {
		ksft_exit_fail_msg("close of %s failed - %s\n",
				   filename, strerror(errno));
	}
}
/* Formatted write to @filename, silently tolerating a missing file. */
static void maybe_write_file(char *filename, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vmaybe_write_file(true, filename, fmt, ap);
	va_end(ap);
}

/* Formatted write to @filename; any failure (incl. ENOENT) is fatal. */
static void write_file(char *filename, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vmaybe_write_file(false, filename, fmt, ap);
	va_end(ap);
}
/*
 * Enter a private mount namespace as @inner_uid. If we are privileged
 * enough to unshare the mount namespace directly, switch UIDs while
 * keeping capabilities; otherwise fall back to creating a user
 * namespace that maps us to @inner_uid. Returns true iff we retained
 * "outer" (real root) privilege, which the suid/sgid tests require.
 */
static bool create_and_enter_ns(uid_t inner_uid)
{
	uid_t outer_uid;
	gid_t outer_gid;
	int i;
	bool have_outer_privilege;

	outer_uid = getuid();
	outer_gid = getgid();

	/*
	 * TODO: If we're already root, we could skip creating the userns.
	 */

	if (unshare(CLONE_NEWNS) == 0) {
		ksft_print_msg("[NOTE]\tUsing global UIDs for tests\n");
		/* Keep permitted caps across the setresuid() below. */
		if (prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) != 0)
			ksft_exit_fail_msg("PR_SET_KEEPCAPS - %s\n",
					   strerror(errno));
		if (setresuid(inner_uid, inner_uid, -1) != 0)
			ksft_exit_fail_msg("setresuid - %s\n", strerror(errno));

		// Re-enable effective caps
		capng_get_caps_process();
		for (i = 0; i < CAP_LAST_CAP; i++)
			if (capng_have_capability(CAPNG_PERMITTED, i))
				capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, i);
		if (capng_apply(CAPNG_SELECT_CAPS) != 0)
			ksft_exit_fail_msg(
					"capng_apply - %s\n", strerror(errno));

		have_outer_privilege = true;
	} else if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == 0) {
		ksft_print_msg("[NOTE]\tUsing a user namespace for tests\n");
		/* setgroups must be denied before a gid_map can be written. */
		maybe_write_file("/proc/self/setgroups", "deny");
		write_file("/proc/self/uid_map", "%d %d 1", inner_uid, outer_uid);
		write_file("/proc/self/gid_map", "0 %d 1", outer_gid);

		have_outer_privilege = false;
	} else {
		ksft_exit_skip("must be root or be able to create a userns\n");
	}

	/* Keep our mount changes from propagating to the real system. */
	if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL) != 0)
		ksft_exit_fail_msg("remount everything private - %s\n",
				   strerror(errno));

	return have_outer_privilege;
}
/*
 * Mount a fresh private tmpfs over the current directory and re-enter
 * it (the chdir is needed so the cwd refers to the new mount), giving
 * the test a scratch area that vanishes with the mount namespace.
 */
static void chdir_to_tmpfs(void)
{
	char cwd[PATH_MAX];

	if (getcwd(cwd, sizeof(cwd)) != cwd)
		ksft_exit_fail_msg("getcwd - %s\n", strerror(errno));

	if (mount("private_tmp", ".", "tmpfs", 0, "mode=0777") != 0)
		ksft_exit_fail_msg("mount private tmpfs - %s\n",
				   strerror(errno));

	if (chdir(cwd) != 0)
		ksft_exit_fail_msg("chdir to private tmpfs - %s\n",
				   strerror(errno));
}
/*
 * Copy file @fromname (relative to directory fd @fromfd) to a new file
 * @toname in the current directory, mode 0700. Used to place copies of
 * the helper binary on the private tmpfs so they can be chown/chmod'ed
 * for the suid/sgid tests. Exits the test on any error.
 */
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
{
	int from = openat(fromfd, fromname, O_RDONLY);
	if (from == -1)
		ksft_exit_fail_msg("open copy source - %s\n", strerror(errno));

	int to = open(toname, O_CREAT | O_WRONLY | O_EXCL, 0700);
	/* Fix: this open() was previously unchecked; a failure would have
	 * led to writes on fd -1. */
	if (to == -1)
		ksft_exit_fail_msg("open copy target - %s\n", strerror(errno));

	while (true) {
		char buf[4096];
		ssize_t sz = read(from, buf, sizeof(buf));
		if (sz == 0)
			break;
		if (sz < 0)
			ksft_exit_fail_msg("read - %s\n", strerror(errno));

		if (write(to, buf, sz) != sz)
			/* no short writes on tmpfs */
			ksft_exit_fail_msg("write - %s\n", strerror(errno));
	}

	close(from);
	close(to);
}
/*
 * Fork; returns true in the child (which should run its test and
 * exit), false in the parent after reaping the child. The parent bumps
 * the global nerrs if the child died or exited non-zero, and otherwise
 * logs a pass — except in the top-level mpid process, to avoid
 * double-counting results.
 */
static bool fork_wait(void)
{
	pid_t child = fork();
	if (child == 0) {
		/* Each child starts its own error count from zero. */
		nerrs = 0;
		return true;
	} else if (child > 0) {
		int status;

		if (waitpid(child, &status, 0) != child ||
		    !WIFEXITED(status)) {
			ksft_print_msg("Child died\n");
			nerrs++;
		} else if (WEXITSTATUS(status) != 0) {
			ksft_print_msg("Child failed\n");
			nerrs++;
		} else {
			/* don't print this message for mpid */
			if (getpid() != mpid)
				ksft_test_result_pass("Passed\n");
		}
		return false;
	} else {
		ksft_exit_fail_msg("fork - %s\n", strerror(errno));
		return false;
	}
}
/*
 * execve the helper @name, passing the four expected capability states
 * (effective/permitted/inheritable/ambient) as "0"/"1" argv flags.
 * Only returns (fatally) if execl itself fails.
 */
static void exec_other_validate_cap(const char *name,
				    bool eff, bool perm, bool inh, bool ambient)
{
	execl(name, name, (eff ? "1" : "0"),
	      (perm ? "1" : "0"), (inh ? "1" : "0"), (ambient ? "1" : "0"),
	      NULL);
	ksft_exit_fail_msg("execl - %s\n", strerror(errno));
}

/* Run the freshly copied ./validate_cap helper with the expected states. */
static void exec_validate_cap(bool eff, bool perm, bool inh, bool ambient)
{
	exec_other_validate_cap("./validate_cap", eff, perm, inh, ambient);
}
/*
 * Run the ambient-capability test matrix as @uid inside a fresh mount
 * (and possibly user) namespace. The validate_cap helper found in
 * @our_path is copied onto a private tmpfs; when we hold real
 * privilege, suid/sgid variants are created as well. Returns 0 on
 * success, 1 on any failure.
 */
static int do_tests(int uid, const char *our_path)
{
	bool have_outer_privilege = create_and_enter_ns(uid);

	int ourpath_fd = open(our_path, O_RDONLY | O_DIRECTORY);
	if (ourpath_fd == -1)
		ksft_exit_fail_msg("open '%s' - %s\n",
				   our_path, strerror(errno));

	chdir_to_tmpfs();

	copy_fromat_to(ourpath_fd, "validate_cap", "validate_cap");

	if (have_outer_privilege) {
		uid_t gid = getegid();

		/* Create suid/sgid (root and non-root) helper variants. */
		copy_fromat_to(ourpath_fd, "validate_cap",
			       "validate_cap_suidroot");
		if (chown("validate_cap_suidroot", 0, -1) != 0)
			ksft_exit_fail_msg("chown - %s\n", strerror(errno));
		if (chmod("validate_cap_suidroot", S_ISUID | 0700) != 0)
			ksft_exit_fail_msg("chmod - %s\n", strerror(errno));

		copy_fromat_to(ourpath_fd, "validate_cap",
			       "validate_cap_suidnonroot");
		if (chown("validate_cap_suidnonroot", uid + 1, -1) != 0)
			ksft_exit_fail_msg("chown - %s\n", strerror(errno));
		if (chmod("validate_cap_suidnonroot", S_ISUID | 0700) != 0)
			ksft_exit_fail_msg("chmod - %s\n", strerror(errno));

		copy_fromat_to(ourpath_fd, "validate_cap",
			       "validate_cap_sgidroot");
		if (chown("validate_cap_sgidroot", -1, 0) != 0)
			ksft_exit_fail_msg("chown - %s\n", strerror(errno));
		if (chmod("validate_cap_sgidroot", S_ISGID | 0710) != 0)
			ksft_exit_fail_msg("chmod - %s\n", strerror(errno));

		copy_fromat_to(ourpath_fd, "validate_cap",
			       "validate_cap_sgidnonroot");
		if (chown("validate_cap_sgidnonroot", -1, gid + 1) != 0)
			ksft_exit_fail_msg("chown - %s\n", strerror(errno));
		if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
			ksft_exit_fail_msg("chmod - %s\n", strerror(errno));
	}

	capng_get_caps_process();

	/* Make sure that the inheritable (i) bit starts out clear */
	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
		ksft_exit_fail_msg("capng_apply - %s\n", strerror(errno));

	if (uid == 0) {
		ksft_print_msg("[RUN]\tRoot => ep\n");
		if (fork_wait())
			exec_validate_cap(true, true, false, false);
	} else {
		ksft_print_msg("[RUN]\tNon-root => no caps\n");
		if (fork_wait())
			exec_validate_cap(false, false, false, false);
	}

	ksft_print_msg("Check cap_ambient manipulation rules\n");

	/* We should not be able to add ambient caps yet. */
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != -1 || errno != EPERM) {
		if (errno == EINVAL)
			ksft_test_result_fail(
				"PR_CAP_AMBIENT_RAISE isn't supported\n");
		else
			/* Fixed typo: "eith" -> "with" */
			ksft_test_result_fail(
				"PR_CAP_AMBIENT_RAISE should have failed with EPERM on a non-inheritable cap\n");
		return 1;
	}
	ksft_test_result_pass(
		"PR_CAP_AMBIENT_RAISE failed on non-inheritable cap\n");

	/* Raising requires the cap to be permitted as well as inheritable. */
	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_RAW);
	capng_update(CAPNG_DROP, CAPNG_PERMITTED, CAP_NET_RAW);
	capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, CAP_NET_RAW);
	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
		ksft_exit_fail_msg("capng_apply - %s\n", strerror(errno));

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_RAW, 0, 0, 0) != -1 || errno != EPERM) {
		ksft_test_result_fail(
			"PR_CAP_AMBIENT_RAISE should have failed on a non-permitted cap\n");
		return 1;
	}
	ksft_test_result_pass(
		"PR_CAP_AMBIENT_RAISE failed on non-permitted cap\n");

	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
		ksft_exit_fail_msg("capng_apply - %s\n", strerror(errno));

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
		ksft_test_result_fail(
			"PR_CAP_AMBIENT_RAISE should have succeeded\n");
		return 1;
	}
	ksft_test_result_pass("PR_CAP_AMBIENT_RAISE worked\n");

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 1) {
		ksft_test_result_fail("PR_CAP_AMBIENT_IS_SET is broken\n");
		return 1;
	}

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0, 0) != 0)
		ksft_exit_fail_msg("PR_CAP_AMBIENT_CLEAR_ALL - %s\n",
				   strerror(errno));

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
		ksft_test_result_fail(
			"PR_CAP_AMBIENT_CLEAR_ALL didn't work\n");
		return 1;
	}

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
		ksft_exit_fail_msg("PR_CAP_AMBIENT_RAISE - %s\n",
				   strerror(errno));

	/* Dropping the inheritable bit must also clear the ambient bit. */
	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
		ksft_exit_fail_msg("capng_apply - %s\n", strerror(errno));

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
		ksft_test_result_fail("Dropping I should have dropped A\n");
		return 1;
	}

	ksft_test_result_pass("Basic manipulation appears to work\n");

	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
		ksft_exit_fail_msg("capng_apply - %s\n", strerror(errno));
	if (uid == 0) {
		ksft_print_msg("[RUN]\tRoot +i => eip\n");
		if (fork_wait())
			exec_validate_cap(true, true, true, false);
	} else {
		ksft_print_msg("[RUN]\tNon-root +i => i\n");
		if (fork_wait())
			exec_validate_cap(false, false, true, false);
	}

	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
		ksft_exit_fail_msg("PR_CAP_AMBIENT_RAISE - %s\n",
				   strerror(errno));

	ksft_print_msg("[RUN]\tUID %d +ia => eipa\n", uid);
	if (fork_wait())
		exec_validate_cap(true, true, true, true);

	/* The remaining tests need real privilege */

	if (!have_outer_privilege) {
		ksft_test_result_skip("SUID/SGID tests (needs privilege)\n");
		goto done;
	}

	if (uid == 0) {
		ksft_print_msg("[RUN]\tRoot +ia, suidroot => eipa\n");
		if (fork_wait())
			exec_other_validate_cap("./validate_cap_suidroot",
						true, true, true, true);

		ksft_print_msg("[RUN]\tRoot +ia, suidnonroot => ip\n");
		if (fork_wait())
			exec_other_validate_cap("./validate_cap_suidnonroot",
						false, true, true, false);

		ksft_print_msg("[RUN]\tRoot +ia, sgidroot => eipa\n");
		if (fork_wait())
			exec_other_validate_cap("./validate_cap_sgidroot",
						true, true, true, true);

		if (fork_wait()) {
			ksft_print_msg(
				"[RUN]\tRoot, gid != 0, +ia, sgidroot => eip\n");
			if (setresgid(1, 1, 1) != 0)
				ksft_exit_fail_msg("setresgid - %s\n",
						   strerror(errno));
			exec_other_validate_cap("./validate_cap_sgidroot",
						true, true, true, false);
		}

		ksft_print_msg("[RUN]\tRoot +ia, sgidnonroot => eip\n");
		if (fork_wait())
			exec_other_validate_cap("./validate_cap_sgidnonroot",
						true, true, true, false);
	} else {
		ksft_print_msg("[RUN]\tNon-root +ia, sgidnonroot => i\n");
		if (fork_wait())
			exec_other_validate_cap("./validate_cap_sgidnonroot",
						false, false, true, false);

		if (fork_wait()) {
			ksft_print_msg("[RUN]\tNon-root +ia, sgidroot => i\n");
			if (setresgid(1, 1, 1) != 0)
				ksft_exit_fail_msg("setresgid - %s\n",
						   strerror(errno));
			exec_other_validate_cap("./validate_cap_sgidroot",
						false, false, true, false);
		}
	}

done:
	ksft_print_cnts();
	return nerrs ? 1 : 0;
}
/*
 * Entry point: derive our own directory from argv[0] (so the helper
 * binaries next to us can be found), then run the full matrix twice in
 * forked children — once as root (uid 0) and once as uid 1.
 */
int main(int argc, char **argv)
{
	char *tmp1, *tmp2, *our_path;

	/* Find our path */
	tmp1 = strdup(argv[0]);
	if (!tmp1)
		ksft_exit_fail_msg("strdup - %s\n", strerror(errno));
	tmp2 = dirname(tmp1);
	our_path = strdup(tmp2);
	if (!our_path)
		ksft_exit_fail_msg("strdup - %s\n", strerror(errno));
	free(tmp1);

	/* Remember the top-level pid so fork_wait() can special-case it. */
	mpid = getpid();

	/* First child runs the whole matrix as uid 0 and exits from here. */
	if (fork_wait()) {
		ksft_print_header();
		ksft_set_plan(12);
		ksft_print_msg("[RUN]\t+++ Tests with uid == 0 +++\n");
		return do_tests(0, our_path);
	}

	ksft_print_msg("==================================================\n");

	/* Second child repeats the matrix as uid 1. */
	if (fork_wait()) {
		ksft_print_header();
		ksft_set_plan(9);
		ksft_print_msg("[RUN]\t+++ Tests with uid != 0 +++\n");
		return do_tests(1, our_path);
	}

	return nerrs ? 1 : 0;
}
}
| linux-master | tools/testing/selftests/capabilities/test_execve.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
*/
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <elf.h>
#include <fcntl.h>
#include <link.h>
#include <sys/stat.h>
#include "subunit.h"
#include "utils.h"
#define KILL_TIMEOUT 5
/* Setting timeout to -1 disables the alarm */
static uint64_t timeout = 120;
/*
 * Run @test_function in a forked child placed in its own process
 * group, enforcing the global timeout via alarm(). On timeout the
 * whole group gets SIGTERM, then SIGKILL after KILL_TIMEOUT more
 * seconds. Returns the child's exit status, or 1 if it had to be
 * killed or died abnormally.
 */
int run_test(int (test_function)(void), const char *name)
{
	bool terminated;
	int rc, status;
	pid_t pid;

	/* Make sure output is flushed before forking */
	fflush(stdout);

	pid = fork();
	if (pid == 0) {
		/* New process group so the whole test can be signalled later. */
		setpgid(0, 0);
		exit(test_function());
	} else if (pid == -1) {
		perror("fork");
		return 1;
	}

	/* Also set it from the parent to avoid a startup race. */
	setpgid(pid, pid);

	if (timeout != -1)
		/* Wake us up in timeout seconds */
		alarm(timeout);
	terminated = false;

wait:
	rc = waitpid(pid, &status, 0);
	if (rc == -1) {
		if (errno != EINTR) {
			printf("unknown error from waitpid\n");
			return 1;
		}

		/* EINTR means SIGALRM fired: escalate SIGTERM -> SIGKILL. */
		if (terminated) {
			printf("!! force killing %s\n", name);
			kill(-pid, SIGKILL);
			return 1;
		} else {
			printf("!! killing %s\n", name);
			kill(-pid, SIGTERM);
			terminated = true;
			alarm(KILL_TIMEOUT);
			goto wait;
		}
	}

	/* Kill anything else in the process group that is still running */
	kill(-pid, SIGTERM);

	if (WIFEXITED(status))
		status = WEXITSTATUS(status);
	else {
		if (WIFSIGNALED(status))
			printf("!! child died by signal %d\n", WTERMSIG(status));
		else
			printf("!! child died by unknown cause\n");

		status = 1; /* Signal or other */
	}

	return status;
}
/* Empty handler: its only purpose is to interrupt waitpid() in run_test(). */
static void sig_handler(int signum)
{
	/* Just wake us up from waitpid */
}

/* Installed for both SIGINT and SIGALRM by test_harness(). */
static struct sigaction sig_action = {
	.sa_handler = sig_handler,
};
/* Override the per-test timeout in seconds; (uint64_t)-1 disables it. */
void test_harness_set_timeout(uint64_t time)
{
	timeout = time;
}
/*
 * Run @test_function through run_test() with subunit-style reporting.
 * MAGIC_SKIP_RETURN_VALUE is reported as a skip and converted to 0 so
 * skipped tests do not count as failures. Returns the test's status.
 */
int test_harness(int (test_function)(void), const char *name)
{
	int rc;

	test_start(name);
	test_set_git_version(GIT_VERSION);

	/* SIGINT/SIGALRM only need to interrupt waitpid() in run_test(). */
	if (sigaction(SIGINT, &sig_action, NULL)) {
		perror("sigaction (sigint)");
		test_error(name);
		return 1;
	}

	if (sigaction(SIGALRM, &sig_action, NULL)) {
		perror("sigaction (sigalrm)");
		test_error(name);
		return 1;
	}

	rc = run_test(test_function, name);

	if (rc == MAGIC_SKIP_RETURN_VALUE) {
		test_skip(name);
		/* so that skipped test is not marked as failed */
		rc = 0;
	} else
		test_finish(name, rc);

	return rc;
}
| linux-master | tools/testing/selftests/powerpc/harness.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013-2015, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <link.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/limits.h>
#include "utils.h"
static char auxv[4096];
/*
 * Read up to @count bytes of @path into @buf; the number of bytes read
 * is stored in *@len when @len is non-NULL.
 *
 * Returns 0 on success or a negative errno-style code (mirrored into
 * errno); -EOVERFLOW means the file holds more than @count bytes.
 */
int read_file(const char *path, char *buf, size_t count, size_t *len)
{
	ssize_t rc;
	int fd;
	int err;
	char eof;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -errno;

	rc = read(fd, buf, count);
	if (rc < 0) {
		err = -errno;
		goto out;
	}

	if (len)
		*len = rc;

	/* Overflow if there are still more bytes after filling the buffer */
	if (rc == count) {
		rc = read(fd, &eof, 1);
		if (rc != 0) {
			err = -EOVERFLOW;
			goto out;
		}
	}

	err = 0;

out:
	close(fd);
	errno = -err;
	return err;
}
/*
 * Read all of @path into a newly allocated buffer returned via *@buf
 * (caller frees); the total size is stored in *@len when non-NULL.
 * Returns 0 on success or a negative errno-style code (mirrored into
 * errno); on error nothing is allocated.
 */
int read_file_alloc(const char *path, char **buf, size_t *len)
{
	size_t read_offset = 0;
	size_t buffer_len = 0;
	char *buffer = NULL;
	int err;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -errno;

	/*
	 * We don't use stat & preallocate st_size because some non-files
	 * report 0 file size. Instead just dynamically grow the buffer
	 * as needed.
	 */
	while (1) {
		ssize_t rc;

		/* Double the buffer once it is at least half full. */
		if (read_offset >= buffer_len / 2) {
			char *next_buffer;

			buffer_len = buffer_len ? buffer_len * 2 : 4096;
			next_buffer = realloc(buffer, buffer_len);
			if (!next_buffer) {
				err = -errno;
				goto out;
			}
			buffer = next_buffer;
		}

		rc = read(fd, buffer + read_offset, buffer_len - read_offset);
		if (rc < 0) {
			err = -errno;
			goto out;
		}

		if (rc == 0)
			break;

		read_offset += rc;
	}

	*buf = buffer;
	if (len)
		*len = read_offset;

	err = 0;

out:
	close(fd);
	if (err)
		free(buffer);
	errno = -err;
	return err;
}
/*
 * Create/truncate @path (mode 0644) and write exactly @count bytes of
 * @buf to it.
 *
 * Returns 0 on success or a negative errno-style code (mirrored into
 * errno); a short write is reported as -EOVERFLOW.
 */
int write_file(const char *path, const char *buf, size_t count)
{
	ssize_t written;
	int err = 0;
	int fd;

	fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return -errno;

	written = write(fd, buf, count);

	if (written < 0)
		err = -errno;
	else if ((size_t)written != count)
		err = -EOVERFLOW;

	close(fd);
	errno = -err;
	return err;
}
/*
 * Snapshot /proc/self/auxv into @buf. Returns 0 on success or a
 * negative errno-style code from read_file().
 */
int read_auxv(char *buf, ssize_t buf_size)
{
	int err;

	err = read_file("/proc/self/auxv", buf, buf_size, NULL);
	if (err) {
		perror("Error reading /proc/self/auxv");
		return err;
	}

	return 0;
}

/* Read @subpath (relative to /sys/kernel/debug/) into @buf. */
int read_debugfs_file(const char *subpath, char *buf, size_t count)
{
	char path[PATH_MAX] = "/sys/kernel/debug/";

	strncat(path, subpath, sizeof(path) - strlen(path) - 1);

	return read_file(path, buf, count, NULL);
}

/* Write @count bytes of @buf to @subpath under /sys/kernel/debug/. */
int write_debugfs_file(const char *subpath, const char *buf, size_t count)
{
	char path[PATH_MAX] = "/sys/kernel/debug/";

	strncat(path, subpath, sizeof(path) - strlen(path) - 1);

	return write_file(path, buf, count);
}
/*
 * Validate the result of a strto*()-style parse over @buffer of
 * @count bytes: at least one digit must have been consumed (@end past
 * @buffer), and everything from @end up to @count must be spaces or
 * newlines, or terminate at a NUL.
 *
 * Returns 0 if valid or -EINVAL otherwise; errno mirrors the result.
 */
static int validate_int_parse(const char *buffer, size_t count, char *end)
{
	const char *cursor;

	/* strto*() leaves end == buffer when no digits were matched. */
	if (end == buffer) {
		errno = EINVAL;
		return -EINVAL;
	}

	for (cursor = end; cursor < buffer + count; cursor++) {
		char c = *cursor;

		if (c == '\0')
			break;

		if (c != ' ' && c != '\n') {
			errno = EINVAL;
			return -EINVAL;
		}
	}

	errno = 0;
	return 0;
}
/*
 * Parse a signed integer from @buffer with strtoimax() in @base and
 * require it to lie in [@min, @max]. Returns 0 on success or a
 * negative errno-style code (mirrored into errno): -EINVAL for a bad
 * parse, -EOVERFLOW for an out-of-bounds value.
 */
static int parse_bounded_int(const char *buffer, size_t count, intmax_t *result,
			     int base, intmax_t min, intmax_t max)
{
	int err;
	char *end;

	errno = 0;
	*result = strtoimax(buffer, &end, base);

	if (errno)
		return -errno;

	err = validate_int_parse(buffer, count, end);
	if (err)
		goto out;

	if (*result < min || *result > max)
		err = -EOVERFLOW;

out:
	errno = -err;
	return err;
}

/* Unsigned counterpart of parse_bounded_int(), bounded above by @max. */
static int parse_bounded_uint(const char *buffer, size_t count, uintmax_t *result,
			      int base, uintmax_t max)
{
	int err = 0;
	char *end;

	errno = 0;
	*result = strtoumax(buffer, &end, base);

	if (errno)
		return -errno;

	err = validate_int_parse(buffer, count, end);
	if (err)
		goto out;

	if (*result > max)
		err = -EOVERFLOW;

out:
	errno = -err;
	return err;
}
/* Parse @buffer as an intmax_t in @base; full intmax_t range allowed. */
int parse_intmax(const char *buffer, size_t count, intmax_t *result, int base)
{
	return parse_bounded_int(buffer, count, result, base, INTMAX_MIN, INTMAX_MAX);
}

/* Parse @buffer as a uintmax_t in @base. */
int parse_uintmax(const char *buffer, size_t count, uintmax_t *result, int base)
{
	return parse_bounded_uint(buffer, count, result, base, UINTMAX_MAX);
}

/* Parse @buffer as an int in @base, rejecting out-of-range values. */
int parse_int(const char *buffer, size_t count, int *result, int base)
{
	intmax_t parsed;
	int err = parse_bounded_int(buffer, count, &parsed, base, INT_MIN, INT_MAX);

	*result = parsed;
	return err;
}

/* Parse @buffer as an unsigned int in @base. */
int parse_uint(const char *buffer, size_t count, unsigned int *result, int base)
{
	uintmax_t parsed;
	int err = parse_bounded_uint(buffer, count, &parsed, base, UINT_MAX);

	*result = parsed;
	return err;
}

/* Parse @buffer as a long in @base. */
int parse_long(const char *buffer, size_t count, long *result, int base)
{
	intmax_t parsed;
	int err = parse_bounded_int(buffer, count, &parsed, base, LONG_MIN, LONG_MAX);

	*result = parsed;
	return err;
}

/* Parse @buffer as an unsigned long in @base. */
int parse_ulong(const char *buffer, size_t count, unsigned long *result, int base)
{
	uintmax_t parsed;
	int err = parse_bounded_uint(buffer, count, &parsed, base, ULONG_MAX);

	*result = parsed;
	return err;
}
/* Read @path and parse its contents as a signed long in @base. */
int read_long(const char *path, long *result, int base)
{
	int err;
	char buffer[32] = {0};

	err = read_file(path, buffer, sizeof(buffer) - 1, NULL);
	if (err)
		return err;

	return parse_long(buffer, sizeof(buffer), result, base);
}

/* Read @path and parse its contents as an unsigned long in @base. */
int read_ulong(const char *path, unsigned long *result, int base)
{
	int err;
	char buffer[32] = {0};

	err = read_file(path, buffer, sizeof(buffer) - 1, NULL);
	if (err)
		return err;

	return parse_ulong(buffer, sizeof(buffer), result, base);
}
/*
 * Write @result to @path in decimal (@base must be 10; other bases
 * fail with -EINVAL). Returns 0 or a negative errno-style code
 * (mirrored into errno).
 */
int write_long(const char *path, long result, int base)
{
	int err;
	int len;
	char buffer[32];

	/* Decimal only for now: no format specifier for signed hex values */
	if (base != 10) {
		err = -EINVAL;
		goto out;
	}

	len = snprintf(buffer, sizeof(buffer), "%ld", result);
	if (len < 0 || len >= sizeof(buffer)) {
		err = -EOVERFLOW;
		goto out;
	}

	err = write_file(path, buffer, len);

out:
	errno = -err;
	return err;
}
/*
 * Write @result to @path formatted in @base (10 or 16; anything else
 * fails with -EINVAL). Returns 0 on success or a negative errno-style
 * code (mirrored into errno).
 */
int write_ulong(const char *path, unsigned long result, int base)
{
	int err;
	int len;
	char buffer[32];
	char *fmt;

	switch (base) {
	case 10:
		fmt = "%lu";
		break;
	case 16:
		fmt = "%lx";
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	len = snprintf(buffer, sizeof(buffer), fmt, result);
	if (len < 0 || len >= sizeof(buffer)) {
		/*
		 * Fix: snprintf() does not set errno on truncation, so the
		 * previous "err = -errno" could return 0 (success) without
		 * writing anything. Report an explicit error instead,
		 * matching write_long().
		 */
		err = -EOVERFLOW;
		goto out;
	}

	err = write_file(path, buffer, len);

out:
	errno = -err;
	return err;
}
/*
 * Walk the ELF auxiliary vector in @auxv and return a pointer to the
 * entry of the given @type, or NULL if no such entry exists before the
 * AT_NULL terminator.
 */
void *find_auxv_entry(int type, char *auxv)
{
	ElfW(auxv_t) *entry = (ElfW(auxv_t) *)auxv;

	for (; entry->a_type != AT_NULL; entry++) {
		if (entry->a_type == type)
			return entry;
	}

	return NULL;
}
/*
 * Return the value of auxv entry @type for the current process, or
 * NULL if it is absent or /proc/self/auxv cannot be read. Note a
 * legitimately-zero value is indistinguishable from absence.
 */
void *get_auxv_entry(int type)
{
	ElfW(auxv_t) *p;

	/* Cache the vector in the file-scope auxv[] buffer. */
	if (read_auxv(auxv, sizeof(auxv)))
		return NULL;

	p = find_auxv_entry(type, auxv);
	if (p)
		return (void *)p->a_un.a_val;

	return NULL;
}
/*
 * Pick a CPU from our affinity mask, preferring a non-zero primary SMT
 * thread. Returns the cpu number, or -1 on failure or empty mask.
 */
int pick_online_cpu(void)
{
	int ncpus, cpu = -1;
	cpu_set_t *mask;
	size_t size;

	ncpus = get_nprocs_conf();
	size = CPU_ALLOC_SIZE(ncpus);
	mask = CPU_ALLOC(ncpus);
	if (!mask) {
		perror("malloc");
		return -1;
	}

	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(0, size, mask)) {
		perror("sched_getaffinity");
		goto done;
	}

	/* We prefer a primary thread, but skip 0 */
	/* NOTE(review): stride of 8 presumably assumes SMT8 cores — confirm */
	for (cpu = 8; cpu < ncpus; cpu += 8)
		if (CPU_ISSET_S(cpu, size, mask))
			goto done;

	/* Search for anything, but in reverse */
	for (cpu = ncpus - 1; cpu >= 0; cpu--)
		if (CPU_ISSET_S(cpu, size, mask))
			goto done;

	/* Both loops failed: cpu is -1 here. */
	printf("No cpus in affinity mask?!\n");

done:
	CPU_FREE(mask);
	return cpu;
}
/*
 * Pin the calling thread to @cpu (or to an automatically chosen online
 * CPU when @cpu == BIND_CPU_ANY). Returns the cpu bound to on success
 * or a negative value on error.
 */
int bind_to_cpu(int cpu)
{
	cpu_set_t mask;
	int err;

	if (cpu == BIND_CPU_ANY) {
		cpu = pick_online_cpu();
		if (cpu < 0)
			return cpu;
	}

	printf("Binding to cpu %d\n", cpu);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	err = sched_setaffinity(0, sizeof(mask), &mask);
	if (err)
		return err;

	return cpu;
}
/*
 * Report whether we are running on a little-endian 64-bit PowerPC
 * kernel, judged by the uname(2) machine string. Returns false if
 * uname itself fails (after printing the error).
 */
bool is_ppc64le(void)
{
	struct utsname uts;

	errno = 0;
	if (uname(&uts) != 0) {
		perror("uname");
		return false;
	}

	return !strcmp(uts.machine, "ppc64le");
}
/* Read @fpath (relative to /sys/) into @result; see read_file(). */
int read_sysfs_file(char *fpath, char *result, size_t result_size)
{
	char path[PATH_MAX] = "/sys/";

	strncat(path, fpath, PATH_MAX - strlen(path) - 1);

	return read_file(path, result, result_size, NULL);
}
/* Read @debugfs_file (under /sys/kernel/debug/) and parse it as a decimal int. */
int read_debugfs_int(const char *debugfs_file, int *result)
{
	int err;
	char value[16] = {0};

	err = read_debugfs_file(debugfs_file, value, sizeof(value) - 1);
	if (err)
		return err;

	return parse_int(value, sizeof(value), result, 10);
}
/*
 * Write the decimal representation of @result to @debugfs_file
 * (relative to /sys/kernel/debug/). Returns 0 or a negative
 * errno-style code from write_debugfs_file().
 */
int write_debugfs_int(const char *debugfs_file, int result)
{
	char value[16];

	/* sizeof(value) instead of a repeated magic 16, matching the
	 * buffer-handling style of the sibling helpers. */
	snprintf(value, sizeof(value), "%d", result);

	return write_debugfs_file(debugfs_file, value, strlen(value));
}
/* Thin wrapper around the raw perf_event_open(2) syscall. */
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, hw_event, pid, cpu,
		       group_fd, flags);
}

/*
 * Initialise @event_attr for a user-space-only counter of the given
 * @type/@config, created disabled and reporting group format.
 */
static void perf_event_attr_init(struct perf_event_attr *event_attr,
				 unsigned int type,
				 unsigned long config)
{
	memset(event_attr, 0, sizeof(*event_attr));

	event_attr->type = type;
	event_attr->size = sizeof(struct perf_event_attr);
	event_attr->config = config;
	event_attr->read_format = PERF_FORMAT_GROUP;
	event_attr->disabled = 1;
	/* Count only user space: exclude kernel, hypervisor and guest. */
	event_attr->exclude_kernel = 1;
	event_attr->exclude_hv = 1;
	event_attr->exclude_guest = 1;
}

/*
 * Open a perf counter of @type/@config for the calling task on any CPU,
 * optionally in group @group_fd. Returns the event fd or -1 on error.
 */
int perf_event_open_counter(unsigned int type,
			    unsigned long config, int group_fd)
{
	int fd;
	struct perf_event_attr event_attr;

	perf_event_attr_init(&event_attr, type, config);

	fd = perf_event_open(&event_attr, 0, -1, group_fd, 0);

	if (fd < 0)
		perror("perf_event_open() failed");

	return fd;
}

/* Enable the whole event group that @fd belongs to; 0 or -1. */
int perf_event_enable(int fd)
{
	if (ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP) == -1) {
		perror("error while enabling perf events");
		return -1;
	}

	return 0;
}

/* Disable the whole event group that @fd belongs to; 0 or -1. */
int perf_event_disable(int fd)
{
	if (ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP) == -1) {
		perror("error disabling perf events");
		return -1;
	}

	return 0;
}

/* Reset the counters of the whole event group that @fd belongs to; 0 or -1. */
int perf_event_reset(int fd)
{
	if (ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP) == -1) {
		perror("error resetting perf events");
		return -1;
	}

	return 0;
}
/*
 * Determine from /proc/cpuinfo whether the kernel is using the hash
 * MMU (*@using_hash = true) or radix (false). Platforms that only
 * support hash (Cell, PowerMac) count as hash. Returns 0 on success,
 * -1 if no recognisable marker line was found.
 */
int using_hash_mmu(bool *using_hash)
{
	char line[128];
	FILE *f;
	int rc;

	f = fopen("/proc/cpuinfo", "r");
	FAIL_IF(!f);

	rc = 0;
	while (fgets(line, sizeof(line), f) != NULL) {
		if (!strcmp(line, "MMU		: Hash\n") ||
		    !strcmp(line, "platform	: Cell\n") ||
		    !strcmp(line, "platform	: PowerMac\n")) {
			*using_hash = true;
			goto out;
		}

		if (strcmp(line, "MMU		: Radix\n") == 0) {
			*using_hash = false;
			goto out;
		}
	}

	rc = -1;
out:
	fclose(f);
	return rc;
}
/*
 * Install @fn as the SA_SIGINFO handler for @sig, returning the
 * previous disposition so callers can later restore it with
 * pop_signal_handler(). A failing sigaction() aborts the test.
 */
struct sigaction push_signal_handler(int sig, void (*fn)(int, siginfo_t *, void *))
{
	struct sigaction sa;
	struct sigaction old_handler;

	sa.sa_sigaction = fn;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	FAIL_IF_EXIT_MSG(sigaction(sig, &sa, &old_handler),
			 "failed to push signal handler");

	return old_handler;
}

/* Restore @old_handler for @sig; returns the handler that was replaced. */
struct sigaction pop_signal_handler(int sig, struct sigaction old_handler)
{
	struct sigaction popped;

	FAIL_IF_EXIT_MSG(sigaction(sig, &old_handler, &popped),
			 "failed to pop signal handler");

	return popped;
}
| linux-master | tools/testing/selftests/powerpc/utils.c |
// SPDX-License-Identifier: GPL-2.0+
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "dexcr.h"
#include "reg.h"
#include "utils.h"
static jmp_buf generic_signal_jump_buf;
/*
 * Generic handler: abandon whatever instruction faulted and resume at
 * the most recent setjmp(generic_signal_jump_buf) call site.
 */
static void generic_signal_handler(int signum, siginfo_t *info, void *context)
{
	/*
	 * Pass 1 explicitly rather than 0: longjmp() silently converts
	 * 0 to 1 anyway (setjmp() must observe a non-zero value on the
	 * jump path), so be explicit about what the caller sees.
	 */
	longjmp(generic_signal_jump_buf, 1);
}
/*
 * Probe for the DEXCR by reading the read-only DEXCR SPR and trapping
 * any resulting SIGILL. Returns true if the read completed.
 */
bool dexcr_exists(void)
{
	struct sigaction old;
	/* volatile: must still hold its value after the longjmp() back */
	volatile bool exists;
	old = push_signal_handler(SIGILL, generic_signal_handler);
	/* Non-zero return means the handler jumped back here */
	if (setjmp(generic_signal_jump_buf))
		goto out;
	/*
	 * If the SPR is not recognised by the hardware it triggers
	 * a hypervisor emulation interrupt. If the kernel does not
	 * recognise/try to emulate it, we receive a SIGILL signal.
	 *
	 * If we do not receive a signal, assume we have the SPR or the
	 * kernel is trying to emulate it correctly.
	 */
	exists = false;
	mfspr(SPRN_DEXCR_RO);
	exists = true;
out:
	pop_signal_handler(SIGILL, old);
	return exists;
}
/*
* Just test if a bad hashchk triggers a signal, without checking
* for support or if the NPHIE aspect is enabled.
*/
bool hashchk_triggers(void)
{
	struct sigaction old;
	/* volatile: must still hold its value after the longjmp() back */
	volatile bool triggers;
	old = push_signal_handler(SIGILL, generic_signal_handler);
	/* Non-zero return means the handler jumped back here */
	if (setjmp(generic_signal_jump_buf))
		goto out;
	/*
	 * If do_bad_hashchk() faults we longjmp out before the
	 * "triggers = false" below runs, so true is reported.
	 */
	triggers = true;
	do_bad_hashchk();
	triggers = false;
out:
	pop_signal_handler(SIGILL, old);
	return triggers;
}
/*
 * Read the requested DEXCR value: the process-level register, the
 * hypervisor register, or their OR (the effective enablement).
 */
unsigned int get_dexcr(enum dexcr_source source)
{
	switch (source) {
	case DEXCR:
		return mfspr(SPRN_DEXCR_RO);
	case HDEXCR:
		return mfspr(SPRN_HDEXCR_RO);
	case EFFECTIVE:
		/* An aspect is in force if set in either register */
		return mfspr(SPRN_DEXCR_RO) | mfspr(SPRN_HDEXCR_RO);
	default:
		/* FAIL_IF_EXIT_MSG() exits, so this path never returns */
		FAIL_IF_EXIT_MSG(true, "bad enum dexcr_source");
	}
}
/*
 * Reap the child with the given pid and require that it exited
 * normally with status 0; any other outcome aborts the test
 * (via FAIL_IF_EXIT_MSG).
 */
void await_child_success(pid_t pid)
{
	int status;

	FAIL_IF_EXIT_MSG(pid == -1, "fork failed");
	FAIL_IF_EXIT_MSG(waitpid(pid, &status, 0) == -1, "wait failed");
	FAIL_IF_EXIT_MSG(!WIFEXITED(status), "child did not exit cleanly");
	FAIL_IF_EXIT_MSG(WEXITSTATUS(status) != 0, "child exit error");
}
/*
* Perform a hashst instruction. The following components determine the result
*
* 1. The LR value (any register technically)
* 2. The SP value (also any register, but it must be a valid address)
* 3. A secret key managed by the kernel
*
* The result is stored to the address held in SP.
*/
void hashst(unsigned long lr, void *sp)
{
	/*
	 * r30/r31 stand in for SP/LR; the hash of (r31, r30 - 8) is
	 * written to *(r30 - 8), i.e. the caller-supplied sp.
	 */
	asm volatile ("addi 31, %0, 0;"	/* set r31 (pretend LR) to lr */
		      "addi 30, %1, 8;"	/* set r30 (pretend SP) to sp + 8 */
		      PPC_RAW_HASHST(31, -8, 30)	/* compute hash into stack location */
		      : : "r" (lr), "r" (sp) : "r31", "r30", "memory");
}
/*
* Perform a hashchk instruction. A hash is computed as per hashst(),
* however the result is not stored to memory. Instead the existing
* value is read and compared against the computed hash.
*
* If they match, execution continues.
* If they differ, an interrupt triggers.
*/
void hashchk(unsigned long lr, void *sp)
{
	/*
	 * Mirrors hashst(): recomputes the hash of (r31, r30 - 8) and
	 * compares it with the value already stored at *(r30 - 8).
	 */
	asm volatile ("addi 31, %0, 0;"	/* set r31 (pretend LR) to lr */
		      "addi 30, %1, 8;"	/* set r30 (pretend SP) to sp + 8 */
		      PPC_RAW_HASHCHK(31, -8, 30)	/* check hash at stack location */
		      : : "r" (lr), "r" (sp) : "r31", "r30", "memory");
}
/*
 * Store a valid hash, corrupt it, then check it: when the NPHIE aspect
 * is enforced the hashchk is guaranteed to fail (and trap).
 */
void do_bad_hashchk(void)
{
	unsigned long hash = 0;
	hashst(0, &hash);
	/* Corrupt the stored hash so the following check must mismatch */
	hash += 1;
	hashchk(0, &hash);
}
| linux-master | tools/testing/selftests/powerpc/dexcr/dexcr.c |
// SPDX-License-Identifier: GPL-2.0+
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>
#include "dexcr.h"
#include "utils.h"
/*
 * Common precondition: skip (non-zero return) unless the DEXCR exists
 * and the NPHIE (non-privileged hash instruction enable) aspect is in
 * effect for this process.
 */
static int require_nphie(void)
{
	SKIP_IF_MSG(!dexcr_exists(), "DEXCR not supported");
	SKIP_IF_MSG(!(get_dexcr(EFFECTIVE) & DEXCR_PR_NPHIE),
		    "DEXCR[NPHIE] not enabled");
	return 0;
}
static jmp_buf hashchk_detected_buf;
static const char *hashchk_failure_msg;
/*
 * SIGILL handler for a failing hashchk: record anything unexpected
 * about the delivered signal, then jump back to the test body.
 */
static void hashchk_handler(int signum, siginfo_t *info, void *context)
{
	if (signum != SIGILL)
		hashchk_failure_msg = "wrong signal received";
	else if (info->si_code != ILL_ILLOPN)
		hashchk_failure_msg = "wrong signal code received";

	/*
	 * Pass 1 explicitly rather than 0: longjmp() converts 0 to 1
	 * anyway, so make the value setjmp() returns obvious.
	 */
	longjmp(hashchk_detected_buf, 1);
}
/*
* Check that hashchk triggers when DEXCR[NPHIE] is enabled
* and is detected as such by the kernel exception handler
*/
static int hashchk_detected_test(void)
{
	struct sigaction old;
	int err;
	err = require_nphie();
	if (err)
		return err;
	old = push_signal_handler(SIGILL, hashchk_handler);
	/* Non-zero return means hashchk_handler jumped back here */
	if (setjmp(hashchk_detected_buf))
		goto out;
	/*
	 * The handler only overwrites this on an unexpected signal, and
	 * the line after do_bad_hashchk() only runs if no trap occurred
	 * at all — so NULL here means everything behaved as expected.
	 */
	hashchk_failure_msg = NULL;
	do_bad_hashchk();
	hashchk_failure_msg = "hashchk failed to trigger";
out:
	pop_signal_handler(SIGILL, old);
	FAIL_IF_MSG(hashchk_failure_msg, hashchk_failure_msg);
	return 0;
}
#define HASH_COUNT 8
static unsigned long hash_values[HASH_COUNT + 1];
/*
 * Populate hash_values[0..HASH_COUNT) with hashes computed under this
 * process's key, using the loop index as the LR input.
 */
static void fill_hash_values(void)
{
	for (unsigned long i = 0; i < HASH_COUNT; i++)
		hashst(i, &hash_values[i]);

	/* Used to ensure the checks uses the same addresses as the hashes */
	hash_values[HASH_COUNT] = (unsigned long)&hash_values;
}
/*
 * Recompute each stored hash in place and count how many recomputed
 * values match the originals. With a shared key every one of the
 * HASH_COUNT hashes matches; with a different key matches should be
 * vanishingly rare.
 */
static unsigned int count_hash_values_matches(void)
{
	/* Bounded by HASH_COUNT, so use the return type directly */
	unsigned int matches = 0;

	for (unsigned long i = 0; i < HASH_COUNT; i++) {
		unsigned long orig_hash = hash_values[i];

		hash_values[i] = 0;
		hashst(i, &hash_values[i]);

		if (hash_values[i] == orig_hash)
			matches++;
	}

	return matches;
}
/*
 * Child body for the exec test: generate fresh hashes (under this
 * process's key) and stream them to the parent via stdout.
 *
 * Returns 0 on success, EOVERFLOW if the write came up short.
 */
static int hashchk_exec_child(void)
{
	ssize_t written;

	fill_hash_values();

	written = write(STDOUT_FILENO, hash_values, sizeof(hash_values));
	if (written != sizeof(hash_values))
		return EOVERFLOW;

	return 0;
}
static char *hashchk_exec_child_args[] = { "hashchk_exec_child", NULL };
/*
* Check that new programs get different keys so a malicious process
* can't recreate a victim's hash values.
*/
static int hashchk_exec_random_key_test(void)
{
	pid_t pid;
	int err;
	int pipefd[2];
	err = require_nphie();
	if (err)
		return err;
	/*
	 * NOTE(review): the pipe fds are never close()d on any path;
	 * harmless here because the test process exits shortly after.
	 */
	FAIL_IF_MSG(pipe(pipefd), "failed to create pipe");
	pid = fork();
	if (pid == 0) {
		/* Child: route stdout into the pipe, then re-exec ourselves */
		if (dup2(pipefd[1], STDOUT_FILENO) == -1)
			_exit(errno);
		/*
		 * argv[0] is set to "hashchk_exec_child" so main() in the
		 * re-exec'd binary dispatches to hashchk_exec_child().
		 */
		execve("/proc/self/exe", hashchk_exec_child_args, NULL);
		_exit(errno);
	}
	await_child_success(pid);
	FAIL_IF_MSG(read(pipefd[0], hash_values, sizeof(hash_values)) != sizeof(hash_values),
		    "missing expected child output");
	/* Verify the child used the same hash_values address */
	FAIL_IF_EXIT_MSG(hash_values[HASH_COUNT] != (unsigned long)&hash_values,
			 "bad address check");
	/* If all hashes are the same it means (most likely) same key */
	FAIL_IF_MSG(count_hash_values_matches() == HASH_COUNT, "shared key detected");
	return 0;
}
/*
* Check that forks share the same key so that existing hash values
* remain valid.
*/
static int hashchk_fork_share_key_test(void)
{
	pid_t pid;
	int err;
	err = require_nphie();
	if (err)
		return err;
	/* Hashes computed by the parent, checked by the forked child */
	fill_hash_values();
	pid = fork();
	if (pid == 0) {
		/*
		 * Child (copy-on-write copy of hash_values): all hashes
		 * must still match, proving the key survived fork().
		 */
		if (count_hash_values_matches() != HASH_COUNT)
			_exit(1);
		_exit(0);
	}
	await_child_success(pid);
	return 0;
}
#define STACK_SIZE (1024 * 1024)
/*
 * Entry point for the CLONE_VM child: populate the (shared) hash
 * array using the child's key.
 */
static int hashchk_clone_child_fn(void *args)
{
	(void)args;	/* unused */

	fill_hash_values();

	return 0;
}
/*
* Check that threads share the same key so that existing hash values
* remain valid.
*/
/*
 * Check that threads share the same key so that existing hash values
 * remain valid: the CLONE_VM child fills the shared array with its
 * key, and the parent verifies every hash with its own key.
 */
static int hashchk_clone_share_key_test(void)
{
	void *child_stack;
	pid_t pid;
	int err;

	err = require_nphie();
	if (err)
		return err;

	child_stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	FAIL_IF_MSG(child_stack == MAP_FAILED, "failed to map child stack");

	/* The stack grows down: pass the top of the mapping */
	pid = clone(hashchk_clone_child_fn, child_stack + STACK_SIZE,
		    CLONE_VM | SIGCHLD, NULL);

	await_child_success(pid);

	/* The child has exited, so its stack mapping can be released */
	FAIL_IF_MSG(munmap(child_stack, STACK_SIZE), "failed to unmap child stack");

	FAIL_IF_MSG(count_hash_values_matches() != HASH_COUNT,
		    "different key detected");

	return 0;
}
int main(int argc, char *argv[])
{
	int err = 0;
	/*
	 * The exec test re-runs this binary with argv[0] set to
	 * "hashchk_exec_child"; detect that and run the child body
	 * instead of the test suite.
	 */
	if (argc >= 1 && !strcmp(argv[0], hashchk_exec_child_args[0]))
		return hashchk_exec_child();
	err |= test_harness(hashchk_detected_test, "hashchk_detected");
	err |= test_harness(hashchk_exec_random_key_test, "hashchk_exec_random_key");
	err |= test_harness(hashchk_fork_share_key_test, "hashchk_fork_share_key");
	err |= test_harness(hashchk_clone_share_key_test, "hashchk_clone_share_key");
	return err;
}
| linux-master | tools/testing/selftests/powerpc/dexcr/hashchk_test.c |
// SPDX-License-Identifier: GPL-2.0+
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include "dexcr.h"
#include "utils.h"
static unsigned int dexcr;
static unsigned int hdexcr;
static unsigned int effective;
/* One DEXCR aspect: its mnemonic, description and bit index */
struct dexcr_aspect {
	const char *name;	/* aspect mnemonic */
	const char *desc;	/* human-readable description */
	unsigned int index;	/* aspect bit index within the DEXCR */
};

/* Table of the DEXCR aspects this tool knows how to report */
static const struct dexcr_aspect aspects[] = {
	{
		.name = "SBHE",
		.desc = "Speculative branch hint enable",
		.index = 0,
	},
	{
		.name = "IBRTPD",
		.desc = "Indirect branch recurrent target prediction disable",
		.index = 3,
	},
	{
		.name = "SRAPD",
		.desc = "Subroutine return address prediction disable",
		.index = 4,
	},
	{
		.name = "NPHIE",
		.desc = "Non-privileged hash instruction enable",
		.index = 5,
	},
	{
		.name = "PHIE",
		.desc = "Privileged hash instruction enable",
		.index = 6,
	},
};
/* Print the first len entries of list, comma separated, no trailing comma */
static void print_list(const char *list[], size_t len)
{
	for (size_t idx = 0; idx < len; idx++) {
		if (idx)
			printf(", ");
		printf("%s", list[idx]);
	}
}
/*
 * Print one DEXCR value as hex plus a parenthesised, comma-separated
 * list of the aspect names it enables. Bits not covered by the aspects
 * table are reported as "unknown"; a zero value gets no list at all.
 */
static void print_dexcr(char *name, unsigned int bits)
{
	const char *set_names[ARRAY_SIZE(aspects) + 1] = {NULL};
	unsigned int remaining = bits;
	size_t count = 0;

	printf("%s: %08x", name, bits);

	if (!bits) {
		printf("\n");
		return;
	}

	for (size_t i = 0; i < ARRAY_SIZE(aspects); i++) {
		unsigned int mask = DEXCR_PR_BIT(aspects[i].index);

		if (remaining & mask) {
			set_names[count++] = aspects[i].name;
			remaining &= ~mask;
		}
	}

	/* Anything left over isn't in the aspects table */
	if (remaining)
		set_names[count++] = "unknown";

	printf(" (");
	print_list(set_names, count);
	printf(")\n");
}
/*
 * Print one table row for an aspect: where it is set (process DEXCR,
 * hypervisor DEXCR, or clear), whether it is in effect ('*'), and its
 * description. Reads the file-level dexcr/hdexcr/effective globals.
 */
static void print_aspect(const struct dexcr_aspect *aspect)
{
	const char *attributes[8] = {NULL};
	size_t j = 0;
	unsigned long mask;
	mask = DEXCR_PR_BIT(aspect->index);
	if (dexcr & mask)
		attributes[j++] = "set";
	if (hdexcr & mask)
		attributes[j++] = "set (hypervisor)";
	/* Not set in either register */
	if (!(effective & mask))
		attributes[j++] = "clear";
	printf("%12s %c (%d): ", aspect->name, effective & mask ? '*' : ' ', aspect->index);
	print_list(attributes, j);
	printf(" \t(%s)\n", aspect->desc);
}
/*
 * Report the process, hypervisor and effective DEXCR values, a
 * per-aspect breakdown, and whether hashst/hashchk behave as the
 * NPHIE setting implies.
 */
int main(int argc, char *argv[])
{
	if (!dexcr_exists()) {
		printf("DEXCR not detected on this hardware\n");
		return 1;
	}
	dexcr = get_dexcr(DEXCR);
	hdexcr = get_dexcr(HDEXCR);
	effective = dexcr | hdexcr;
	print_dexcr("    DEXCR", dexcr);
	print_dexcr("   HDEXCR", hdexcr);
	print_dexcr("Effective", effective);
	printf("\n");
	for (size_t i = 0; i < ARRAY_SIZE(aspects); i++)
		print_aspect(&aspects[i]);
	printf("\n");
	/* Cross-check the reported NPHIE state against actual behaviour */
	if (effective & DEXCR_PR_NPHIE) {
		printf("DEXCR[NPHIE] enabled: hashst/hashchk ");
		if (hashchk_triggers())
			printf("working\n");
		else
			printf("failed to trigger\n");
	} else {
		printf("DEXCR[NPHIE] disabled: hashst/hashchk ");
		if (hashchk_triggers())
			printf("unexpectedly triggered\n");
		else
			printf("ignored\n");
	}
	return 0;
}
| linux-master | tools/testing/selftests/powerpc/dexcr/lsdexcr.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018 IBM Corporation.
*/
#define __SANE_USERSPACE_TYPES__
#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "utils.h"
#include "flush_utils.h"
/*
 * Test the kernel's RFI (return from interrupt) L1D flush mitigation:
 * with rfi_flush enabled we expect an L1D miss for (almost) every
 * cacheline re-touched after a syscall; with it disabled, far fewer.
 * The measurement is run with the debugfs setting both as found and
 * inverted, and the original settings are restored before returning.
 */
int rfi_flush_test(void)
{
	char *p;
	int repetitions = 10;
	int fd, passes = 0, iter, rc = 0;
	struct perf_event_read v;
	__u64 l1d_misses_total = 0;
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long l1d_misses_expected;
	int rfi_flush_orig, rfi_flush;
	int have_entry_flush, entry_flush_orig;

	SKIP_IF(geteuid() != 0);

	// The PMU event we use only works on Power7 or later
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	if (read_debugfs_int("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
		perror("Unable to read powerpc/rfi_flush debugfs file");
		SKIP_IF(1);
	}

	/* Disable the entry flush for the duration, if it exists */
	if (read_debugfs_int("powerpc/entry_flush", &entry_flush_orig) < 0) {
		have_entry_flush = 0;
	} else {
		have_entry_flush = 1;
		if (entry_flush_orig != 0) {
			if (write_debugfs_int("powerpc/entry_flush", 0) < 0) {
				perror("error writing to powerpc/entry_flush debugfs file");
				return 1;
			}
		}
	}

	rfi_flush = rfi_flush_orig;

	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
	FAIL_IF(fd < 0);

	/*
	 * memalign() takes (alignment, size): we want zero_size bytes of
	 * cacheline-aligned memory. The previous argument order allocated
	 * only CACHELINE_SIZE bytes while syscall_loop() reads zero_size
	 * bytes, i.e. well past the end of the allocation.
	 */
	p = (char *)memalign(CACHELINE_SIZE, zero_size);
	FAIL_IF(!p);

	FAIL_IF(perf_event_enable(fd));

	// disable L1 prefetching
	set_dscr(1);

	iter = repetitions;

	/*
	 * We expect to see l1d miss for each cacheline access when rfi_flush
	 * is set. Allow a small variation on this.
	 */
	l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);

again:
	FAIL_IF(perf_event_reset(fd));

	syscall_loop(p, iterations, zero_size);

	FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));

	if (rfi_flush && v.l1d_misses >= l1d_misses_expected)
		passes++;
	else if (!rfi_flush && v.l1d_misses < (l1d_misses_expected / 2))
		passes++;

	l1d_misses_total += v.l1d_misses;

	if (--iter)
		goto again;

	if (passes < repetitions) {
		printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n",
		       rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>',
		       rfi_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       repetitions - passes, repetitions);
		rc = 1;
	} else
		printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n",
		       rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<',
		       rfi_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       passes, repetitions);

	/* Invert the setting and measure once more */
	if (rfi_flush == rfi_flush_orig) {
		rfi_flush = !rfi_flush_orig;
		if (write_debugfs_int("powerpc/rfi_flush", rfi_flush) < 0) {
			perror("error writing to powerpc/rfi_flush debugfs file");
			return 1;
		}
		iter = repetitions;
		l1d_misses_total = 0;
		passes = 0;
		goto again;
	}

	perf_event_disable(fd);
	close(fd);

	set_dscr(0);

	free(p);

	if (write_debugfs_int("powerpc/rfi_flush", rfi_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/rfi_flush debugfs file");
		return 1;
	}

	if (have_entry_flush) {
		if (write_debugfs_int("powerpc/entry_flush", entry_flush_orig) < 0) {
			perror("unable to restore original value of powerpc/entry_flush "
			       "debugfs file");
			return 1;
		}
	}

	return rc;
}
/* Run the RFI flush test under the standard powerpc test harness */
int main(int argc, char *argv[])
{
	(void)argc;
	(void)argv;

	return test_harness(rfi_flush_test, "rfi_flush_test");
}
| linux-master | tools/testing/selftests/powerpc/security/rfi_flush.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018 IBM Corporation.
*/
#define __SANE_USERSPACE_TYPES__
#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "utils.h"
#include "flush_utils.h"
/*
 * Test the kernel's kernel-entry L1D flush mitigation: with
 * entry_flush enabled we expect an L1D miss for (almost) every
 * cacheline re-touched after a syscall; with it disabled, far fewer.
 * The measurement is run with the debugfs setting both as found and
 * inverted, and the original settings are restored before returning.
 */
int entry_flush_test(void)
{
	char *p;
	int repetitions = 10;
	int fd, passes = 0, iter, rc = 0;
	struct perf_event_read v;
	__u64 l1d_misses_total = 0;
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long l1d_misses_expected;
	int rfi_flush_orig;
	int entry_flush, entry_flush_orig;

	SKIP_IF(geteuid() != 0);

	// The PMU event we use only works on Power7 or later
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	if (read_debugfs_int("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
		perror("Unable to read powerpc/rfi_flush debugfs file");
		SKIP_IF(1);
	}

	if (read_debugfs_int("powerpc/entry_flush", &entry_flush_orig) < 0) {
		perror("Unable to read powerpc/entry_flush debugfs file");
		SKIP_IF(1);
	}

	/* Disable the RFI flush so it can't interfere with the measurement */
	if (rfi_flush_orig != 0) {
		if (write_debugfs_int("powerpc/rfi_flush", 0) < 0) {
			perror("error writing to powerpc/rfi_flush debugfs file");
			FAIL_IF(1);
		}
	}

	entry_flush = entry_flush_orig;

	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
	FAIL_IF(fd < 0);

	/*
	 * memalign() takes (alignment, size): we want zero_size bytes of
	 * cacheline-aligned memory. The previous argument order allocated
	 * only CACHELINE_SIZE bytes while syscall_loop() reads zero_size
	 * bytes, i.e. well past the end of the allocation.
	 */
	p = (char *)memalign(CACHELINE_SIZE, zero_size);
	FAIL_IF(!p);

	FAIL_IF(perf_event_enable(fd));

	// disable L1 prefetching
	set_dscr(1);

	iter = repetitions;

	/*
	 * We expect to see l1d miss for each cacheline access when entry_flush
	 * is set. Allow a small variation on this.
	 */
	l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);

again:
	FAIL_IF(perf_event_reset(fd));

	syscall_loop(p, iterations, zero_size);

	FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));

	if (entry_flush && v.l1d_misses >= l1d_misses_expected)
		passes++;
	else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2))
		passes++;

	l1d_misses_total += v.l1d_misses;

	if (--iter)
		goto again;

	if (passes < repetitions) {
		printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n",
		       entry_flush, l1d_misses_total, entry_flush ? '<' : '>',
		       entry_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       repetitions - passes, repetitions);
		rc = 1;
	} else {
		printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n",
		       entry_flush, l1d_misses_total, entry_flush ? '>' : '<',
		       entry_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       passes, repetitions);
	}

	/* Invert the setting and measure once more */
	if (entry_flush == entry_flush_orig) {
		entry_flush = !entry_flush_orig;
		if (write_debugfs_int("powerpc/entry_flush", entry_flush) < 0) {
			perror("error writing to powerpc/entry_flush debugfs file");
			return 1;
		}
		iter = repetitions;
		l1d_misses_total = 0;
		passes = 0;
		goto again;
	}

	perf_event_disable(fd);
	close(fd);

	set_dscr(0);

	free(p);

	if (write_debugfs_int("powerpc/rfi_flush", rfi_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/rfi_flush debugfs file");
		return 1;
	}

	if (write_debugfs_int("powerpc/entry_flush", entry_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/entry_flush debugfs file");
		return 1;
	}

	return rc;
}
/* Run the entry flush test under the standard powerpc test harness */
int main(int argc, char *argv[])
{
	(void)argc;
	(void)argv;

	return test_harness(entry_flush_test, "entry_flush_test");
}
| linux-master | tools/testing/selftests/powerpc/security/entry_flush.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018 IBM Corporation.
*/
#define __SANE_USERSPACE_TYPES__
#include <sys/types.h>
#include <stdint.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/utsname.h>
#include "reg.h"
#include "utils.h"
#include "flush_utils.h"
/*
 * Load 8 bytes from addr with an explicit ld instruction, so the
 * compiler cannot elide or fold the access — used to pull cachelines
 * into the L1.
 */
static inline __u64 load(void *addr)
{
	__u64 tmp;
	asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
	return tmp;
}
/*
 * Repeatedly touch every cacheline in p[0..zero_size) and then enter
 * the kernel via a cheap syscall (getppid), so the perf counters can
 * observe whether the syscall return path flushed the L1D.
 */
void syscall_loop(char *p, unsigned long iterations,
		  unsigned long zero_size)
{
	for (unsigned long it = 0; it < iterations; it++) {
		unsigned long off = 0;

		while (off < zero_size) {
			load(p + off);
			off += CACHELINE_SIZE;
		}

		getppid();
	}
}
/*
 * Like syscall_loop(), but the syscall (uname) copies data out to
 * userspace, exercising the kernel's uaccess flush path.
 */
void syscall_loop_uaccess(char *p, unsigned long iterations,
			  unsigned long zero_size)
{
	struct utsname un;

	for (unsigned long it = 0; it < iterations; it++) {
		unsigned long off = 0;

		while (off < zero_size) {
			load(p + off);
			off += CACHELINE_SIZE;
		}

		uname(&un);
	}
}
/*
 * SIGILL handler that skips over a faulting "mtspr 3,RS" (move to
 * DSCR) instruction, so set_dscr() degrades gracefully on systems
 * where userspace DSCR writes trap. Any other faulting instruction
 * aborts.
 */
static void sigill_handler(int signr, siginfo_t *info, void *unused)
{
	static int warned;
	ucontext_t *ctx = (ucontext_t *)unused;
	unsigned long *pc = &UCONTEXT_NIA(ctx);
	/* mtspr 3,RS to check for move to DSCR below */
	if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
		if (!warned++)
			printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
		/* Advance NIA past the 4-byte mtspr so execution continues */
		*pc += 4;
	} else {
		printf("SIGILL at %p\n", pc);
		abort();
	}
}
/*
 * Write val to the DSCR SPR. On first use, installs sigill_handler so
 * that a trapping mtspr is skipped (with a warning) instead of killing
 * the test.
 */
void set_dscr(unsigned long val)
{
	static int init;
	struct sigaction sa;
	if (!init) {
		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = sigill_handler;
		sa.sa_flags = SA_SIGINFO;
		/* Failure to install is only reported, not fatal */
		if (sigaction(SIGILL, &sa, NULL))
			perror("sigill_handler");
		init = 1;
	}
	mtspr(SPRN_DSCR, val);
}
| linux-master | tools/testing/selftests/powerpc/security/flush_utils.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018-2019 IBM Corporation.
*/
#define __SANE_USERSPACE_TYPES__
#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/prctl.h>
#include "utils.h"
#include "../pmu/event.h"
extern void pattern_cache_loop(void);
extern void indirect_branch_loop(void);
/*
 * Run the branch-heavy workload with the counters enabled and compute
 * the branch misprediction rate. On P9 the pattern cache counters
 * (events[2]/[3]) are folded in as well.
 *
 * Returns 0 on success with *miss_percent set, non-zero (via FAIL_IF)
 * if any counter was not running the whole time.
 */
static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
{
	u64 pred, mpred;
	prctl(PR_TASK_PERF_EVENTS_ENABLE);
	if (is_p9)
		pattern_cache_loop();
	else
		indirect_branch_loop();
	prctl(PR_TASK_PERF_EVENTS_DISABLE);
	event_read(&events[0]);
	event_read(&events[1]);
	// We could scale all the events by running/enabled but we're lazy
	// As long as the PMU is uncontended they should all run
	FAIL_IF(events[0].result.running != events[0].result.enabled);
	FAIL_IF(events[1].result.running != events[1].result.enabled);
	pred = events[0].result.value;
	mpred = events[1].result.value;
	if (is_p9) {
		event_read(&events[2]);
		event_read(&events[3]);
		FAIL_IF(events[2].result.running != events[2].result.enabled);
		FAIL_IF(events[3].result.running != events[3].result.enabled);
		pred += events[2].result.value;
		mpred += events[3].result.value;
	}
	*miss_percent = 100 * mpred / pred;
	return 0;
}
/*
 * Initialise a named, userspace-only counter. It starts disabled so it
 * only counts between the PR_TASK_PERF_EVENTS_ENABLE/DISABLE pair in
 * do_count_loop().
 */
static void setup_event(struct event *e, u64 config, char *name)
{
	event_init_named(e, config, name);

	e->attr.exclude_kernel = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_idle = 1;
	e->attr.disabled = 1;
}
enum spectre_v2_state {
VULNERABLE = 0,
UNKNOWN = 1, // Works with FAIL_IF()
NOT_AFFECTED,
BRANCH_SERIALISATION,
COUNT_CACHE_DISABLED,
COUNT_CACHE_FLUSH_SW,
COUNT_CACHE_FLUSH_HW,
BTB_FLUSH,
};
/*
 * Map the spectre_v2 sysfs vulnerability string onto an enum value.
 * FAIL_IF() returns 1, which is UNKNOWN, so early failures report
 * UNKNOWN to the caller.
 */
static enum spectre_v2_state get_sysfs_state(void)
{
	enum spectre_v2_state state = UNKNOWN;
	char buf[256];
	int len;
	memset(buf, 0, sizeof(buf));
	FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));
	// Make sure it's NULL terminated
	buf[sizeof(buf) - 1] = '\0';
	// Trim the trailing newline
	len = strlen(buf);
	FAIL_IF(len < 1);
	buf[len - 1] = '\0';
	printf("sysfs reports: '%s'\n", buf);
	// Order matters
	if (strstr(buf, "Vulnerable"))
		state = VULNERABLE;
	else if (strstr(buf, "Not affected"))
		state = NOT_AFFECTED;
	else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
		state = BRANCH_SERIALISATION;
	else if (strstr(buf, "Indirect branch cache disabled"))
		state = COUNT_CACHE_DISABLED;
	else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
		state = COUNT_CACHE_FLUSH_HW;
	else if (strstr(buf, "Software count cache flush"))
		state = COUNT_CACHE_FLUSH_SW;
	else if (strstr(buf, "Branch predictor state flush"))
		state = BTB_FLUSH;
	return state;
}
#define PM_BR_PRED_CCACHE 0x040a4 // P8 + P9
#define PM_BR_MPRED_CCACHE 0x040ac // P8 + P9
#define PM_BR_PRED_PCACHE 0x048a0 // P9 only
#define PM_BR_MPRED_PCACHE 0x048b0 // P9 only
/*
 * Measure the userspace branch misprediction rate and cross-check it
 * against the spectre_v2 mitigation the kernel reports via sysfs: a
 * disabled count cache should show very high miss rates, active
 * prediction should show low rates. Mismatches indicate the reported
 * and actual mitigation states disagree.
 */
int spectre_v2_test(void)
{
	enum spectre_v2_state state;
	struct event events[4];
	s64 miss_percent;
	bool is_p9;
	// The PMU events we use only work on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
	state = get_sysfs_state();
	if (state == UNKNOWN) {
		printf("Error: couldn't determine spectre_v2 mitigation state?\n");
		return -1;
	}
	memset(events, 0, sizeof(events));
	setup_event(&events[0], PM_BR_PRED_CCACHE, "PM_BR_PRED_CCACHE");
	setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
	FAIL_IF(event_open(&events[0]));
	/* events[0] leads the group; the rest join it */
	FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);
	/* PVR major 0x4e identifies Power9 */
	is_p9 = ((mfspr(SPRN_PVR) >> 16) & 0xFFFF) == 0x4e;
	if (is_p9) {
		// Count pattern cache too
		setup_event(&events[2], PM_BR_PRED_PCACHE, "PM_BR_PRED_PCACHE");
		setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");
		FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
		FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
	}
	FAIL_IF(do_count_loop(events, is_p9, &miss_percent));
	event_report_justified(&events[0], 18, 10);
	event_report_justified(&events[1], 18, 10);
	event_close(&events[0]);
	event_close(&events[1]);
	if (is_p9) {
		event_report_justified(&events[2], 18, 10);
		event_report_justified(&events[3], 18, 10);
		event_close(&events[2]);
		event_close(&events[3]);
	}
	printf("Miss percent %lld %%\n", miss_percent);
	/* Compare the measured rate against what the sysfs state implies */
	switch (state) {
	case VULNERABLE:
	case NOT_AFFECTED:
	case COUNT_CACHE_FLUSH_SW:
	case COUNT_CACHE_FLUSH_HW:
		// These should all not affect userspace branch prediction
		if (miss_percent > 15) {
			if (miss_percent > 95) {
				/*
				 * Such a mismatch may be caused by a system being unaware
				 * the count cache is disabled. This may be to enable
				 * guest migration between hosts with different settings.
				 * Return skip code to avoid detecting this as an error.
				 * We are not vulnerable and reporting otherwise, so
				 * missing such a mismatch is safe.
				 */
				printf("Branch misses > 95%% unexpected in this configuration.\n");
				printf("Count cache likely disabled without Linux knowing.\n");
				if (state == COUNT_CACHE_FLUSH_SW)
					printf("WARNING: Kernel performing unnecessary flushes.\n");
				return 4;
			}
			printf("Branch misses > 15%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case BRANCH_SERIALISATION:
		// This seems to affect userspace branch prediction a bit?
		if (miss_percent > 25) {
			printf("Branch misses > 25%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case COUNT_CACHE_DISABLED:
		if (miss_percent < 95) {
			printf("Branch misses < 95%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case UNKNOWN:
	case BTB_FLUSH:
		printf("Not sure!\n");
		return 1;
	}
	printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");
	return 0;
}
/* Run the spectre v2 check under the standard powerpc test harness */
int main(int argc, char *argv[])
{
	(void)argc;
	(void)argv;

	return test_harness(spectre_v2_test, "spectre_v2");
}
| linux-master | tools/testing/selftests/powerpc/security/spectre_v2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018 IBM Corporation.
* Copyright 2020 Canonical Ltd.
*/
#define __SANE_USERSPACE_TYPES__
#include <sys/types.h>
#include <stdint.h>
#include <malloc.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "utils.h"
#include "flush_utils.h"
/*
 * Test the kernel's uaccess L1D flush mitigation: with uaccess_flush
 * enabled we expect an L1D miss for (almost) every cacheline
 * re-touched after a copy-to-user syscall; with it disabled, far
 * fewer. The measurement is run with the debugfs setting both as found
 * and inverted, and the original settings are restored before
 * returning.
 */
int uaccess_flush_test(void)
{
	char *p;
	int repetitions = 10;
	int fd, passes = 0, iter, rc = 0;
	struct perf_event_read v;
	__u64 l1d_misses_total = 0;
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long l1d_misses_expected;
	int rfi_flush_orig;
	int entry_flush_orig;
	int uaccess_flush, uaccess_flush_orig;

	SKIP_IF(geteuid() != 0);

	// The PMU event we use only works on Power7 or later
	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));

	if (read_debugfs_int("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
		perror("Unable to read powerpc/rfi_flush debugfs file");
		SKIP_IF(1);
	}

	if (read_debugfs_int("powerpc/entry_flush", &entry_flush_orig) < 0) {
		perror("Unable to read powerpc/entry_flush debugfs file");
		SKIP_IF(1);
	}

	if (read_debugfs_int("powerpc/uaccess_flush", &uaccess_flush_orig) < 0) {
		/* Fixed copy-paste: this read is of uaccess_flush, not entry_flush */
		perror("Unable to read powerpc/uaccess_flush debugfs file");
		SKIP_IF(1);
	}

	/* Disable the other flushes so they can't interfere */
	if (rfi_flush_orig != 0) {
		if (write_debugfs_int("powerpc/rfi_flush", 0) < 0) {
			perror("error writing to powerpc/rfi_flush debugfs file");
			FAIL_IF(1);
		}
	}

	if (entry_flush_orig != 0) {
		if (write_debugfs_int("powerpc/entry_flush", 0) < 0) {
			perror("error writing to powerpc/entry_flush debugfs file");
			FAIL_IF(1);
		}
	}

	uaccess_flush = uaccess_flush_orig;

	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
	FAIL_IF(fd < 0);

	/*
	 * memalign() takes (alignment, size): we want zero_size bytes of
	 * cacheline-aligned memory. The previous argument order allocated
	 * only CACHELINE_SIZE bytes while syscall_loop_uaccess() reads
	 * zero_size bytes, i.e. well past the end of the allocation.
	 */
	p = (char *)memalign(CACHELINE_SIZE, zero_size);
	FAIL_IF(!p);

	FAIL_IF(perf_event_enable(fd));

	// disable L1 prefetching
	set_dscr(1);

	iter = repetitions;

	/*
	 * We expect to see l1d miss for each cacheline access when entry_flush
	 * is set. Allow a small variation on this.
	 */
	l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);

again:
	FAIL_IF(perf_event_reset(fd));

	syscall_loop_uaccess(p, iterations, zero_size);

	FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));

	if (uaccess_flush && v.l1d_misses >= l1d_misses_expected)
		passes++;
	else if (!uaccess_flush && v.l1d_misses < (l1d_misses_expected / 2))
		passes++;

	l1d_misses_total += v.l1d_misses;

	if (--iter)
		goto again;

	if (passes < repetitions) {
		printf("FAIL (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d failures]\n",
		       uaccess_flush, l1d_misses_total, uaccess_flush ? '<' : '>',
		       uaccess_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       repetitions - passes, repetitions);
		rc = 1;
	} else {
		printf("PASS (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d pass]\n",
		       uaccess_flush, l1d_misses_total, uaccess_flush ? '>' : '<',
		       uaccess_flush ? repetitions * l1d_misses_expected :
		       repetitions * l1d_misses_expected / 2,
		       passes, repetitions);
	}

	/* Invert the setting and measure once more */
	if (uaccess_flush == uaccess_flush_orig) {
		uaccess_flush = !uaccess_flush_orig;
		if (write_debugfs_int("powerpc/uaccess_flush", uaccess_flush) < 0) {
			perror("error writing to powerpc/uaccess_flush debugfs file");
			return 1;
		}
		iter = repetitions;
		l1d_misses_total = 0;
		passes = 0;
		goto again;
	}

	perf_event_disable(fd);
	close(fd);

	set_dscr(0);

	free(p);

	if (write_debugfs_int("powerpc/rfi_flush", rfi_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/rfi_flush debugfs file");
		return 1;
	}

	if (write_debugfs_int("powerpc/entry_flush", entry_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/entry_flush debugfs file");
		return 1;
	}

	if (write_debugfs_int("powerpc/uaccess_flush", uaccess_flush_orig) < 0) {
		perror("unable to restore original value of powerpc/uaccess_flush debugfs file");
		return 1;
	}

	return rc;
}
/* Run the uaccess flush test under the standard powerpc test harness */
int main(int argc, char *argv[])
{
	(void)argc;
	(void)argv;

	return test_harness(uaccess_flush_test, "uaccess_flush_test");
}
| linux-master | tools/testing/selftests/powerpc/security/uaccess_flush.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "tm.h"
#include "ptrace-tar.h"
int shm_id;
int *cptr, *pptr;
/*
 * Called by the child from inside the suspended transaction (via "bl
 * wait_parent" in tm_spd_tar's asm — hence __attribute__((used))):
 * signal readiness through shared memory (cptr[2]) and spin until the
 * tracer releases us (cptr[1]). The empty asm is a compiler barrier so
 * the flag is re-read each iteration.
 */
__attribute__((used)) void wait_parent(void)
{
	cptr[2] = 1;
	while (!cptr[1])
		asm volatile("" : : : "memory");
}
/*
 * Child body: set TAR/DSCR/PPR to known values, start a transaction
 * and change them, suspend and change them again, then block in
 * wait_parent() so the tracer can inspect/modify the running and
 * checkpointed register sets via ptrace. After the transaction
 * completes, verify the tracer's writes took effect and exit with the
 * result (0 = pass).
 */
void tm_spd_tar(void)
{
	unsigned long result, texasr;
	unsigned long regs[3];
	int ret;
	cptr = (int *)shmat(shm_id, NULL, 0);
trans:
	cptr[2] = 0;
	asm __volatile__(
		"li	4, %[tar_1];"
		"mtspr %[sprn_tar],  4;"	/* TAR_1 */
		"li	4, %[dscr_1];"
		"mtspr %[sprn_dscr], 4;"	/* DSCR_1 */
		"or     31,31,31;"		/* PPR_1*/

		"1: ;"
		"tbegin.;"
		"beq 2f;"

		"li	4, %[tar_2];"
		"mtspr %[sprn_tar],  4;"	/* TAR_2 */
		"li	4, %[dscr_2];"
		"mtspr %[sprn_dscr], 4;"	/* DSCR_2 */
		"or     1,1,1;"			/* PPR_2 */

		"tsuspend.;"
		"li	4, %[tar_3];"
		"mtspr %[sprn_tar],  4;"	/* TAR_3 */
		"li	4, %[dscr_3];"
		"mtspr %[sprn_dscr], 4;"	/* DSCR_3 */
		"or     6,6,6;"			/* PPR_3 */
		"bl wait_parent;"
		"tresume.;"

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		/* Transaction abort handler */
		"2: ;"
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"

		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [sprn_dscr]"i"(SPRN_DSCR),
		[sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR),
		[sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
		[dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
		[tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
		: "memory", "r0", "r3", "r4", "r5", "r6", "lr"
		);

	/* TM failed, analyse */
	if (result) {
		/* Retry until the tracer has actually attached (cptr[0]) */
		if (!cptr[0])
			goto trans;

		regs[0] = mfspr(SPRN_TAR);
		regs[1] = mfspr(SPRN_PPR);
		regs[2] = mfspr(SPRN_DSCR);

		shmdt(&cptr);
		printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
		       user_read, regs[0], regs[1], regs[2]);

		/* Expect the values the tracer wrote into the checkpoint */
		ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
		if (ret)
			exit(1);
		exit(0);
	}
	shmdt(&cptr);
	exit(1);
}
/*
 * Tracer side: attach to the suspended child, check the running
 * (suspend-time) and checkpointed TAR/PPR/DSCR values, overwrite the
 * checkpointed set with TAR_4/PPR_4/DSCR_4, then release the child.
 */
int trace_tm_spd_tar(pid_t child)
{
	unsigned long regs[3];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_tar_registers(child, regs));
	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
	       ptrace_read_running, regs[0], regs[1], regs[2]);

	/* Running values are those set in the suspended section */
	FAIL_IF(validate_tar_registers(regs, TAR_3, PPR_3, DSCR_3));
	FAIL_IF(show_tm_checkpointed_state(child, regs));
	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
	       ptrace_read_ckpt, regs[0], regs[1], regs[2]);

	/* Checkpointed values are those from before tbegin. */
	FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
	FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
	       ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);

	/* Tell the child the tracer is done and release its spin loop */
	pptr[0] = 1;
	pptr[1] = 1;
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
/*
 * Test driver: fork a child that runs the TM workload, wait for it to
 * signal readiness via shared memory, trace it, then collect its exit
 * status.
 */
int ptrace_tm_spd_tar(void)
{
	pid_t pid;
	int ret, status;

	SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
	SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
	FAIL_IF(shm_id == -1);

	pid = fork();
	/*
	 * An unchecked failed fork() previously left the parent spinning
	 * forever below waiting for a child that was never created.
	 */
	FAIL_IF(pid == -1);
	if (pid == 0)
		tm_spd_tar();

	pptr = (int *)shmat(shm_id, NULL, 0);
	FAIL_IF(pptr == (void *)-1);
	pptr[0] = 0;
	pptr[1] = 0;

	if (pid) {
		/* Wait for the child to reach wait_parent() */
		while (!pptr[2])
			asm volatile("" : : : "memory");
		ret = trace_tm_spd_tar(pid);
		if (ret) {
			kill(pid, SIGTERM);
			shmdt(&pptr);
			shmctl(shm_id, IPC_RMID, NULL);
			return TEST_FAIL;
		}

		shmdt(&pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
			TEST_PASS;
	}
	return TEST_PASS;
}
/* Register the test with the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_tm_spd_tar, "ptrace_tm_spd_tar");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Ptrace test for Memory Protection Key registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
* Copyright (C) 2018 IBM Corporation.
*/
#include <limits.h>
#include <linux/kernel.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <unistd.h>
#include "ptrace.h"
#include "child.h"
#ifndef __NR_pkey_alloc
#define __NR_pkey_alloc 384
#endif
#ifndef __NR_pkey_free
#define __NR_pkey_free 385
#endif
#ifndef NT_PPC_PKEY
#define NT_PPC_PKEY 0x110
#endif
#ifndef PKEY_DISABLE_EXECUTE
#define PKEY_DISABLE_EXECUTE 0x4
#endif
#define AMR_BITS_PER_PKEY 2
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey + 1) * AMR_BITS_PER_PKEY))
#define CORE_FILE_LIMIT (5 * 1024 * 1024) /* 5 MB should be enough */
static const char core_pattern_file[] = "/proc/sys/kernel/core_pattern";
static const char user_write[] = "[User Write (Running)]";
static const char core_read_running[] = "[Core Read (Running)]";
/* Information shared between the parent and the child. */
struct shared_info {
	/* Synchronisation primitive between parent and child (see child.h). */
	struct child_sync child_sync;

	/* AMR value the parent expects to read in the core file. */
	unsigned long amr;

	/* IAMR value the parent expects to read in the core file. */
	unsigned long iamr;

	/* UAMOR value the parent expects to read in the core file. */
	unsigned long uamor;

	/* When the child crashed; used to reject stale core files. */
	time_t core_time;
};
/* Raw pkey_alloc(2) wrapper (the syscall may predate libc support). */
static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
{
	return syscall(__NR_pkey_alloc, flags, init_access_rights);
}

/* Raw pkey_free(2) wrapper. */
static int sys_pkey_free(int pkey)
{
	return syscall(__NR_pkey_free, pkey);
}
/*
 * Raise the soft (and, if needed and allowed, hard) limit of @resource
 * to at least CORE_FILE_LIMIT. Infinite limits are left untouched.
 */
static int raise_rlimit_for_core(int resource)
{
	struct rlimit rlim;
	int ret;

	ret = getrlimit(resource, &rlim);
	FAIL_IF(ret);

	if (rlim.rlim_cur != RLIM_INFINITY && rlim.rlim_cur < CORE_FILE_LIMIT) {
		rlim.rlim_cur = CORE_FILE_LIMIT;

		if (rlim.rlim_max != RLIM_INFINITY &&
		    rlim.rlim_max < CORE_FILE_LIMIT)
			rlim.rlim_max = CORE_FILE_LIMIT;

		ret = setrlimit(resource, &rlim);
		FAIL_IF(ret);
	}

	return TEST_PASS;
}

/*
 * Make sure both RLIMIT_CORE and RLIMIT_FSIZE are large enough for the
 * child to dump a core file of up to CORE_FILE_LIMIT bytes.
 * (Previously the identical logic was duplicated for each resource.)
 */
static int increase_core_file_limit(void)
{
	FAIL_IF(raise_rlimit_for_core(RLIMIT_CORE));
	FAIL_IF(raise_rlimit_for_core(RLIMIT_FSIZE));

	return TEST_PASS;
}
/*
 * Child side of the test: allocate pkeys, set AMR/IAMR/UAMOR expectations
 * in the shared area, then crash so the kernel writes a core file for the
 * parent to inspect.
 */
static int child(struct shared_info *info)
{
	bool disable_execute = true;
	int pkey1, pkey2, pkey3;
	int *ptr, ret;

	/* Wait until parent fills out the initial register values. */
	ret = wait_parent(&info->child_sync);
	if (ret)
		return ret;

	ret = increase_core_file_limit();
	FAIL_IF(ret);

	/* Get some pkeys so that we can change their bits in the AMR. */
	pkey1 = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE);
	if (pkey1 < 0) {
		/* No execute-disable support; fall back to a plain key. */
		pkey1 = sys_pkey_alloc(0, 0);
		FAIL_IF(pkey1 < 0);

		disable_execute = false;
	}

	pkey2 = sys_pkey_alloc(0, 0);
	FAIL_IF(pkey2 < 0);

	pkey3 = sys_pkey_alloc(0, 0);
	FAIL_IF(pkey3 < 0);

	/* AMR: access-disable (0b11) for pkey1, write-disable (0b10) for pkey2 */
	info->amr |= 3ul << pkeyshift(pkey1) | 2ul << pkeyshift(pkey2);

	if (disable_execute)
		info->iamr |= 1ul << pkeyshift(pkey1);
	else
		info->iamr &= ~(1ul << pkeyshift(pkey1));

	info->iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));

	/* UAMOR: mark pkey1 and pkey2 user-modifiable */
	info->uamor |= 3ul << pkeyshift(pkey1) | 3ul << pkeyshift(pkey2);

	printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
	       user_write, info->amr, pkey1, pkey2, pkey3);

	set_amr(info->amr);

	/*
	 * We won't use pkey3. This tests whether the kernel restores the UAMOR
	 * permissions after a key is freed.
	 */
	sys_pkey_free(pkey3);

	/* Timestamp used by the parent to reject stale core files. */
	info->core_time = time(NULL);

	/* Crash. */
	ptr = 0;
	*ptr = 1;

	/* Shouldn't get here. */
	FAIL_IF(true);

	return TEST_FAIL;
}
/*
 * Return the file size if @filename exists and is at least as new as the
 * child's crash, or TEST_FAIL if it doesn't exist or is stale.
 * (The old comment claimed "zero if not", which was never what the code
 * returned.)
 */
static off_t try_core_file(const char *filename, struct shared_info *info,
			   pid_t pid)
{
	struct stat buf;
	int ret;

	/* pid is unused; kept for interface symmetry with the callers. */
	(void)pid;

	ret = stat(filename, &buf);
	if (ret == -1)
		return TEST_FAIL;

	/* Make sure we're not using a stale core file. */
	return buf.st_mtime >= info->core_time ? buf.st_size : TEST_FAIL;
}
static Elf64_Nhdr *next_note(Elf64_Nhdr *nhdr)
{
return (void *) nhdr + sizeof(*nhdr) +
__ALIGN_KERNEL(nhdr->n_namesz, 4) +
__ALIGN_KERNEL(nhdr->n_descsz, 4);
}
/*
 * Parse the mapped core file at @ehdr (@core_size bytes) and verify that
 * the NT_PPC_PKEY note matches the AMR/IAMR/UAMOR values the child
 * published in @info.
 *
 * Fix: the program-header walk advanced with `phdr += e_phentsize`, which
 * is pointer arithmetic in units of sizeof(Elf64_Phdr) — i.e. it stepped
 * e_phentsize *entries* instead of e_phentsize *bytes*. It only worked by
 * luck because core dumps put PT_NOTE first. Step through void * instead.
 */
static int check_core_file(struct shared_info *info, Elf64_Ehdr *ehdr,
			   off_t core_size)
{
	unsigned long *regs;
	Elf64_Phdr *phdr;
	Elf64_Nhdr *nhdr;
	size_t phdr_size;
	void *p = ehdr, *note;
	int ret;

	ret = memcmp(ehdr->e_ident, ELFMAG, SELFMAG);
	FAIL_IF(ret);

	FAIL_IF(ehdr->e_type != ET_CORE);
	FAIL_IF(ehdr->e_machine != EM_PPC64);
	FAIL_IF(ehdr->e_phoff == 0 || ehdr->e_phnum == 0);

	/*
	 * e_phnum is at most 65535 so calculating the size of the
	 * program header cannot overflow.
	 */
	phdr_size = sizeof(*phdr) * ehdr->e_phnum;

	/* Sanity check the program header table location. */
	FAIL_IF(ehdr->e_phoff + phdr_size < ehdr->e_phoff);
	FAIL_IF(ehdr->e_phoff + phdr_size > core_size);

	/*
	 * Find the PT_NOTE segment. e_phentsize is a byte count, so advance
	 * via void * to avoid scaling it by sizeof(*phdr).
	 */
	for (phdr = p + ehdr->e_phoff;
	     (void *) phdr < p + ehdr->e_phoff + phdr_size;
	     phdr = (void *) phdr + ehdr->e_phentsize)
		if (phdr->p_type == PT_NOTE)
			break;

	FAIL_IF((void *) phdr >= p + ehdr->e_phoff + phdr_size);

	/* Find the NT_PPC_PKEY note. */
	for (nhdr = p + phdr->p_offset;
	     (void *) nhdr < p + phdr->p_offset + phdr->p_filesz;
	     nhdr = next_note(nhdr))
		if (nhdr->n_type == NT_PPC_PKEY)
			break;

	FAIL_IF((void *) nhdr >= p + phdr->p_offset + phdr->p_filesz);
	FAIL_IF(nhdr->n_descsz == 0);

	/* The note payload is AMR, IAMR, UAMOR as three 64-bit words. */
	p = nhdr;
	note = p + sizeof(*nhdr) + __ALIGN_KERNEL(nhdr->n_namesz, 4);

	regs = (unsigned long *) note;

	printf("%-30s AMR: %016lx IAMR: %016lx UAMOR: %016lx\n",
	       core_read_running, regs[0], regs[1], regs[2]);

	FAIL_IF(regs[0] != info->amr);
	FAIL_IF(regs[1] != info->iamr);
	FAIL_IF(regs[2] != info->uamor);

	return TEST_PASS;
}
/*
 * Parent side: publish the initial AMR/IAMR/UAMOR to the child, wait for
 * it to crash, then locate and validate the resulting core file.
 *
 * Fixes: the fd was leaked when mmap() failed (goto out skipped close),
 * and the mmap failure check now uses MAP_FAILED instead of (void *) -1.
 */
static int parent(struct shared_info *info, pid_t pid)
{
	char *filenames, *filename[3];
	int fd, i, ret, status;
	unsigned long regs[3];
	off_t core_size;
	void *core;

	/*
	 * Get the initial values for AMR, IAMR and UAMOR and communicate them
	 * to the child.
	 */
	ret = ptrace_read_regs(pid, NT_PPC_PKEY, regs, 3);
	PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync, "PKEYs not supported");
	PARENT_FAIL_IF(ret, &info->child_sync);

	info->amr = regs[0];
	info->iamr = regs[1];
	info->uamor = regs[2];

	/* Wake up child so that it can set itself up. */
	ret = prod_child(&info->child_sync);
	PARENT_FAIL_IF(ret, &info->child_sync);

	ret = wait(&status);
	if (ret != pid) {
		printf("Child's exit status not captured\n");
		return TEST_FAIL;
	} else if (!WIFSIGNALED(status) || !WCOREDUMP(status)) {
		printf("Child didn't dump core\n");
		return TEST_FAIL;
	}

	/* Construct array of core file names to try. */
	filename[0] = filenames = malloc(PATH_MAX);
	if (!filenames) {
		perror("Error allocating memory");
		return TEST_FAIL;
	}

	ret = snprintf(filename[0], PATH_MAX, "core-pkey.%d", pid);
	if (ret < 0 || ret >= PATH_MAX) {
		ret = TEST_FAIL;
		goto out;
	}

	/* Second candidate lives right after the first in the same buffer. */
	filename[1] = filename[0] + ret + 1;
	ret = snprintf(filename[1], PATH_MAX - ret - 1, "core.%d", pid);
	if (ret < 0 || ret >= PATH_MAX - ret - 1) {
		ret = TEST_FAIL;
		goto out;
	}
	filename[2] = "core";

	for (i = 0; i < 3; i++) {
		core_size = try_core_file(filename[i], info, pid);
		if (core_size != TEST_FAIL)
			break;
	}

	if (i == 3) {
		printf("Couldn't find core file\n");
		ret = TEST_FAIL;
		goto out;
	}

	fd = open(filename[i], O_RDONLY);
	if (fd == -1) {
		perror("Error opening core file");
		ret = TEST_FAIL;
		goto out;
	}

	core = mmap(NULL, core_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (core == MAP_FAILED) {
		perror("Error mmapping core file");
		ret = TEST_FAIL;
		close(fd);	/* don't leak the fd on the error path */
		goto out;
	}

	ret = check_core_file(info, core, core_size);

	munmap(core, core_size);
	close(fd);
	unlink(filename[i]);

out:
	free(filenames);

	return ret;
}
/*
 * Install @core_pattern as the kernel's core_pattern. Skips the test when
 * not running with enough privilege; fails on any other write error.
 */
static int write_core_pattern(const char *core_pattern)
{
	int err = write_file(core_pattern_file, core_pattern,
			     strlen(core_pattern));

	if (!err)
		return TEST_PASS;

	SKIP_IF_MSG(err == -EPERM, "Try with root privileges");
	perror("Error writing to core_pattern file");
	return TEST_FAIL;
}
/*
 * Read the current core_pattern; if its core file name is not predictable
 * by this test, temporarily replace it with "core-pkey.%p".
 *
 * On success *core_pattern_ owns a heap copy of the ORIGINAL pattern (for
 * later restore) and *changed_ says whether we overwrote it.
 */
static int setup_core_pattern(char **core_pattern_, bool *changed_)
{
	char *core_pattern;
	size_t len;
	int ret;

	core_pattern = malloc(PATH_MAX);
	if (!core_pattern) {
		perror("Error allocating memory");
		return TEST_FAIL;
	}

	ret = read_file(core_pattern_file, core_pattern, PATH_MAX - 1, &len);
	if (ret) {
		perror("Error reading core_pattern file");
		ret = TEST_FAIL;
		goto out;
	}

	core_pattern[len] = '\0';

	/* Check whether we can predict the name of the core file. */
	if (!strcmp(core_pattern, "core") || !strcmp(core_pattern, "core.%p"))
		*changed_ = false;
	else {
		ret = write_core_pattern("core-pkey.%p");
		if (ret)
			goto out;

		*changed_ = true;
	}

	*core_pattern_ = core_pattern;
	ret = TEST_PASS;

out:
	/* Only keep the buffer when we're handing it to the caller. */
	if (ret)
		free(core_pattern);

	return ret;
}
/*
 * Test driver: set up a predictable core_pattern, share a shared_info
 * segment with a forked child, let the child crash, and have the parent
 * validate the pkey registers recorded in the core file.
 *
 * Fixes: destroy_child_sync() was called on info->child_sync AFTER
 * shmdt(info) had already unmapped the segment (use-after-detach), and
 * the shmat() result was never checked.
 */
static int core_pkey(void)
{
	char *core_pattern;
	bool changed_core_pattern;
	struct shared_info *info;
	int shm_id;
	int ret;
	pid_t pid;

	ret = setup_core_pattern(&core_pattern, &changed_core_pattern);
	if (ret)
		return ret;

	shm_id = shmget(IPC_PRIVATE, sizeof(*info), 0777 | IPC_CREAT);
	info = shmat(shm_id, NULL, 0);
	if (info == (void *) -1) {
		perror("shmat() failed");
		shmctl(shm_id, IPC_RMID, NULL);
		free(core_pattern);
		return TEST_FAIL;
	}

	ret = init_child_sync(&info->child_sync);
	if (ret)
		return ret;

	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		ret = TEST_FAIL;
	} else if (pid == 0)
		ret = child(info);
	else
		ret = parent(info, pid);

	if (pid) {
		/* Tear down the sync object while the segment is mapped. */
		destroy_child_sync(&info->child_sync);
		shmdt(info);
		shmctl(shm_id, IPC_RMID, NULL);

		if (changed_core_pattern)
			write_core_pattern(core_pattern);
	} else {
		shmdt(info);
	}

	free(core_pattern);

	return ret;
}
/* Register the test with the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(core_pkey, "core_pkey");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/core-pkey.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "ptrace-vsx.h"
/* Tracer and Tracee Shared Data */
int shm_id;			/* SysV shm segment for the handshake flags */
int *cptr, *pptr;		/* child / parent views of the segment */

unsigned long fp_load[VEC_MAX];		/* values the child loads into VSX */
unsigned long fp_load_new[VEC_MAX];	/* values the tracer writes back */
unsigned long fp_store[VEC_MAX];	/* child's post-trace register dump */
/*
 * Child: load known values into the VSX/VMX registers, signal the tracer,
 * wait to be traced, then verify the registers now hold the values the
 * tracer wrote (fp_load_new). Exits 0 on match, 1 otherwise.
 */
void vsx(void)
{
	int ret;

	cptr = (int *)shmat(shm_id, NULL, 0);
	loadvsx(fp_load, 0);
	cptr[1] = 1;		/* tell parent the registers are loaded */

	/* Spin until the tracer is done rewriting our registers. */
	while (!cptr[0])
		asm volatile("" : : : "memory");
	shmdt((void *) cptr);

	storevsx(fp_store, 0);
	ret = compare_vsx_vmx(fp_store, fp_load_new);
	if (ret)
		exit(1);
	exit(0);
}
/*
 * Tracer: read back the child's VSX/VMX registers, check they match the
 * initial fp_load values, then overwrite them with fp_load_new for the
 * child to verify.
 */
int trace_vsx(pid_t child)
{
	unsigned long vsx[VSX_MAX];
	unsigned long vmx[VMX_MAX + 2][2];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_vsx(child, vsx));
	FAIL_IF(validate_vsx(vsx, fp_load));
	FAIL_IF(show_vmx(child, vmx));
	FAIL_IF(validate_vmx(vmx, fp_load));

	memset(vsx, 0, sizeof(vsx));
	memset(vmx, 0, sizeof(vmx));
	load_vsx_vmx(fp_load_new, vsx, vmx);

	FAIL_IF(write_vsx(child, vsx));
	FAIL_IF(write_vmx(child, vmx));
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
/*
 * Test entry point: seed the expected register images, fork the child,
 * trace it, release it, and collect its verdict from the exit status.
 */
int ptrace_vsx(void)
{
	pid_t pid;
	int ret, status, i;

	SKIP_IF_MSG(!have_hwcap(PPC_FEATURE_HAS_VSX), "Don't have VSX");

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);

	/* rand() is unseeded, so both images are deterministic per run. */
	for (i = 0; i < VEC_MAX; i++)
		fp_load[i] = i + rand();

	for (i = 0; i < VEC_MAX; i++)
		fp_load_new[i] = i + 2 * rand();

	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}

	if (pid == 0)
		vsx();

	if (pid) {
		pptr = (int *)shmat(shm_id, NULL, 0);
		/* Wait for the child to load its registers. */
		while (!pptr[1])
			asm volatile("" : : : "memory");

		ret = trace_vsx(pid);
		if (ret) {
			kill(pid, SIGTERM);
			shmdt((void *)pptr);
			shmctl(shm_id, IPC_RMID, NULL);
			return TEST_FAIL;
		}

		pptr[0] = 1;	/* release the child's spin loop */
		shmdt((void *)pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
			TEST_PASS;
	}
	return TEST_PASS;
}
/* Register the test with the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_vsx, "ptrace_vsx");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers in the TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "tm.h"
#include "ptrace-vsx.h"
int shm_id;			/* SysV shm segment for the handshake flags */
int *cptr, *pptr;		/* child / parent views of the segment */

unsigned long fp_load[VEC_MAX];		/* loaded in suspended state */
unsigned long fp_load_new[VEC_MAX];	/* loaded inside the transaction */
unsigned long fp_store[VEC_MAX];	/* child's post-abort register dump */
unsigned long fp_load_ckpt[VEC_MAX];	/* checkpointed (pre-tbegin.) image */
unsigned long fp_load_ckpt_new[VEC_MAX];	/* image the tracer writes */
/* Helpers called by name from inline asm ("bl ..."), hence attribute used. */

/* Load the suspended-state VSX image. */
__attribute__((used)) void load_vsx(void)
{
	loadvsx(fp_load, 0);
}

/* Load the transactional VSX image. */
__attribute__((used)) void load_vsx_new(void)
{
	loadvsx(fp_load_new, 0);
}

/* Load the checkpointed (pre-transaction) VSX image. */
__attribute__((used)) void load_vsx_ckpt(void)
{
	loadvsx(fp_load_ckpt, 0);
}

/* Signal the parent we are suspended, then spin until it finishes tracing. */
__attribute__((used)) void wait_parent(void)
{
	cptr[2] = 1;
	while (!cptr[1])
		asm volatile("" : : : "memory");
}
/*
 * Child: establish a checkpointed VSX image, enter a transaction, load a
 * new image, suspend, load yet another image, and wait to be traced.
 * The tracer aborts the transaction; after the abort the registers must
 * hold the checkpointed values the tracer wrote (fp_load_ckpt_new).
 */
void tm_spd_vsx(void)
{
	unsigned long result, texasr;
	int ret;

	cptr = (int *)shmat(shm_id, NULL, 0);

trans:
	cptr[2] = 0;
	asm __volatile__(
		"bl load_vsx_ckpt;"

		"1: ;"
		"tbegin.;"
		"beq 2f;"

		"bl load_vsx_new;"
		"tsuspend.;"
		"bl load_vsx;"
		"bl wait_parent;"
		"tresume.;"

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		"2: ;"		/* Transaction abort handler */
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [sprn_texasr] "i" (SPRN_TEXASR)
		: "memory", "r0", "r3", "r4",
		  "r7", "r8", "r9", "r10", "r11", "lr"
		);

	if (result) {
		/* Only proceed once the tracer has actually intervened. */
		if (!cptr[0])
			goto trans;
		shmdt((void *)cptr);

		storevsx(fp_store, 0);
		ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
		if (ret)
			exit(1);
		exit(0);
	}
	shmdt((void *)cptr);
	exit(1);
}
/*
 * Tracer: with the child suspended in its transaction, validate the
 * running and checkpointed VSX/VMX state, rewrite the checkpointed state,
 * then release the child.
 */
int trace_tm_spd_vsx(pid_t child)
{
	unsigned long vsx[VSX_MAX];
	unsigned long vmx[VMX_MAX + 2][2];

	FAIL_IF(start_trace(child));
	/* Running state is the image loaded after tsuspend. (fp_load) */
	FAIL_IF(show_vsx(child, vsx));
	FAIL_IF(validate_vsx(vsx, fp_load));
	FAIL_IF(show_vmx(child, vmx));
	FAIL_IF(validate_vmx(vmx, fp_load));
	/* Checkpointed state is the pre-tbegin. image (fp_load_ckpt) */
	FAIL_IF(show_vsx_ckpt(child, vsx));
	FAIL_IF(validate_vsx(vsx, fp_load_ckpt));
	FAIL_IF(show_vmx_ckpt(child, vmx));
	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));

	memset(vsx, 0, sizeof(vsx));
	memset(vmx, 0, sizeof(vmx));

	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);

	FAIL_IF(write_vsx_ckpt(child, vsx));
	FAIL_IF(write_vmx_ckpt(child, vmx));

	/* pptr[0]: tracer done flag; pptr[1]: releases wait_parent() */
	pptr[0] = 1;
	pptr[1] = 1;
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
int ptrace_tm_spd_vsx(void)
{
pid_t pid;
int ret, status, i;
SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
for (i = 0; i < 128; i++) {
fp_load[i] = 1 + rand();
fp_load_new[i] = 1 + 2 * rand();
fp_load_ckpt[i] = 1 + 3 * rand();
fp_load_ckpt_new[i] = 1 + 4 * rand();
}
pid = fork();
if (pid < 0) {
perror("fork() failed");
return TEST_FAIL;
}
if (pid == 0)
tm_spd_vsx();
if (pid) {
pptr = (int *)shmat(shm_id, NULL, 0);
while (!pptr[2])
asm volatile("" : : : "memory");
ret = trace_tm_spd_vsx(pid);
if (ret) {
kill(pid, SIGKILL);
shmdt((void *)pptr);
shmctl(shm_id, IPC_RMID, NULL);
return TEST_FAIL;
}
shmdt((void *)pptr);
ret = wait(&status);
shmctl(shm_id, IPC_RMID, NULL);
if (ret != pid) {
printf("Child's exit status not captured\n");
return TEST_FAIL;
}
return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
TEST_PASS;
}
return TEST_PASS;
}
/* Register the test with the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_tm_spd_vsx, "ptrace_tm_spd_vsx");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c |
// SPDX-License-Identifier: GPL-2.0+
#include <asm/unistd.h>
#include <linux/hw_breakpoint.h>
#include <linux/ptrace.h>
#include <memory.h>
#include <stdlib.h>
#include <sys/wait.h>
#include "utils.h"
/*
* Child subroutine that performs a load on the address, then traps
*/
void same_watch_addr_child(unsigned long *addr);
/* Address of the ld instruction in same_watch_addr_child() */
extern char same_watch_addr_load[];
/* Address of the end trap instruction in same_watch_addr_child() */
extern char same_watch_addr_trap[];
/*
* Child subroutine that performs a load on the first address, then a load on
* the second address (with no instructions separating this from the first
* load), then traps.
*/
void perf_then_ptrace_child(unsigned long *first_addr, unsigned long *second_addr);
/* Address of the first ld instruction in perf_then_ptrace_child() */
extern char perf_then_ptrace_load1[];
/* Address of the second ld instruction in perf_then_ptrace_child() */
extern char perf_then_ptrace_load2[];
/* Address of the end trap instruction in perf_then_ptrace_child() */
extern char perf_then_ptrace_trap[];
/* Thin raw-syscall wrappers: keep every ptrace use explicit and uniform. */
static inline long sys_ptrace(long request, pid_t pid, unsigned long addr, unsigned long data)
{
	return syscall(__NR_ptrace, request, pid, addr, data);
}

/* Mark the calling (child) process as traced by its parent. */
static long ptrace_traceme(void)
{
	return sys_ptrace(PTRACE_TRACEME, 0, 0, 0);
}

/* Fetch the tracee's GPR state into *result. */
static long ptrace_getregs(pid_t pid, struct pt_regs *result)
{
	return sys_ptrace(PTRACE_GETREGS, pid, 0, (unsigned long)result);
}

/* Write the tracee's GPR state from *result. */
static long ptrace_setregs(pid_t pid, struct pt_regs *result)
{
	return sys_ptrace(PTRACE_SETREGS, pid, 0, (unsigned long)result);
}

/* Resume the tracee, optionally delivering a signal. */
static long ptrace_cont(pid_t pid, long signal)
{
	return sys_ptrace(PTRACE_CONT, pid, 0, signal);
}

/* Execute exactly one instruction in the tracee. */
static long ptrace_singlestep(pid_t pid, long signal)
{
	return sys_ptrace(PTRACE_SINGLESTEP, pid, 0, signal);
}

/* Query the hardware debug capabilities available for the tracee. */
static long ppc_ptrace_gethwdbginfo(pid_t pid, struct ppc_debug_info *dbginfo)
{
	return sys_ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, (unsigned long)dbginfo);
}

/* Install a hardware watchpoint; returns a handle or negative error. */
static long ppc_ptrace_sethwdbg(pid_t pid, struct ppc_hw_breakpoint *bp_info)
{
	return sys_ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, (unsigned long)bp_info);
}

/* Remove a previously installed hardware watchpoint by handle. */
static long ppc_ptrace_delhwdbg(pid_t pid, int bp_id)
{
	return sys_ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0L, bp_id);
}
/* Read the tracee's program counter (NIP) into *pc. */
static long ptrace_getreg_pc(pid_t pid, void **pc)
{
	struct pt_regs regs;
	long err;

	err = ptrace_getregs(pid, &regs);
	if (err)
		return err;

	*pc = (void *)regs.nip;

	return 0;
}

/* Set the tracee's program counter (NIP) via a read-modify-write. */
static long ptrace_setreg_pc(pid_t pid, void *pc)
{
	struct pt_regs regs;
	long err;

	err = ptrace_getregs(pid, &regs);
	if (err)
		return err;

	regs.nip = (unsigned long)pc;

	err = ptrace_setregs(pid, &regs);
	if (err)
		return err;

	return 0;
}
/* Raw perf_event_open(2) wrapper (no glibc symbol exists). */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Fill *attr for a user-space read watchpoint on [addr, addr + len). */
static void perf_user_event_attr_set(struct perf_event_attr *attr, void *addr, u64 len)
{
	memset(attr, 0, sizeof(struct perf_event_attr));

	attr->type		= PERF_TYPE_BREAKPOINT;
	attr->size		= sizeof(struct perf_event_attr);
	attr->bp_type		= HW_BREAKPOINT_R;
	attr->bp_addr		= (u64)addr;
	attr->bp_len		= len;
	attr->exclude_kernel	= 1;
	attr->exclude_hv	= 1;
}

/* Open a read-watchpoint perf counter on the child; returns the event fd. */
static int perf_watchpoint_open(pid_t child_pid, void *addr, u64 len)
{
	struct perf_event_attr attr;

	perf_user_event_attr_set(&attr, addr, len);
	return perf_event_open(&attr, child_pid, -1, -1, 0);
}
/*
 * Fetch the current value of a perf counter. The kernel returns the count
 * as a single u64 via read() on the event fd. Returns 0 on success, -1 on
 * a short or failed read.
 */
static int perf_read_counter(int perf_fd, u64 *count)
{
	return read(perf_fd, count, sizeof(*count)) == sizeof(*count) ? 0 : -1;
}
/*
 * Fill *info for a ptrace hardware breakpoint of the given trigger @type
 * covering [addr, addr + len); len == 0 requests an exact-address match.
 */
static void ppc_ptrace_init_breakpoint(struct ppc_hw_breakpoint *info,
				       int type, void *addr, int len)
{
	info->version = 1;
	info->trigger_type = type;
	info->condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	info->addr = (u64)addr;
	info->addr2 = (u64)addr + len;
	info->condition_value = 0;
	if (!len)
		info->addr_mode = PPC_BREAKPOINT_MODE_EXACT;
	else
		info->addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
}
/*
 * Checks if we can place at least 2 watchpoints on the child process
 * (one for perf, one for ptrace); skips the test otherwise.
 */
static int check_watchpoints(pid_t pid)
{
	struct ppc_debug_info dbginfo;

	FAIL_IF_MSG(ppc_ptrace_gethwdbginfo(pid, &dbginfo), "PPC_PTRACE_GETHWDBGINFO failed");
	SKIP_IF_MSG(dbginfo.num_data_bps <= 1, "Not enough data watchpoints (need at least 2)");

	return 0;
}
/*
 * Wrapper around a plain fork() call that sets up the child for
 * ptrace-ing. Both the parent and child return from this, though
 * the child is stopped until ptrace_cont(pid) is run by the parent.
 */
static int ptrace_fork_child(pid_t *pid)
{
	int status;

	*pid = fork();

	if (*pid < 0)
		FAIL_IF_MSG(1, "Failed to fork child");

	if (!*pid) {
		/* Child: opt into tracing, then stop until the parent resumes us. */
		FAIL_IF_EXIT_MSG(ptrace_traceme(), "PTRACE_TRACEME failed");
		FAIL_IF_EXIT_MSG(raise(SIGSTOP), "Child failed to raise SIGSTOP");
	} else {
		/* Synchronise on child SIGSTOP */
		FAIL_IF_MSG(waitpid(*pid, &status, 0) == -1, "Failed to wait for child");
		FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	}

	return 0;
}
/*
 * Tests the interaction between ptrace and perf watching the same data.
 *
 * We expect ptrace to take 'priority', as it is has before-execute
 * semantics.
 *
 * The perf counter should not be incremented yet because perf has after-execute
 * semantics. E.g., if ptrace changes the child PC, we don't even execute the
 * instruction at all.
 *
 * When the child is stopped for ptrace, we test both continue and single step.
 * Both should increment the perf counter. We also test changing the PC somewhere
 * different and stepping, which should not increment the perf counter.
 */
int same_watch_addr_test(void)
{
	struct ppc_hw_breakpoint bp_info;	/* ptrace breakpoint info */
	int bp_id;	/* Breakpoint handle of ptrace watchpoint */
	int perf_fd;	/* File descriptor of perf performance counter */
	u64 perf_count;	/* Most recently fetched perf performance counter value */
	pid_t pid;	/* PID of child process */
	void *pc;	/* Most recently fetched child PC value */
	int status;	/* Stop status of child after waitpid */
	unsigned long value;	/* Dummy value to be read/written to by child */
	int err;

	err = ptrace_fork_child(&pid);
	if (err)
		return err;

	if (!pid) {
		same_watch_addr_child(&value);
		exit(1);	/* child routine traps; reaching here is a failure */
	}

	err = check_watchpoints(pid);
	if (err)
		return err;

	/* Place a perf watchpoint counter on value */
	perf_fd = perf_watchpoint_open(pid, &value, sizeof(value));
	FAIL_IF_MSG(perf_fd < 0, "Failed to open perf performance counter");

	/* Place a ptrace watchpoint on value */
	ppc_ptrace_init_breakpoint(&bp_info, PPC_BREAKPOINT_TRIGGER_READ, &value, sizeof(value));
	bp_id = ppc_ptrace_sethwdbg(pid, &bp_info);
	FAIL_IF_MSG(bp_id < 0, "Failed to set ptrace watchpoint");

	/* Let the child run. It should stop on the ptrace watchpoint */
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_load, "Child did not stop on load instruction");

	/*
	 * We stopped before executing the load, so perf should not have
	 * recorded any events yet
	 */
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 0, "perf recorded unexpected event");

	/* Single stepping over the load should increment the perf counter */
	FAIL_IF_MSG(ptrace_singlestep(pid, 0), "Failed to single step child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_load + 4, "Failed to single step load instruction");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 1, "perf counter did not increment");

	/*
	 * Set up a ptrace watchpoint on the value again and trigger it.
	 * The perf counter should not have incremented because we do not
	 * execute the load yet.
	 */
	FAIL_IF_MSG(ppc_ptrace_delhwdbg(pid, bp_id), "Failed to remove old ptrace watchpoint");
	bp_id = ppc_ptrace_sethwdbg(pid, &bp_info);
	FAIL_IF_MSG(bp_id < 0, "Failed to set ptrace watchpoint");
	FAIL_IF_MSG(ptrace_setreg_pc(pid, same_watch_addr_load), "Failed to set child PC");
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_load, "Child did not stop on load trap");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 1, "perf counter should not have changed");

	/* Continuing over the load should increment the perf counter */
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_trap, "Child did not stop on end trap");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 2, "perf counter did not increment");

	/*
	 * If we set the child PC back to the load instruction, then continue,
	 * we should reach the end trap (because ptrace is one-shot) and have
	 * another perf event.
	 */
	FAIL_IF_MSG(ptrace_setreg_pc(pid, same_watch_addr_load), "Failed to set child PC");
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_trap, "Child did not stop on end trap");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 3, "perf counter did not increment");

	/*
	 * If we set the child PC back to the load instruction, set a ptrace
	 * watchpoint on the load, then continue, we should immediately get
	 * the ptrace trap without incrementing the perf counter
	 */
	FAIL_IF_MSG(ppc_ptrace_delhwdbg(pid, bp_id), "Failed to remove old ptrace watchpoint");
	bp_id = ppc_ptrace_sethwdbg(pid, &bp_info);
	FAIL_IF_MSG(bp_id < 0, "Failed to set ptrace watchpoint");
	FAIL_IF_MSG(ptrace_setreg_pc(pid, same_watch_addr_load), "Failed to set child PC");
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_load, "Child did not stop on load instruction");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 3, "perf counter should not have changed");

	/*
	 * If we change the PC while stopped on the load instruction, we should
	 * not increment the perf counter (because ptrace is before-execute,
	 * perf is after-execute).
	 */
	FAIL_IF_MSG(ptrace_setreg_pc(pid, same_watch_addr_load + 4), "Failed to set child PC");
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != same_watch_addr_trap, "Child did not stop on end trap");
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 3, "perf counter should not have changed");

	/* Clean up child */
	FAIL_IF_MSG(kill(pid, SIGKILL) != 0, "Failed to kill child");

	return 0;
}
/*
 * Tests the interaction between ptrace and perf when:
 * 1. perf watches a value
 * 2. ptrace watches a different value
 * 3. The perf value is read, then the ptrace value is read immediately after
 *
 * A breakpoint implementation may accidentally misattribute/skip one of
 * the ptrace or perf handlers, as interrupt based work is done after perf
 * and before ptrace.
 *
 * We expect the perf counter to increment before the ptrace watchpoint
 * triggers.
 */
int perf_then_ptrace_test(void)
{
	struct ppc_hw_breakpoint bp_info;	/* ptrace breakpoint info */
	int bp_id;	/* Breakpoint handle of ptrace watchpoint */
	int perf_fd;	/* File descriptor of perf performance counter */
	u64 perf_count;	/* Most recently fetched perf performance counter value */
	pid_t pid;	/* PID of child process */
	void *pc;	/* Most recently fetched child PC value */
	int status;	/* Stop status of child after waitpid */
	unsigned long perf_value;	/* Dummy value to be watched by perf */
	unsigned long ptrace_value;	/* Dummy value to be watched by ptrace */
	int err;

	err = ptrace_fork_child(&pid);
	if (err)
		return err;

	/*
	 * If we are the child, run a subroutine that reads the perf value,
	 * then reads the ptrace value with consecutive load instructions
	 */
	if (!pid) {
		perf_then_ptrace_child(&perf_value, &ptrace_value);
		exit(0);
	}

	err = check_watchpoints(pid);
	if (err)
		return err;

	/* Place a perf watchpoint counter */
	perf_fd = perf_watchpoint_open(pid, &perf_value, sizeof(perf_value));
	FAIL_IF_MSG(perf_fd < 0, "Failed to open perf performance counter");

	/* Place a ptrace watchpoint */
	ppc_ptrace_init_breakpoint(&bp_info, PPC_BREAKPOINT_TRIGGER_READ,
				   &ptrace_value, sizeof(ptrace_value));
	bp_id = ppc_ptrace_sethwdbg(pid, &bp_info);
	FAIL_IF_MSG(bp_id < 0, "Failed to set ptrace watchpoint");

	/* Let the child run. It should stop on the ptrace watchpoint */
	FAIL_IF_MSG(ptrace_cont(pid, 0), "Failed to continue child");

	FAIL_IF_MSG(waitpid(pid, &status, 0) == -1, "Failed to wait for child");
	FAIL_IF_MSG(!WIFSTOPPED(status), "Child is not stopped");
	FAIL_IF_MSG(ptrace_getreg_pc(pid, &pc), "Failed to get child PC");
	FAIL_IF_MSG(pc != perf_then_ptrace_load2, "Child did not stop on ptrace load");

	/* perf should have recorded the first load */
	FAIL_IF_MSG(perf_read_counter(perf_fd, &perf_count), "Failed to read perf counter");
	FAIL_IF_MSG(perf_count != 1, "perf counter did not increment");

	/* Clean up child */
	FAIL_IF_MSG(kill(pid, SIGKILL) != 0, "Failed to kill child");

	return 0;
}
/* Run both sub-tests; a non-zero bit in err means at least one failed. */
int main(int argc, char *argv[])
{
	int err = 0;

	err |= test_harness(same_watch_addr_test, "same_watch_addr");
	err |= test_harness(perf_then_ptrace_test, "perf_then_ptrace");

	return err;
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers in TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
#include "tm.h"
/* Tracer and Tracee Shared Data */
int shm_id;			/* SysV shm segment for the handshake flags */
unsigned long *cptr, *pptr;	/* child / parent views of the segment */

double a = FPR_1;		/* checkpointed FPR image */
double b = FPR_2;		/* transactional FPR image */
double c = FPR_3;		/* FPR image the tracer writes back */
/*
 * Child: load GPR/FPR set 1, start a transaction and load set 2, suspend
 * and signal the parent, then spin ("b .") until the tracer's intervention
 * aborts the transaction. After the abort, validate that the registers
 * hold the checkpointed values the tracer wrote (GPR_3 / c).
 */
void tm_gpr(void)
{
	unsigned long gpr_buf[18];
	unsigned long result, texasr;
	double fpr_buf[32];

	printf("Starting the child\n");
	cptr = (unsigned long *)shmat(shm_id, NULL, 0);

trans:
	cptr[1] = 0;
	asm __volatile__(
		ASM_LOAD_GPR_IMMED(gpr_1)
		ASM_LOAD_FPR(flt_1)

		"1: ;"
		"tbegin.;"
		"beq 2f;"
		ASM_LOAD_GPR_IMMED(gpr_2)
		ASM_LOAD_FPR(flt_2)
		"tsuspend.;"
		"li 7, 1;"
		"stw 7, 0(%[cptr1]);"	/* cptr[1] = 1: tell parent we're ready */
		"tresume.;"
		"b .;"			/* spin until the tracer aborts us */

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		/* Transaction abort handler */
		"2: ;"
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2),
		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
		[flt_2] "b" (&b), [cptr1] "b" (&cptr[1])
		: "memory", "r0", "r7", "r8", "r9", "r10",
		"r11", "r12", "r13", "r14", "r15", "r16",
		"r17", "r18", "r19", "r20", "r21", "r22",
		"r23", "r24", "r25", "r26", "r27", "r28",
		"r29", "r30", "r31"
		);

	if (result) {
		/* Only proceed once the tracer has actually intervened. */
		if (!cptr[0])
			goto trans;

		shmdt((void *)cptr);
		store_gpr(gpr_buf);
		store_fpr(fpr_buf);

		if (validate_gpr(gpr_buf, GPR_3))
			exit(1);

		if (validate_fpr_double(fpr_buf, c))
			exit(1);

		exit(0);
	}
	shmdt((void *)cptr);
	exit(1);
}
/*
 * Tracer: while the child's transaction is suspended, check that the
 * running state shows the transactional values (GPR_2/FPR_2) and the
 * checkpointed state the pre-tbegin values (GPR_1/FPR_1), then rewrite
 * the checkpointed set to GPR_3/FPR_3 for the child to validate.
 */
int trace_tm_gpr(pid_t child)
{
	unsigned long gpr[18];
	__u64 fpr[32];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_gpr(child, gpr));
	FAIL_IF(validate_gpr(gpr, GPR_2));		/* running state */
	FAIL_IF(show_fpr(child, fpr));
	FAIL_IF(validate_fpr(fpr, FPR_2_REP));
	FAIL_IF(show_ckpt_fpr(child, fpr));
	FAIL_IF(validate_fpr(fpr, FPR_1_REP));		/* checkpointed state */
	FAIL_IF(show_ckpt_gpr(child, gpr));
	FAIL_IF(validate_gpr(gpr, GPR_1));
	FAIL_IF(write_ckpt_gpr(child, GPR_3));		/* rewrite checkpoint */
	FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));

	pptr[0] = 1;	/* tell the child the rewrite is done */
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
/*
 * Parent entry point: fork the transactional child, wait for it to
 * suspend inside the transaction, run the tracer checks, then reap the
 * child and report its verdict.
 */
int ptrace_tm_gpr(void)
{
	pid_t pid;
	int ret, status;

	SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
	SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
	if (shm_id < 0) {
		/* Previously unchecked: shmat() would then fail and the
		 * spin on pptr[1] would dereference (void *)-1. */
		perror("shmget() failed");
		return TEST_FAIL;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		shmctl(shm_id, IPC_RMID, NULL);
		return TEST_FAIL;
	}
	if (pid == 0)
		tm_gpr();	/* never returns; exit()s */

	if (pid) {
		pptr = (unsigned long *)shmat(shm_id, NULL, 0);

		/* Wait for the child to suspend inside the transaction. */
		while (!pptr[1])
			asm volatile("" : : : "memory");

		ret = trace_tm_gpr(pid);
		if (ret) {
			kill(pid, SIGTERM);
			/*
			 * Detach and remove the segment on failure too
			 * (matches ptrace-tm-spr.c); previously both
			 * were leaked on this path.
			 */
			shmdt((void *)pptr);
			shmctl(shm_id, IPC_RMID, NULL);
			return TEST_FAIL;
		}

		shmdt((void *)pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
			TEST_PASS;
	}
	return TEST_PASS;
}
int main(int argc, char *argv[])
{
	/* Standard selftest entry point. */
	return test_harness(ptrace_tm_gpr, "ptrace_tm_gpr");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test TM SPR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "tm.h"
/* Tracee and tracer shared data */
struct shared {
	int flag;			/* tracer -> child: regs captured, stop retrying */
	struct tm_spr_regs regs;	/* TM SPR values the tracer read via ptrace */
};
unsigned long tfhar;
int shm_id;
struct shared *cptr, *pptr;
int shm_id1;
int *cptr1, *pptr1;
#define TM_KVM_SCHED 0xe0000001ac000001
/*
 * Check the TM SPRs the tracer captured: TFHAR must match the address
 * the child computed, and if TEXASR reports a KVM-reschedule abort
 * (TM_KVM_SCHED), TFIAR must be zero.
 */
int validate_tm_spr(struct tm_spr_regs *regs)
{
	FAIL_IF(regs->tm_tfhar != tfhar);
	FAIL_IF((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0));

	return TEST_PASS;
}
/*
 * Child: compute the address TFHAR should hold (relative to the
 * "tbegin."), start a transaction, suspend it and flag the tracer,
 * spin until the tracer-induced abort, then validate the TM SPRs the
 * tracer stored into shared memory.
 */
void tm_spr(void)
{
	unsigned long result, texasr;
	int ret;

	cptr = (struct shared *)shmat(shm_id, NULL, 0);
	cptr1 = (int *)shmat(shm_id1, NULL, 0);

trans:
	/* cptr1[0]: "transaction suspended, tracer may start" flag */
	cptr1[0] = 0;
	asm __volatile__(
		"1: ;"
		/* TM failover handler should follow "tbegin.;" */
		"mflr 31;"
		"bl 4f;"	/* $ = TFHAR - 12 */
		"4: ;"
		"mflr %[tfhar];"	/* capture PC to derive expected TFHAR */
		"mtlr 31;"

		"tbegin.;"
		"beq 2f;"		/* branch taken on abort */

		"tsuspend.;"
		"li 8, 1;"
		"sth 8, 0(%[cptr1]);"	/* signal the tracer */
		"tresume.;"
		"b .;"			/* spin until aborted */

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		"2: ;"			/* transaction abort handler */
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [tfhar] "=r" (tfhar), [res] "=r" (result),
		[texasr] "=r" (texasr), [cptr1] "=b" (cptr1)
		: [sprn_texasr] "i"  (SPRN_TEXASR)
		: "memory", "r0", "r8", "r31"
		);

	/* There are 2 32bit instructions before tbegin. */
	tfhar += 12;

	if (result) {
		/* Retry until the tracer has captured the TM SPRs. */
		if (!cptr->flag)
			goto trans;

		ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs);
		shmdt((void *)cptr);
		shmdt((void *)cptr1);
		if (ret)
			exit(1);
		exit(0);
	}
	/* tend. completed without an abort: the test cannot proceed */
	shmdt((void *)cptr);
	shmdt((void *)cptr1);
	exit(1);
}
/*
 * Tracer: capture the child's TM SPRs into the shared page, print
 * them, flag the child that validation may proceed, and detach.
 */
int trace_tm_spr(pid_t child)
{
	FAIL_IF(start_trace(child));
	FAIL_IF(show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs));

	printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_tfhar,
				pptr->regs.tm_texasr, pptr->regs.tm_tfiar);

	pptr->flag = 1;		/* child may now validate pptr->regs */
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
/*
 * Parent entry point: create the two shared segments, fork the
 * transactional child, trace it once it is suspended, then reap it and
 * translate its exit status into a test verdict.
 */
int ptrace_tm_spr(void)
{
	int rc, wstatus;
	pid_t child;

	SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
	SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");

	shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT);
	shm_id1 = shmget(IPC_PRIVATE, sizeof(int), 0777|IPC_CREAT);

	child = fork();
	if (child < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}

	if (child == 0) {
		tm_spr();	/* never returns; exit()s */
		return TEST_PASS;
	}

	/* Parent/tracer path from here on. */
	pptr = (struct shared *)shmat(shm_id, NULL, 0);
	pptr1 = (int *)shmat(shm_id1, NULL, 0);

	/* Spin until the child signals it is suspended in the transaction. */
	while (!pptr1[0])
		asm volatile("" : : : "memory");

	rc = trace_tm_spr(child);
	if (rc) {
		kill(child, SIGKILL);
		shmdt((void *)pptr);
		shmdt((void *)pptr1);
		shmctl(shm_id, IPC_RMID, NULL);
		shmctl(shm_id1, IPC_RMID, NULL);
		return TEST_FAIL;
	}

	shmdt((void *)pptr);
	shmdt((void *)pptr1);

	rc = wait(&wstatus);
	shmctl(shm_id, IPC_RMID, NULL);
	shmctl(shm_id1, IPC_RMID, NULL);
	if (rc != child) {
		printf("Child's exit status not captured\n");
		return TEST_FAIL;
	}

	return (WIFEXITED(wstatus) && WEXITSTATUS(wstatus)) ? TEST_FAIL :
		TEST_PASS;
}
int main(int argc, char *argv[])
{
	/* Standard selftest entry point. */
	return test_harness(ptrace_tm_spr, "ptrace_tm_spr");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "ptrace-tar.h"
/* Tracer and Tracee Shared Data */
int shm_id;
int *cptr;
int *pptr;
/*
 * Child: load TAR/PPR/DSCR with known values and flag the tracer, wait
 * while the tracer reads and then rewrites them, then read the SPRs
 * back and verify the tracer-written values (TAR_2/PPR_2/DSCR_2) took
 * effect.
 */
void tar(void)
{
	unsigned long reg[3];
	int ret;

	cptr = (int *)shmat(shm_id, NULL, 0);
	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
			user_write, TAR_1, PPR_1, DSCR_1);

	mtspr(SPRN_TAR, TAR_1);
	mtspr(SPRN_PPR, PPR_1);
	mtspr(SPRN_DSCR, DSCR_1);

	cptr[2] = 1;	/* SPRs loaded; tracer may start */

	/* Wait on parent */
	while (!cptr[0])
		asm volatile("" : : : "memory");

	reg[0] = mfspr(SPRN_TAR);
	reg[1] = mfspr(SPRN_PPR);
	reg[2] = mfspr(SPRN_DSCR);

	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
			user_read, reg[0], reg[1], reg[2]);

	/* Unblock the parent now */
	cptr[1] = 1;
	shmdt((int *)cptr);

	ret = validate_tar_registers(reg, TAR_2, PPR_2, DSCR_2);
	if (ret)
		exit(1);
	exit(0);
}
/*
 * Tracer: read TAR/PPR/DSCR from the running child and check they hold
 * the values the child loaded before blocking.
 */
int trace_tar(pid_t child)
{
	unsigned long reg[3];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_tar_registers(child, reg));
	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
			ptrace_read_running, reg[0], reg[1], reg[2]);

	FAIL_IF(validate_tar_registers(reg, TAR_1, PPR_1, DSCR_1));
	FAIL_IF(stop_trace(child));
	return TEST_PASS;
}
/*
 * Tracer: write new TAR/PPR/DSCR values into the child via ptrace; the
 * child verifies them after being unblocked.
 */
int trace_tar_write(pid_t child)
{
	FAIL_IF(start_trace(child));
	FAIL_IF(write_tar_registers(child, TAR_2, PPR_2, DSCR_2));
	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
			ptrace_write_running, TAR_2, PPR_2, DSCR_2);

	FAIL_IF(stop_trace(child));
	return TEST_PASS;
}
/*
 * Parent entry point: fork the child, verify TAR/PPR/DSCR can be read
 * and written through ptrace while the child runs, then reap the child
 * and report its verdict.
 */
int ptrace_tar(void)
{
	pid_t pid;
	int ret, status;

	// TAR was added in v2.07
	SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_2_07), "TAR requires ISA 2.07 compatible hardware");

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}

	if (pid == 0)
		tar();	/* never returns; exit()s */

	if (pid) {
		pptr = (int *)shmat(shm_id, NULL, 0);
		pptr[0] = 0;
		pptr[1] = 0;

		/* Wait for the child to finish loading its SPRs. */
		while (!pptr[2])
			asm volatile("" : : : "memory");

		ret = trace_tar(pid);
		if (ret)
			goto fail;

		ret = trace_tar_write(pid);
		if (ret)
			goto fail;

		/* Unblock the child now */
		pptr[0] = 1;

		/* Wait on child */
		while (!pptr[1])
			asm volatile("" : : : "memory");

		shmdt((int *)pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			/* Was TEST_PASS: losing the child is a failure. */
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
		       TEST_PASS;
	}
	return TEST_PASS;

fail:
	/*
	 * Previously these error paths returned without terminating the
	 * blocked child or removing the SysV segment, leaking both.
	 */
	kill(pid, SIGTERM);
	shmdt((int *)pptr);
	shmctl(shm_id, IPC_RMID, NULL);
	return ret;
}
int main(int argc, char *argv[])
{
	/* Standard selftest entry point. */
	return test_harness(ptrace_tar, "ptrace_tar");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tar.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers in TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
#include "tm.h"
/* Tracer and Tracee Shared Data */
int shm_id;
int *cptr, *pptr;
double a = FPR_1;
double b = FPR_2;
double c = FPR_3;
double d = FPR_4;
/*
 * Called from the suspended transaction via "bl wait_parent": flag the
 * tracer that the child is ready, then spin until the tracer has
 * finished rewriting the checkpointed state.  Marked used because the
 * only caller is inline asm.
 */
__attribute__((used)) void wait_parent(void)
{
	cptr[2] = 1;		/* child is suspended and ready */
	while (!cptr[1])	/* tracer done? */
		asm volatile("" : : : "memory");
}
/*
 * Child: load GPR/FPR set 1 before tbegin, set 2 (GPRs) inside the
 * transaction, then set 4 while suspended; block in wait_parent() so
 * the tracer can inspect running vs checkpointed state and rewrite the
 * checkpoint.  After the abort, verify the tracer-written values
 * (GPR_3 / c) are in place.
 */
void tm_spd_gpr(void)
{
	unsigned long gpr_buf[18];
	unsigned long result, texasr;
	double fpr_buf[32];

	cptr = (int *)shmat(shm_id, NULL, 0);

trans:
	/* cptr[2]: set by wait_parent() once the child is suspended */
	cptr[2] = 0;
	asm __volatile__(
		ASM_LOAD_GPR_IMMED(gpr_1)	/* pre-transaction GPRs */
		ASM_LOAD_FPR(flt_1)		/* pre-transaction FPRs */

		"1: ;"
		"tbegin.;"			/* start transaction */
		"beq 2f;"			/* branch taken on abort */

		ASM_LOAD_GPR_IMMED(gpr_2)	/* transactional GPRs */

		"tsuspend.;"
		ASM_LOAD_GPR_IMMED(gpr_4)	/* suspended-mode GPRs */
		ASM_LOAD_FPR(flt_4)		/* suspended-mode FPRs */
		"bl wait_parent;"		/* block until tracer is done */
		"tresume.;"

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		/* Transaction abort handler */
		"2: ;"
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
		[flt_4] "b" (&d)
		: "memory", "r0", "r5", "r6", "r7",
		"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
		"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
		"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
		);

	if (result) {
		/* cptr[0]: set by tracer once checkpointed state was rewritten */
		if (!cptr[0])
			goto trans;

		shmdt((void *)cptr);
		store_gpr(gpr_buf);
		store_fpr(fpr_buf);

		/* Registers must now hold the tracer-written values */
		if (validate_gpr(gpr_buf, GPR_3))
			exit(1);

		if (validate_fpr_double(fpr_buf, c))
			exit(1);

		exit(0);
	}
	/* tend. completed without an abort: the test cannot proceed */
	shmdt((void *)cptr);
	exit(1);
}
/*
 * Tracer: the running state must show the suspended-mode values
 * (GPR_4/FPR_4) while the checkpointed state shows the pre-transaction
 * values (GPR_1/FPR_1); rewrite the checkpoint to GPR_3/FPR_3 and
 * release the child.
 */
int trace_tm_spd_gpr(pid_t child)
{
	unsigned long gpr[18];
	__u64 fpr[32];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_gpr(child, gpr));
	FAIL_IF(validate_gpr(gpr, GPR_4));		/* running state */
	FAIL_IF(show_fpr(child, fpr));
	FAIL_IF(validate_fpr(fpr, FPR_4_REP));
	FAIL_IF(show_ckpt_fpr(child, fpr));
	FAIL_IF(validate_fpr(fpr, FPR_1_REP));		/* checkpointed state */
	FAIL_IF(show_ckpt_gpr(child, gpr));
	FAIL_IF(validate_gpr(gpr, GPR_1));
	FAIL_IF(write_ckpt_gpr(child, GPR_3));		/* rewrite checkpoint */
	FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));

	pptr[0] = 1;	/* checkpoint rewritten; child may validate */
	pptr[1] = 1;	/* release wait_parent() in the child */
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
/*
 * Parent entry point: fork the transactional child, trace it once it
 * blocks in the suspended transaction, then reap it and translate its
 * exit status into a test verdict.
 */
int ptrace_tm_spd_gpr(void)
{
	int rc, wstatus;
	pid_t child;

	SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
	SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);

	child = fork();
	if (child < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}

	if (child == 0) {
		tm_spd_gpr();	/* never returns; exit()s */
		return TEST_PASS;
	}

	/* Parent/tracer path from here on. */
	pptr = (int *)shmat(shm_id, NULL, 0);
	pptr[0] = 0;
	pptr[1] = 0;

	/* Spin until the child reports it is suspended in the transaction. */
	while (!pptr[2])
		asm volatile("" : : : "memory");

	rc = trace_tm_spd_gpr(child);
	if (rc) {
		kill(child, SIGTERM);
		shmdt((void *)pptr);
		shmctl(shm_id, IPC_RMID, NULL);
		return TEST_FAIL;
	}

	shmdt((void *)pptr);

	rc = wait(&wstatus);
	shmctl(shm_id, IPC_RMID, NULL);
	if (rc != child) {
		printf("Child's exit status not captured\n");
		return TEST_FAIL;
	}

	return (WIFEXITED(wstatus) && WEXITSTATUS(wstatus)) ? TEST_FAIL :
		TEST_PASS;
}
int main(int argc, char *argv[])
{
	/* Standard selftest entry point. */
	return test_harness(ptrace_tm_spd_gpr, "ptrace_tm_spd_gpr");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Ptrace test for hw breakpoints
*
* Based on tools/testing/selftests/breakpoints/breakpoint_test.c
*
* This test forks and the parent then traces the child doing various
* types of ptrace enabled breakpoints
*
* Copyright (C) 2018 Michael Neuling, IBM Corporation.
*/
#include <sys/ptrace.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <linux/limits.h>
#include "ptrace.h"
#include "reg.h"
#define SPRN_PVR 0x11F
#define PVR_8xx 0x00500000
bool is_8xx;
/*
* Use volatile on all global var so that compiler doesn't
* optimise their load/stores. Otherwise selftest can fail.
*/
static volatile __u64 glvar;
#define DAWR_MAX_LEN 512
static volatile __u8 big_var[DAWR_MAX_LEN] __attribute__((aligned(512)));
#define A_LEN 6
#define B_LEN 6
struct gstruct {
__u8 a[A_LEN]; /* double word aligned */
__u8 b[B_LEN]; /* double word unaligned */
};
static volatile struct gstruct gstruct __attribute__((aligned(512)));
static volatile char cwd[PATH_MAX] __attribute__((aligned(8)));
/* Query the child's hardware debug capabilities; abort the test on error. */
static void get_dbginfo(pid_t child_pid, struct ppc_debug_info *dbginfo)
{
	long rc = ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, dbginfo);

	if (rc != 0) {
		perror("Can't get breakpoint info");
		exit(-1);
	}
}
/* DAWR availability is advertised through the features bitmask. */
static bool dawr_present(struct ppc_debug_info *dbginfo)
{
	return (dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_DAWR) != 0;
}
/*
 * Store to glvar through a pointer of exactly the requested width
 * (1/2/4/8 bytes) so a write watchpoint can trigger on that access
 * size.  The access widths must not be altered.
 */
static void write_var(int len)
{
	volatile __u8 *pcvar;
	volatile __u16 *psvar;
	volatile __u32 *pivar;
	volatile __u64 *plvar;

	switch (len) {
	case 1:
		pcvar = (volatile __u8 *)&glvar;
		*pcvar = 0xff;
		break;
	case 2:
		psvar = (volatile __u16 *)&glvar;
		*psvar = 0xffff;
		break;
	case 4:
		pivar = (volatile __u32 *)&glvar;
		*pivar = 0xffffffff;
		break;
	case 8:
		plvar = (volatile __u64 *)&glvar;
		*plvar = 0xffffffffffffffffLL;
		break;
	}
}
/*
 * Load from glvar with exactly the requested access width (1/2/4/8
 * bytes) so a read watchpoint can trigger on that size.  The results
 * are intentionally discarded.
 */
static void read_var(int len)
{
	__u8 cvar __attribute__((unused));
	__u16 svar __attribute__((unused));
	__u32 ivar __attribute__((unused));
	__u64 lvar __attribute__((unused));

	switch (len) {
	case 1:
		cvar = (volatile __u8)glvar;
		break;
	case 2:
		svar = (volatile __u16)glvar;
		break;
	case 4:
		ivar = (volatile __u32)glvar;
		break;
	case 8:
		lvar = (volatile __u64)glvar;
		break;
	}
}
/*
 * Child workload: performs one memory access per watchpoint the parent
 * installs, in exactly the order run_tests() installs them -- the two
 * sides must stay in lock-step.  rand() is never seeded here, so the
 * random read/write choices are identical on every run (assuming no
 * srand() elsewhere -- TODO confirm).
 */
static void test_workload(void)
{
	__u8 cvar __attribute__((unused));
	__u32 ivar __attribute__((unused));
	int len = 0;

	if (ptrace(PTRACE_TRACEME, 0, NULL, 0)) {
		perror("Child can't be traced?");
		exit(-1);
	}

	/* Wake up father so that it sets up the first test */
	kill(getpid(), SIGUSR1);

	/* PTRACE_SET_DEBUGREG, WO test */
	for (len = 1; len <= sizeof(glvar); len <<= 1)
		write_var(len);

	/* PTRACE_SET_DEBUGREG, RO test */
	for (len = 1; len <= sizeof(glvar); len <<= 1)
		read_var(len);

	/* PTRACE_SET_DEBUGREG, RW test */
	for (len = 1; len <= sizeof(glvar); len <<= 1) {
		if (rand() % 2)
			read_var(len);
		else
			write_var(len);
	}

	/* PTRACE_SET_DEBUGREG, Kernel Access Userspace test */
	syscall(__NR_getcwd, &cwd, PATH_MAX);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, WO test */
	write_var(1);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RO test */
	read_var(1);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RW test */
	if (rand() % 2)
		write_var(1);
	else
		read_var(1);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, Kernel Access Userspace test */
	syscall(__NR_getcwd, &cwd, PATH_MAX);

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */
	gstruct.a[rand() % A_LEN] = 'a';

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RO test */
	cvar = gstruct.a[rand() % A_LEN];

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RW test */
	if (rand() % 2)
		gstruct.a[rand() % A_LEN] = 'a';
	else
		cvar = gstruct.a[rand() % A_LEN];

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, WO test */
	gstruct.b[rand() % B_LEN] = 'b';

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RO test */
	cvar = gstruct.b[rand() % B_LEN];

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RW test */
	if (rand() % 2)
		gstruct.b[rand() % B_LEN] = 'b';
	else
		cvar = gstruct.b[rand() % B_LEN];

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE, RW test */
	if (rand() % 2)
		*((int *)(gstruct.a + 4)) = 10;
	else
		ivar = *((int *)(gstruct.a + 4));

	/* PPC_PTRACE_SETHWDEBUG. DAWR_MAX_LEN. RW test */
	if (rand() % 2)
		big_var[rand() % DAWR_MAX_LEN] = 'a';
	else
		cvar = big_var[rand() % DAWR_MAX_LEN];

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
	gstruct.a[rand() % A_LEN] = 'a';

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
	cvar = gstruct.b[rand() % B_LEN];

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
	gstruct.a[rand() % A_LEN] = 'a';

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
	cvar = gstruct.a[rand() % A_LEN];
}
/*
 * Wait for the child to stop with a SIGTRAP whose si_addr falls inside
 * the watched range, rounded out to doubleword boundaries.  Any other
 * stop, signal, or out-of-range address fails the whole test.
 */
static void check_success(pid_t child_pid, const char *name, const char *type,
			  unsigned long saddr, int len)
{
	int status;
	siginfo_t siginfo;
	unsigned long eaddr = (saddr + len - 1) | 0x7;	/* round end up */

	saddr &= ~0x7;					/* round start down */

	/* Wait for the child to SIGTRAP */
	wait(&status);

	ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &siginfo);

	if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
	    (unsigned long)siginfo.si_addr < saddr ||
	    (unsigned long)siginfo.si_addr > eaddr) {
		printf("%s, %s, len: %d: Fail\n", name, type, len);
		exit(-1);
	}

	printf("%s, %s, len: %d: Ok\n", name, type, len);

	if (!is_8xx) {
		/*
		 * For ptrace registered watchpoint, signal is generated
		 * before executing load/store. Singlestep the instruction
		 * and then continue the test.
		 */
		ptrace(PTRACE_SINGLESTEP, child_pid, NULL, 0);
		wait(NULL);
	}
}
/* Program the legacy DABR-style debug register; abort the test on error. */
static void ptrace_set_debugreg(pid_t child_pid, unsigned long wp_addr)
{
	long rc = ptrace(PTRACE_SET_DEBUGREG, child_pid, 0, wp_addr);

	if (rc != 0) {
		perror("PTRACE_SET_DEBUGREG failed");
		exit(-1);
	}
}
/*
 * Install a hardware watchpoint in the child and return the positive
 * handle the kernel assigns; abort the test on error.
 */
static int ptrace_sethwdebug(pid_t child_pid, struct ppc_hw_breakpoint *info)
{
	int handle;

	handle = ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, info);
	if (handle <= 0) {
		perror("PPC_PTRACE_SETHWDEBUG failed");
		exit(-1);
	}

	return handle;
}
/* Remove a previously installed watchpoint by handle; abort on error. */
static void ptrace_delhwdebug(pid_t child_pid, int wh)
{
	long rc = ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, wh);

	if (rc < 0) {
		perror("PPC_PTRACE_DELHWDEBUG failed");
		exit(-1);
	}
}
#define DABR_READ_SHIFT 0
#define DABR_WRITE_SHIFT 1
#define DABR_TRANSLATION_SHIFT 2
/*
 * Exercise the legacy PTRACE_SET_DEBUGREG interface: program a DABR
 * watchpoint on glvar as write-only, read-only and read-write, and
 * check the child traps for every access width (1/2/4/8 bytes).  The
 * low bits of the address encode the R/W/translation flags.
 */
static int test_set_debugreg(pid_t child_pid)
{
	unsigned long wp_addr = (unsigned long)&glvar;
	char *name = "PTRACE_SET_DEBUGREG";
	int len;

	/* PTRACE_SET_DEBUGREG, WO test*/
	wp_addr &= ~0x7UL;	/* DABR address is doubleword aligned */
	wp_addr |= (1UL << DABR_WRITE_SHIFT);
	wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
	for (len = 1; len <= sizeof(glvar); len <<= 1) {
		ptrace_set_debugreg(child_pid, wp_addr);
		ptrace(PTRACE_CONT, child_pid, NULL, 0);
		check_success(child_pid, name, "WO", wp_addr, len);
	}

	/* PTRACE_SET_DEBUGREG, RO test */
	wp_addr &= ~0x7UL;
	wp_addr |= (1UL << DABR_READ_SHIFT);
	wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
	for (len = 1; len <= sizeof(glvar); len <<= 1) {
		ptrace_set_debugreg(child_pid, wp_addr);
		ptrace(PTRACE_CONT, child_pid, NULL, 0);
		check_success(child_pid, name, "RO", wp_addr, len);
	}

	/* PTRACE_SET_DEBUGREG, RW test */
	wp_addr &= ~0x7UL;
	wp_addr |= (1UL << DABR_READ_SHIFT);	/* normalized from "1Ul" */
	wp_addr |= (1UL << DABR_WRITE_SHIFT);
	wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
	for (len = 1; len <= sizeof(glvar); len <<= 1) {
		ptrace_set_debugreg(child_pid, wp_addr);
		ptrace(PTRACE_CONT, child_pid, NULL, 0);
		check_success(child_pid, name, "RW", wp_addr, len);
	}

	/* Clear the watchpoint before the next test group. */
	ptrace_set_debugreg(child_pid, 0);
	return 0;
}
/*
 * Verify a DABR watchpoint on a userspace buffer (cwd) also triggers
 * when the kernel accesses that buffer on the child's behalf (the
 * child issues a getcwd syscall into it).
 */
static int test_set_debugreg_kernel_userspace(pid_t child_pid)
{
	unsigned long wp_addr = (unsigned long)cwd;
	char *name = "PTRACE_SET_DEBUGREG";

	/* PTRACE_SET_DEBUGREG, Kernel Access Userspace test */
	wp_addr &= ~0x7UL;
	wp_addr |= (1UL << DABR_READ_SHIFT);	/* normalized from "1Ul" */
	wp_addr |= (1UL << DABR_WRITE_SHIFT);
	wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
	ptrace_set_debugreg(child_pid, wp_addr);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "Kernel Access Userspace", wp_addr, 8);

	/* Clear the watchpoint before the next test group. */
	ptrace_set_debugreg(child_pid, 0);
	return 0;
}
/*
 * Fill in a ppc_hw_breakpoint request: EXACT mode when len is 0,
 * otherwise an inclusive range [addr, addr + len).
 */
static void get_ppc_hw_breakpoint(struct ppc_hw_breakpoint *info, int type,
				  unsigned long addr, int len)
{
	info->version = 1;
	info->trigger_type = type;
	info->condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	info->addr = (__u64)addr;
	info->addr2 = (__u64)addr + len;	/* range end (exclusive) */
	info->condition_value = 0;
	if (!len)
		info->addr_mode = PPC_BREAKPOINT_MODE_EXACT;
	else
		info->addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
}
/*
 * MODE_EXACT watchpoints on glvar: set WO, RO and RW triggers in turn,
 * continue the child and verify each resulting trap.
 */
static void test_sethwdebug_exact(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr = (unsigned long)&glvar;
	char *name = "PPC_PTRACE_SETHWDEBUG, MODE_EXACT";
	int len = 1; /* hardcoded in kernel */
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, WO test */
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, 0);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "WO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RO test */
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, 0);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RW test */
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, 0);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RW", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/*
 * MODE_EXACT watchpoint on the cwd buffer: must trap when the kernel
 * writes it on the child's behalf (getcwd syscall in the workload).
 */
static void test_sethwdebug_exact_kernel_userspace(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr = (unsigned long)&cwd;
	char *name = "PPC_PTRACE_SETHWDEBUG, MODE_EXACT";
	int len = 1; /* hardcoded in kernel */
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, Kernel Access Userspace test */
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, 0);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "Kernel Access Userspace", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/*
 * MODE_RANGE watchpoints over the doubleword-aligned gstruct.a: WO,
 * RO and RW triggers in turn, each verified against the workload's
 * matching access.
 */
static void test_sethwdebug_range_aligned(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr;
	char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED";
	int len;
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */
	wp_addr = (unsigned long)&gstruct.a;
	len = A_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "WO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RO test */
	wp_addr = (unsigned long)&gstruct.a;
	len = A_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RW test */
	wp_addr = (unsigned long)&gstruct.a;
	len = A_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RW", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/*
 * Two simultaneous MODE_RANGE watchpoints (needs num_data_bps > 1):
 * a write watch on gstruct.a and a read watch on gstruct.b, each
 * verified against its own workload access.
 */
static void test_multi_sethwdebug_range(pid_t child_pid)
{
	struct ppc_hw_breakpoint info1, info2;
	unsigned long wp_addr1, wp_addr2;
	char *name1 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED";
	char *name2 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED";
	int len1, len2;
	int wh1, wh2;

	wp_addr1 = (unsigned long)&gstruct.a;
	wp_addr2 = (unsigned long)&gstruct.b;
	len1 = A_LEN;
	len2 = B_LEN;
	get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
	get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
	wh1 = ptrace_sethwdebug(child_pid, &info1);

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
	wh2 = ptrace_sethwdebug(child_pid, &info2);

	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name1, "WO", wp_addr1, len1);

	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name2, "RO", wp_addr2, len2);

	ptrace_delhwdebug(child_pid, wh1);
	ptrace_delhwdebug(child_pid, wh2);
}
/*
 * Two watchpoints covering the SAME range (gstruct.a), one write- and
 * one read-triggered: both must fire for the workload's write then
 * read of that range.
 */
static void test_multi_sethwdebug_range_dawr_overlap(pid_t child_pid)
{
	struct ppc_hw_breakpoint info1, info2;
	unsigned long wp_addr1, wp_addr2;
	char *name = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap";
	int len1, len2;
	int wh1, wh2;

	wp_addr1 = (unsigned long)&gstruct.a;
	wp_addr2 = (unsigned long)&gstruct.a;
	len1 = A_LEN;
	len2 = A_LEN;
	get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
	get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
	wh1 = ptrace_sethwdebug(child_pid, &info1);

	/* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
	wh2 = ptrace_sethwdebug(child_pid, &info2);

	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "WO", wp_addr1, len1);

	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RO", wp_addr2, len2);

	ptrace_delhwdebug(child_pid, wh1);
	ptrace_delhwdebug(child_pid, wh2);
}
/*
 * MODE_RANGE watchpoints over the doubleword-UNaligned gstruct.b
 * (requires DAWR or 8xx): WO, RO and RW triggers in turn.
 */
static void test_sethwdebug_range_unaligned(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr;
	char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED";
	int len;
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, WO test */
	wp_addr = (unsigned long)&gstruct.b;
	len = B_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "WO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RO test */
	wp_addr = (unsigned long)&gstruct.b;
	len = B_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RO", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RW test */
	wp_addr = (unsigned long)&gstruct.b;
	len = B_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RW", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/*
 * Watch gstruct.b while the child accesses an int at gstruct.a + 4,
 * which overlaps the watched range but whose DAR points outside it.
 * NOTE(review): the trigger is WRITE-only although the label says "RW"
 * and the workload randomly reads or writes that int -- presumably the
 * unseeded rand() sequence always picks the matching access here;
 * confirm against the workload before changing either side.
 */
static void test_sethwdebug_range_unaligned_dar(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr;
	char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE";
	int len;
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE, RW test */
	wp_addr = (unsigned long)&gstruct.b;
	len = B_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RW", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/*
 * A single RW watchpoint spanning the maximum DAWR length (512 bytes)
 * over big_var; the workload touches a random byte inside it.
 */
static void test_sethwdebug_dawr_max_range(pid_t child_pid)
{
	struct ppc_hw_breakpoint info;
	unsigned long wp_addr;
	char *name = "PPC_PTRACE_SETHWDEBUG, DAWR_MAX_LEN";
	int len;
	int wh;

	/* PPC_PTRACE_SETHWDEBUG, DAWR_MAX_LEN, RW test */
	wp_addr = (unsigned long)big_var;
	len = DAWR_MAX_LEN;
	get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
	wh = ptrace_sethwdebug(child_pid, &info);
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	check_success(child_pid, name, "RW", wp_addr, len);
	ptrace_delhwdebug(child_pid, wh);
}
/* Set the breakpoints and check the child successfully trigger them */
/* Set the breakpoints and check the child successfully trigger them */
static void
run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
{
	/* Tests available on every supported CPU. */
	test_set_debugreg(child_pid);
	test_set_debugreg_kernel_userspace(child_pid);
	test_sethwdebug_exact(child_pid);
	test_sethwdebug_exact_kernel_userspace(child_pid);

	if (!(dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
		return;
	test_sethwdebug_range_aligned(child_pid);

	/* Unaligned and long-range watchpoints need DAWR (or 8xx). */
	if (!dawr && !is_8xx)
		return;
	test_sethwdebug_range_unaligned(child_pid);
	test_sethwdebug_range_unaligned_dar(child_pid);
	test_sethwdebug_dawr_max_range(child_pid);

	/* Dual-watchpoint tests need more than one data breakpoint. */
	if (dbginfo->num_data_bps <= 1)
		return;
	test_multi_sethwdebug_range(child_pid);
	test_multi_sethwdebug_range_dawr_overlap(child_pid);
}
/*
 * Fork the workload child, query the hardware breakpoint capabilities,
 * run every applicable watchpoint test against it, then let it exit.
 */
static int ptrace_hwbreak(void)
{
	pid_t child_pid;
	struct ppc_debug_info dbginfo;
	bool dawr;

	child_pid = fork();
	if (!child_pid) {
		test_workload();
		return 0;
	}

	/* Catch the stop from the SIGUSR1 the traced child sends itself. */
	wait(NULL);

	get_dbginfo(child_pid, &dbginfo);
	SKIP_IF_MSG(dbginfo.num_data_bps == 0, "No data breakpoints present");

	dawr = dawr_present(&dbginfo);
	run_tests(child_pid, &dbginfo, dawr);

	/* Let the child exit first. */
	ptrace(PTRACE_CONT, child_pid, NULL, 0);
	wait(NULL);

	/*
	 * Testcases exits immediately with -1 on any failure. If
	 * it has reached here, it means all tests were successful.
	 */
	return TEST_PASS;
}
int main(int argc, char **argv, char **envp)
{
	/* Detect the 8xx via the PVR; check_success() skips the
	 * single-step there because trap semantics differ. */
	is_8xx = mfspr(SPRN_PVR) == PVR_8xx;

	return test_harness(ptrace_hwbreak, "ptrace-hwbreak");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers in the TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "tm.h"
#include "ptrace-vsx.h"
int shm_id;
unsigned long *cptr, *pptr;
unsigned long fp_load[VEC_MAX];
unsigned long fp_store[VEC_MAX];
unsigned long fp_load_ckpt[VEC_MAX];
unsigned long fp_load_ckpt_new[VEC_MAX];
/* Called from inline asm ("bl load_vsx"): load the transactional VSX
 * values.  Marked used because the only caller is asm. */
__attribute__((used)) void load_vsx(void)
{
	loadvsx(fp_load, 0);
}
/* Called from inline asm ("bl load_vsx_ckpt"): load the VSX values that
 * become the checkpointed state once the transaction starts. */
__attribute__((used)) void load_vsx_ckpt(void)
{
	loadvsx(fp_load_ckpt, 0);
}
/*
 * Child: load the checkpoint VSX values, start a transaction and load
 * the transactional values, suspend and flag the tracer, then spin
 * until aborted.  After the abort, verify the registers hold the new
 * checkpointed values the tracer wrote (fp_load_ckpt_new).
 */
void tm_vsx(void)
{
	unsigned long result, texasr;
	int ret;

	cptr = (unsigned long *)shmat(shm_id, NULL, 0);

trans:
	/* cptr[1]: "transaction suspended, tracer may start" flag */
	cptr[1] = 0;
	asm __volatile__(
		"bl load_vsx_ckpt;"	/* pre-transaction (checkpoint) values */

		"1: ;"
		"tbegin.;"		/* start transaction */
		"beq 2f;"		/* branch taken on abort */

		"bl load_vsx;"		/* transactional values */
		"tsuspend.;"
		"li 7, 1;"
		"stw 7, 0(%[cptr1]);"	/* signal the tracer */
		"tresume.;"
		"b .;"			/* spin until aborted */

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		"2: ;"			/* transaction abort handler */
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1])
		: "memory", "r0", "r3", "r4",
		  "r7", "r8", "r9", "r10", "r11", "lr"
		);

	if (result) {
		/* cptr[0]: set by tracer once checkpointed state was rewritten */
		if (!cptr[0])
			goto trans;

		shmdt((void *)cptr);
		storevsx(fp_store, 0);
		ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
		if (ret)
			exit(1);
		exit(0);
	}
	/* tend. completed without an abort: the test cannot proceed */
	shmdt((void *)cptr);
	exit(1);
}
/*
 * Tracer: the running VSX/VMX state must show the transactional values
 * (fp_load) and the checkpointed state the pre-transaction values
 * (fp_load_ckpt); then write a new checkpointed set (fp_load_ckpt_new)
 * for the child to validate after the abort.
 */
int trace_tm_vsx(pid_t child)
{
	unsigned long vsx[VSX_MAX];
	unsigned long vmx[VMX_MAX + 2][2];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_vsx(child, vsx));
	FAIL_IF(validate_vsx(vsx, fp_load));		/* running state */
	FAIL_IF(show_vmx(child, vmx));
	FAIL_IF(validate_vmx(vmx, fp_load));
	FAIL_IF(show_vsx_ckpt(child, vsx));
	FAIL_IF(validate_vsx(vsx, fp_load_ckpt));	/* checkpointed state */
	FAIL_IF(show_vmx_ckpt(child, vmx));
	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));

	memset(vsx, 0, sizeof(vsx));
	memset(vmx, 0, sizeof(vmx));

	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);

	FAIL_IF(write_vsx_ckpt(child, vsx));		/* rewrite checkpoint */
	FAIL_IF(write_vmx_ckpt(child, vmx));

	pptr[0] = 1;	/* tell the child the rewrite is done */
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
int ptrace_tm_vsx(void)
{
pid_t pid;
int ret, status, i;
SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
for (i = 0; i < 128; i++) {
fp_load[i] = 1 + rand();
fp_load_ckpt[i] = 1 + 2 * rand();
fp_load_ckpt_new[i] = 1 + 3 * rand();
}
pid = fork();
if (pid < 0) {
perror("fork() failed");
return TEST_FAIL;
}
if (pid == 0)
tm_vsx();
if (pid) {
pptr = (unsigned long *)shmat(shm_id, NULL, 0);
while (!pptr[1])
asm volatile("" : : : "memory");
ret = trace_tm_vsx(pid);
if (ret) {
kill(pid, SIGKILL);
shmdt((void *)pptr);
shmctl(shm_id, IPC_RMID, NULL);
return TEST_FAIL;
}
shmdt((void *)pptr);
ret = wait(&status);
shmctl(shm_id, IPC_RMID, NULL);
if (ret != pid) {
printf("Child's exit status not captured\n");
return TEST_FAIL;
}
return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
TEST_PASS;
}
return TEST_PASS;
}
int main(int argc, char *argv[])
{
	/* Standard selftest entry point. */
	return test_harness(ptrace_tm_vsx, "ptrace_tm_vsx");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A ptrace test for testing PTRACE_SYSEMU, PTRACE_SETREGS and
* PTRACE_GETREG. This test basically create a child process that executes
* syscalls and the parent process check if it is being traced appropriated.
*
* This test is heavily based on tools/testing/selftests/x86/ptrace_syscall.c
* test, and it was adapted to run on Powerpc by
* Breno Leitao <[email protected]>
*/
#define _GNU_SOURCE
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/user.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <err.h>
#include <string.h>
#include <sys/auxv.h>
#include "utils.h"
/* Bitness-agnostic defines for user_regs_struct fields. */
#define user_syscall_nr gpr[0]
#define user_arg0 gpr[3]
#define user_arg1 gpr[4]
#define user_arg2 gpr[5]
#define user_arg3 gpr[6]
#define user_arg4 gpr[7]
#define user_arg5 gpr[8]
#define user_ip nip
#define PTRACE_SYSEMU 0x1d
static int nerrs;
/*
 * Block until the child reports an event, checking that the
 * notification is for the expected pid and is a ptrace trap
 * (CLD_TRAPPED).
 *
 * Note: errx(3) appends a newline itself, so the message strings must
 * not end in '\n' (the originals printed a spurious blank line).
 */
static void wait_trap(pid_t chld)
{
	siginfo_t si;

	if (waitid(P_PID, chld, &si, WEXITED|WSTOPPED) != 0)
		err(1, "waitid");
	if (si.si_pid != chld)
		errx(1, "got unexpected pid in event");
	if (si.si_code != CLD_TRAPPED)
		errx(1, "got unexpected event type %d", si.si_code);
}
/*
 * Core scenario: a child stops itself with SIGSTOP, then the parent uses
 * PTRACE_SYSEMU to trap a gettid() before it executes, verifies the
 * registers, rewinds and restarts the syscall, and finally injects a
 * different syscall (getpid) — exercising SYSEMU, GETREGS and SETREGS.
 *
 * Fix: the five PTRACE_GETREGS/SETREGS calls had "&regs" corrupted into
 * the mojibake "®s" (an HTML "&reg;" entity), which does not compile.
 */
static void test_ptrace_syscall_restart(void)
{
	int status;
	struct pt_regs regs;
	pid_t chld;

	printf("[RUN]\tptrace-induced syscall restart\n");

	chld = fork();
	if (chld < 0)
		err(1, "fork");
	/*
	 * Child process is running 4 syscalls after ptrace.
	 *
	 * 1) getpid()
	 * 2) gettid()
	 * 3) tgkill() -> Send SIGSTOP
	 * 4) gettid() -> Where the tests will happen essentially
	 */
	if (chld == 0) {
		if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
			err(1, "PTRACE_TRACEME");

		pid_t pid = getpid(), tid = syscall(SYS_gettid);

		printf("\tChild will make one syscall\n");
		syscall(SYS_tgkill, pid, tid, SIGSTOP);

		/* Distinctive argument values so the parent can verify them. */
		syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
		_exit(0);
	}
	/* Parent process below */

	/* Wait for SIGSTOP sent by tgkill above. */
	if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
		err(1, "waitpid");

	printf("[RUN]\tSYSEMU\n");
	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
		err(1, "PTRACE_SYSEMU");
	wait_trap(chld);

	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_GETREGS");

	/*
	 * Ptrace trapped prior to executing the syscall, thus r3 still has
	 * the syscall number instead of the sys_gettid() result
	 */
	if (regs.user_syscall_nr != SYS_gettid ||
	    regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
	    regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
	    regs.user_arg4 != 14 || regs.user_arg5 != 15) {
		printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
			(unsigned long)regs.user_syscall_nr,
			(unsigned long)regs.user_arg0,
			(unsigned long)regs.user_arg1,
			(unsigned long)regs.user_arg2,
			(unsigned long)regs.user_arg3,
			(unsigned long)regs.user_arg4,
			(unsigned long)regs.user_arg5);
		nerrs++;
	} else {
		printf("[OK]\tInitial nr and args are correct\n");
	}

	printf("[RUN]\tRestart the syscall (ip = 0x%lx)\n",
	       (unsigned long)regs.user_ip);

	/*
	 * Rewind to retry the same syscall again. This will basically test
	 * the rewind process together with PTRACE_SETREGS and PTRACE_GETREGS.
	 */
	regs.user_ip -= 4;	/* one instruction back to the sc */
	if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_SETREGS");

	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
		err(1, "PTRACE_SYSEMU");
	wait_trap(chld);

	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_GETREGS");

	if (regs.user_syscall_nr != SYS_gettid ||
	    regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
	    regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
	    regs.user_arg4 != 14 || regs.user_arg5 != 15) {
		printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
			(unsigned long)regs.user_syscall_nr,
			(unsigned long)regs.user_arg0,
			(unsigned long)regs.user_arg1,
			(unsigned long)regs.user_arg2,
			(unsigned long)regs.user_arg3,
			(unsigned long)regs.user_arg4,
			(unsigned long)regs.user_arg5);
		nerrs++;
	} else {
		printf("[OK]\tRestarted nr and args are correct\n");
	}

	printf("[RUN]\tChange nr and args and restart the syscall (ip = 0x%lx)\n",
	       (unsigned long)regs.user_ip);

	/*
	 * Inject a new syscall (getpid) in the same place the previous
	 * syscall (gettid), rewind and re-execute.
	 */
	regs.user_syscall_nr = SYS_getpid;
	regs.user_arg0 = 20;
	regs.user_arg1 = 21;
	regs.user_arg2 = 22;
	regs.user_arg3 = 23;
	regs.user_arg4 = 24;
	regs.user_arg5 = 25;
	regs.user_ip -= 4;

	if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_SETREGS");

	if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
		err(1, "PTRACE_SYSEMU");
	wait_trap(chld);

	if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
		err(1, "PTRACE_GETREGS");

	/* Check that ptrace stopped at the new syscall that was
	 * injected, and guarantee that it haven't executed, i.e, user_args
	 * contain the arguments and not the syscall return value, for
	 * instance.
	 */
	if (regs.user_syscall_nr != SYS_getpid
		|| regs.user_arg0 != 20 || regs.user_arg1 != 21
		|| regs.user_arg2 != 22 || regs.user_arg3 != 23
		|| regs.user_arg4 != 24 || regs.user_arg5 != 25) {
		printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
			(unsigned long)regs.user_syscall_nr,
			(unsigned long)regs.user_arg0,
			(unsigned long)regs.user_arg1,
			(unsigned long)regs.user_arg2,
			(unsigned long)regs.user_arg3,
			(unsigned long)regs.user_arg4,
			(unsigned long)regs.user_arg5);
		nerrs++;
	} else {
		printf("[OK]\tReplacement nr and args are correct\n");
	}

	/* Let the child run to completion and check it exited cleanly. */
	if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
		err(1, "PTRACE_CONT");

	if (waitpid(chld, &status, 0) != chld)
		err(1, "waitpid");

	/* Guarantee that the process executed properly, returning 0 */
	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
		printf("[FAIL]\tChild failed\n");
		nerrs++;
	} else {
		printf("[OK]\tChild exited cleanly\n");
	}
}
/*
 * Selftest body: run the SYSEMU/restart scenario.
 * Returns the number of failed sub-checks (0 == pass).
 */
int ptrace_syscall(void)
{
	test_ptrace_syscall_restart();
	return nerrs;
}
/* Entry point: run ptrace_syscall() under the powerpc selftest harness. */
int main(void)
{
	return test_harness(ptrace_syscall, "ptrace_syscall");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-syscall.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Ptrace test for Memory Protection Key registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
* Copyright (C) 2018 IBM Corporation.
*/
#include "ptrace.h"
#include "child.h"
#ifndef __NR_pkey_alloc
#define __NR_pkey_alloc 384
#endif
#ifndef __NR_pkey_free
#define __NR_pkey_free 385
#endif
#ifndef NT_PPC_PKEY
#define NT_PPC_PKEY 0x110
#endif
#ifndef PKEY_DISABLE_EXECUTE
#define PKEY_DISABLE_EXECUTE 0x4
#endif
#define AMR_BITS_PER_PKEY 2
#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey + 1) * AMR_BITS_PER_PKEY))
static const char user_read[] = "[User Read (Running)]";
static const char user_write[] = "[User Write (Running)]";
static const char ptrace_read_running[] = "[Ptrace Read (Running)]";
static const char ptrace_write_running[] = "[Ptrace Write (Running)]";
/* Information shared between the parent and the child. */
struct shared_info {
struct child_sync child_sync;
/* AMR value the parent expects to read from the child. */
unsigned long amr1;
/* AMR value the parent is expected to write to the child. */
unsigned long amr2;
/* AMR value that ptrace should refuse to write to the child. */
unsigned long invalid_amr;
/* IAMR value the parent expects to read from the child. */
unsigned long expected_iamr;
/* UAMOR value the parent expects to read from the child. */
unsigned long expected_uamor;
/*
* IAMR and UAMOR values that ptrace should refuse to write to the child
* (even though they're valid ones) because userspace doesn't have
* access to those registers.
*/
unsigned long invalid_iamr;
unsigned long invalid_uamor;
};
/*
 * pkey_alloc(2) via a raw syscall — a libc wrapper is not assumed to
 * exist on every toolchain this test is built with.
 */
static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
{
	long rc;

	rc = syscall(__NR_pkey_alloc, flags, init_access_rights);
	return rc;
}
/*
 * Child half of the pkey ptrace test.  Allocates pkeys, publishes the
 * AMR/IAMR/UAMOR values the parent should observe (and invalid ones the
 * kernel should reject), then repeatedly waits for the parent to poke the
 * registers via ptrace and verifies the AMR from its own context.
 * Synchronisation is via the shared child_sync structure.
 */
static int child(struct shared_info *info)
{
	unsigned long reg;
	bool disable_execute = true;
	int pkey1, pkey2, pkey3;
	int ret;

	/* Wait until parent fills out the initial register values. */
	ret = wait_parent(&info->child_sync);
	if (ret)
		return ret;

	/* Get some pkeys so that we can change their bits in the AMR. */
	pkey1 = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE);
	if (pkey1 < 0) {
		/* Execute-disable may be unsupported; retry without it. */
		pkey1 = sys_pkey_alloc(0, 0);
		CHILD_FAIL_IF(pkey1 < 0, &info->child_sync);
		disable_execute = false;
	}

	pkey2 = sys_pkey_alloc(0, 0);
	CHILD_FAIL_IF(pkey2 < 0, &info->child_sync);

	pkey3 = sys_pkey_alloc(0, 0);
	CHILD_FAIL_IF(pkey3 < 0, &info->child_sync);

	info->amr1 |= 3ul << pkeyshift(pkey1);
	info->amr2 |= 3ul << pkeyshift(pkey2);
	/*
	 * Invalid AMR value where we try to force write
	 * things which are denied by the UAMOR setting.
	 */
	info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);

	/*
	 * If PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr.
	 */
	if (disable_execute)
		info->expected_iamr |= 1ul << pkeyshift(pkey1);
	else
		info->expected_iamr &= ~(1ul << pkeyshift(pkey1));

	/*
	 * We allocated pkey2 and pkey3 above. Clear the IAMR bits.
	 */
	info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
	info->expected_iamr &= ~(1ul << pkeyshift(pkey3));

	/*
	 * Create an IAMR value different from expected value.
	 * Kernel will reject an IAMR and UAMOR change.
	 */
	info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
	info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));

	printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
	       user_write, info->amr1, pkey1, pkey2, pkey3);

	set_amr(info->amr1);

	/* Wait for parent to read our AMR value and write a new one. */
	ret = prod_parent(&info->child_sync);
	CHILD_FAIL_IF(ret, &info->child_sync);

	ret = wait_parent(&info->child_sync);
	if (ret)
		return ret;

	/* Parent wrote amr2 via ptrace; confirm it took effect. */
	reg = mfspr(SPRN_AMR);

	printf("%-30s AMR: %016lx\n", user_read, reg);

	CHILD_FAIL_IF(reg != info->amr2, &info->child_sync);

	/*
	 * Wait for parent to try to write an invalid AMR value.
	 */
	ret = prod_parent(&info->child_sync);
	CHILD_FAIL_IF(ret, &info->child_sync);

	ret = wait_parent(&info->child_sync);
	if (ret)
		return ret;

	/* The invalid write must not have changed the AMR. */
	reg = mfspr(SPRN_AMR);

	printf("%-30s AMR: %016lx\n", user_read, reg);

	CHILD_FAIL_IF(reg != info->amr2, &info->child_sync);

	/*
	 * Wait for parent to try to write an IAMR and a UAMOR value. We can't
	 * verify them, but we can verify that the AMR didn't change.
	 */
	ret = prod_parent(&info->child_sync);
	CHILD_FAIL_IF(ret, &info->child_sync);

	ret = wait_parent(&info->child_sync);
	if (ret)
		return ret;

	reg = mfspr(SPRN_AMR);

	printf("%-30s AMR: %016lx\n", user_read, reg);

	CHILD_FAIL_IF(reg != info->amr2, &info->child_sync);

	/* Now let parent know that we are finished. */
	ret = prod_parent(&info->child_sync);
	CHILD_FAIL_IF(ret, &info->child_sync);

	return TEST_PASS;
}
/*
 * Parent half of the pkey ptrace test.  Reads the child's AMR/IAMR/UAMOR
 * via the NT_PPC_PKEY regset, writes valid and invalid values back, and
 * checks that the kernel accepts/rejects each write as expected.  The
 * child verifies its own AMR between each step.
 */
static int parent(struct shared_info *info, pid_t pid)
{
	unsigned long regs[3];
	int ret, status;

	/*
	 * Get the initial values for AMR, IAMR and UAMOR and communicate them
	 * to the child.
	 */
	ret = ptrace_read_regs(pid, NT_PPC_PKEY, regs, 3);
	PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync, "PKEYs not supported");
	PARENT_FAIL_IF(ret, &info->child_sync);

	info->amr1 = info->amr2 = regs[0];
	info->expected_iamr = regs[1];
	info->expected_uamor = regs[2];

	/* Wake up child so that it can set itself up. */
	ret = prod_child(&info->child_sync);
	PARENT_FAIL_IF(ret, &info->child_sync);

	ret = wait_child(&info->child_sync);
	if (ret)
		return ret;

	/* Verify that we can read the pkey registers from the child. */
	ret = ptrace_read_regs(pid, NT_PPC_PKEY, regs, 3);
	PARENT_FAIL_IF(ret, &info->child_sync);

	printf("%-30s AMR: %016lx IAMR: %016lx UAMOR: %016lx\n",
	       ptrace_read_running, regs[0], regs[1], regs[2]);

	PARENT_FAIL_IF(regs[0] != info->amr1, &info->child_sync);
	PARENT_FAIL_IF(regs[1] != info->expected_iamr, &info->child_sync);
	PARENT_FAIL_IF(regs[2] != info->expected_uamor, &info->child_sync);

	/* Write valid AMR value in child. */
	ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr2, 1);
	PARENT_FAIL_IF(ret, &info->child_sync);

	printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr2);

	/* Wake up child so that it can verify it changed. */
	ret = prod_child(&info->child_sync);
	PARENT_FAIL_IF(ret, &info->child_sync);

	ret = wait_child(&info->child_sync);
	if (ret)
		return ret;

	/* Write invalid AMR value in child. */
	ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
	PARENT_FAIL_IF(ret, &info->child_sync);

	printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);

	/* Wake up child so that it can verify it didn't change. */
	ret = prod_child(&info->child_sync);
	PARENT_FAIL_IF(ret, &info->child_sync);

	ret = wait_child(&info->child_sync);
	if (ret)
		return ret;

	/* Try to write to IAMR. */
	regs[0] = info->amr1;
	regs[1] = info->invalid_iamr;
	ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
	/* This write is expected to FAIL (IAMR is not writable from here). */
	PARENT_FAIL_IF(!ret, &info->child_sync);

	printf("%-30s AMR: %016lx IAMR: %016lx\n",
	       ptrace_write_running, regs[0], regs[1]);

	/* Try to write to IAMR and UAMOR. */
	regs[2] = info->invalid_uamor;
	ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
	/* Also expected to fail. */
	PARENT_FAIL_IF(!ret, &info->child_sync);

	printf("%-30s AMR: %016lx IAMR: %016lx UAMOR: %016lx\n",
	       ptrace_write_running, regs[0], regs[1], regs[2]);

	/* Verify that all registers still have their expected values. */
	ret = ptrace_read_regs(pid, NT_PPC_PKEY, regs, 3);
	PARENT_FAIL_IF(ret, &info->child_sync);

	printf("%-30s AMR: %016lx IAMR: %016lx UAMOR: %016lx\n",
	       ptrace_read_running, regs[0], regs[1], regs[2]);

	PARENT_FAIL_IF(regs[0] != info->amr2, &info->child_sync);
	PARENT_FAIL_IF(regs[1] != info->expected_iamr, &info->child_sync);
	PARENT_FAIL_IF(regs[2] != info->expected_uamor, &info->child_sync);

	/* Wake up child so that it can verify AMR didn't change and wrap up. */
	ret = prod_child(&info->child_sync);
	PARENT_FAIL_IF(ret, &info->child_sync);

	ret = wait(&status);
	if (ret != pid) {
		printf("Child's exit status not captured\n");
		/* NOTE(review): a missed wait() is treated as PASS here,
		 * unlike sibling tests which treat it as FAIL — confirm
		 * whether this is intentional. */
		ret = TEST_PASS;
	} else if (!WIFEXITED(status)) {
		printf("Child exited abnormally\n");
		ret = TEST_FAIL;
	} else
		ret = WEXITSTATUS(status) ? TEST_FAIL : TEST_PASS;

	return ret;
}
/*
 * Set up a SysV shared-memory sync area, fork, and run the parent/child
 * halves of the pkey ptrace test.  The parent removes the segment.
 *
 * Fixes: shmget()/shmat() results are now checked; the sync primitives are
 * destroyed BEFORE shmdt() (the original called destroy_child_sync() on
 * already-detached memory); an init_child_sync() failure no longer leaks
 * the segment.
 */
static int ptrace_pkey(void)
{
	struct shared_info *info;
	int shm_id;
	int ret;
	pid_t pid;

	shm_id = shmget(IPC_PRIVATE, sizeof(*info), 0777 | IPC_CREAT);
	if (shm_id < 0) {
		perror("shmget() failed");
		return TEST_FAIL;
	}

	info = shmat(shm_id, NULL, 0);
	if (info == (void *)-1) {
		perror("shmat() failed");
		shmctl(shm_id, IPC_RMID, NULL);
		return TEST_FAIL;
	}

	ret = init_child_sync(&info->child_sync);
	if (ret) {
		shmdt(info);
		shmctl(shm_id, IPC_RMID, NULL);
		return ret;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		ret = TEST_FAIL;
	} else if (pid == 0)
		ret = child(info);
	else
		ret = parent(info, pid);

	/* Tear down the sync objects while the segment is still mapped. */
	if (pid)
		destroy_child_sync(&info->child_sync);

	shmdt(info);

	if (pid)
		shmctl(shm_id, IPC_RMID, NULL);

	return ret;
}
/* Entry point: run the pkey ptrace test under the powerpc selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_pkey, "ptrace_pkey");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers in the TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "tm.h"
#include "ptrace-tar.h"
int shm_id;
unsigned long *cptr, *pptr;
/*
 * Child routine: inside a transaction, set TAR/DSCR/PPR to the "_2"
 * values (checkpointed state holds the "_1" values), suspend and signal
 * the tracer via cptr[1], then spin until the tracer aborts us.  On
 * abort, verify the tracer's checkpointed writes ("_4" values) became
 * the live register state.  Exits 0 on success, 1 on failure.
 *
 * Fix: shmdt() must be passed the attached address (cptr), not the
 * address of the pointer variable (&cptr) — the original detach calls
 * always failed with EINVAL.
 */
void tm_tar(void)
{
	unsigned long result, texasr;
	unsigned long regs[3];
	int ret;

	cptr = (unsigned long *)shmat(shm_id, NULL, 0);

trans:
	cptr[1] = 0;
	asm __volatile__(
		"li	4, %[tar_1];"
		"mtspr %[sprn_tar],  4;"	/* TAR_1 */
		"li	4, %[dscr_1];"
		"mtspr %[sprn_dscr], 4;"	/* DSCR_1 */
		"or     31,31,31;"		/* PPR_1*/

		"1: ;"
		"tbegin.;"
		"beq 2f;"

		"li	4, %[tar_2];"
		"mtspr %[sprn_tar], 4;"		/* TAR_2 */
		"li	4, %[dscr_2];"
		"mtspr %[sprn_dscr], 4;"	/* DSCR_2 */
		"or     1,1,1;"			/* PPR_2 */
		"tsuspend.;"
		"li 0, 1;"
		"stw 0, 0(%[cptr1]);"		/* tell the tracer we're live */
		"tresume.;"
		"b .;"

		"tend.;"
		"li 0, 0;"
		"ori %[res], 0, 0;"
		"b 3f;"

		/* Transaction abort handler */
		"2: ;"
		"li 0, 1;"
		"ori %[res], 0, 0;"
		"mfspr %[texasr], %[sprn_texasr];"

		"3: ;"
		: [res] "=r" (result), [texasr] "=r" (texasr)
		: [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR),
		[sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
		[tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
		[dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1])
		: "memory", "r0", "r3", "r4", "r5", "r6"
		);

	/* TM failed, analyse */
	if (result) {
		if (!cptr[0])
			goto trans;	/* tracer not done yet; retry */

		regs[0] = mfspr(SPRN_TAR);
		regs[1] = mfspr(SPRN_PPR);
		regs[2] = mfspr(SPRN_DSCR);

		shmdt((void *)cptr);
		printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
		       user_read, regs[0], regs[1], regs[2]);

		ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
		if (ret)
			exit(1);
		exit(0);
	}
	shmdt((void *)cptr);
	exit(1);
}
/*
 * Tracer side: while the child's transaction is suspended, read the
 * running ("_2") and checkpointed ("_1") TAR/PPR/DSCR values, then write
 * new checkpointed values ("_4") that the child verifies after abort.
 */
int trace_tm_tar(pid_t child)
{
	unsigned long regs[3];

	FAIL_IF(start_trace(child));
	FAIL_IF(show_tar_registers(child, regs));
	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
	       ptrace_read_running, regs[0], regs[1], regs[2]);

	FAIL_IF(validate_tar_registers(regs, TAR_2, PPR_2, DSCR_2));
	FAIL_IF(show_tm_checkpointed_state(child, regs));
	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
	       ptrace_read_ckpt, regs[0], regs[1], regs[2]);

	FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
	FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
	       ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);

	/* Release the child: it may now finish and validate the _4 values. */
	pptr[0] = 1;
	FAIL_IF(stop_trace(child));
	return TEST_PASS;
}
/*
 * Fork a transactional child, trace its checkpointed TAR/PPR/DSCR, and
 * reap it.  Shared flags: pptr[0] releases the child, pptr[1] signals
 * that the child's transaction is live.
 *
 * Fixes: shmdt() was given &pptr (the pointer variable's address) instead
 * of pptr, so the segment was never detached; the segment is now sized
 * for the two unsigned longs actually stored in it; fork() failure is
 * handled.
 */
int ptrace_tm_tar(void)
{
	pid_t pid;
	int ret, status;

	SKIP_IF_MSG(!have_htm(), "Don't have transactional memory");
	SKIP_IF_MSG(htm_is_synthetic(), "Transactional memory is synthetic");
	shm_id = shmget(IPC_PRIVATE, sizeof(unsigned long) * 2, 0777|IPC_CREAT);
	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}
	if (pid == 0)
		tm_tar();

	pptr = (unsigned long *)shmat(shm_id, NULL, 0);
	pptr[0] = 0;

	if (pid) {
		/* Wait for the child's transaction to become live. */
		while (!pptr[1])
			asm volatile("" : : : "memory");
		ret = trace_tm_tar(pid);
		if (ret) {
			kill(pid, SIGTERM);
			shmdt((void *)pptr);
			shmctl(shm_id, IPC_RMID, NULL);
			return TEST_FAIL;
		}
		shmdt((void *)pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
			TEST_PASS;
	}
	return TEST_PASS;
}
/* Entry point: run the TM/TAR ptrace test under the selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_tm_tar, "ptrace_tm_tar");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
#include "reg.h"
#include <time.h>
/* Tracer and Tracee Shared Data */
int shm_id;
int *cptr, *pptr;
extern void gpr_child_loop(int *read_flag, int *write_flag,
unsigned long *gpr_buf, double *fpr_buf);
unsigned long child_gpr_val, parent_gpr_val;
double child_fpr_val, parent_fpr_val;
/*
 * Child: fill GPR/FPR buffers with child_*_val, enter the asm loop that
 * loads them into real registers and spins until the tracer is done,
 * then verify the tracer's writes (parent_*_val) landed in its buffers.
 */
static int child(void)
{
	unsigned long gpr_buf[32];
	double fpr_buf[32];
	int i;

	cptr = (int *)shmat(shm_id, NULL, 0);
	memset(gpr_buf, 0, sizeof(gpr_buf));
	memset(fpr_buf, 0, sizeof(fpr_buf));

	for (i = 0; i < 32; i++) {
		gpr_buf[i] = child_gpr_val;
		fpr_buf[i] = child_fpr_val;
	}

	/* cptr[0]: parent-done flag; cptr[1]: child-ready flag. */
	gpr_child_loop(&cptr[0], &cptr[1], gpr_buf, fpr_buf);

	shmdt((void *)cptr);

	FAIL_IF(validate_gpr(gpr_buf, parent_gpr_val));
	FAIL_IF(validate_fpr_double(fpr_buf, parent_fpr_val));

	return 0;
}
/*
 * Tracer side: read the child's GPRs/FPRs via GETREGS/GETFPREGS/PEEKUSR
 * and check them, then write new values via SETREGS/SETFPREGS/POKEUSR.
 * The child validates the written values after the trace stops.
 *
 * Fix: the buffer returned by the second peek_fprs() was never freed.
 * (Buffers can still leak on a FAIL_IF early return, but those paths
 * already abort the test.)
 */
int trace_gpr(pid_t child)
{
	__u64 tmp, fpr[32], *peeked_fprs;
	unsigned long gpr[18];

	FAIL_IF(start_trace(child));

	// Check child GPRs match what we expect using GETREGS
	FAIL_IF(show_gpr(child, gpr));
	FAIL_IF(validate_gpr(gpr, child_gpr_val));

	// Check child FPRs match what we expect using GETFPREGS
	FAIL_IF(show_fpr(child, fpr));
	memcpy(&tmp, &child_fpr_val, sizeof(tmp));
	FAIL_IF(validate_fpr(fpr, tmp));

	// Check child FPRs match what we expect using PEEKUSR
	peeked_fprs = peek_fprs(child);
	FAIL_IF(!peeked_fprs);
	FAIL_IF(validate_fpr(peeked_fprs, tmp));
	free(peeked_fprs);

	// Write child GPRs using SETREGS
	FAIL_IF(write_gpr(child, parent_gpr_val));

	// Write child FPRs using SETFPREGS
	memcpy(&tmp, &parent_fpr_val, sizeof(tmp));
	FAIL_IF(write_fpr(child, tmp));

	// Check child FPRs match what we just set, using PEEKUSR
	peeked_fprs = peek_fprs(child);
	FAIL_IF(!peeked_fprs);
	FAIL_IF(validate_fpr(peeked_fprs, tmp));

	// Write child FPRs using POKEUSR
	FAIL_IF(poke_fprs(child, (unsigned long *)peeked_fprs));
	free(peeked_fprs);	// was leaked in the original

	// Child will check its FPRs match before exiting
	FAIL_IF(stop_trace(child));

	return TEST_PASS;
}
#ifndef __LONG_WIDTH__
#define __LONG_WIDTH__ (sizeof(long) * 8)
#endif
/*
 * Produce a pseudo-random register value with a realistic distribution:
 * usually a small integer, sometimes a pointer-like value with high bits
 * set, occasionally a full 64-bit pattern.  Uses random(); the caller is
 * responsible for seeding.
 */
static uint64_t rand_reg(void)
{
	long roll = random();
	// Small values are typical
	uint64_t value = roll & 0xffff;

	if (roll & 0x10000)
		return value;

	// Pointers tend to have high bits set
	value |= random() << (__LONG_WIDTH__ - 31);
	if (roll & 0x100000)
		return value;

	// And sometimes we want a full 64-bit value
	value ^= random() << 16;
	return value;
}
/*
 * Test driver: pick random register values (seed is printed so failures
 * are reproducible), fork the child, trace it, and collect its exit
 * status.  Shared flags: pptr[0] releases the child, pptr[1] signals the
 * child has its registers loaded.
 */
int ptrace_gpr(void)
{
	unsigned long seed;
	int ret, status;
	pid_t pid;

	seed = getpid() ^ time(NULL);
	printf("srand(%lu)\n", seed);
	srand(seed);

	child_gpr_val = rand_reg();
	child_fpr_val = rand_reg();
	parent_gpr_val = rand_reg();
	parent_fpr_val = rand_reg();

	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
	pid = fork();
	if (pid < 0) {
		perror("fork() failed");
		return TEST_FAIL;
	}
	if (pid == 0)
		exit(child());

	if (pid) {
		pptr = (int *)shmat(shm_id, NULL, 0);
		/* Wait until the child has loaded its registers. */
		while (!pptr[1])
			asm volatile("" : : : "memory");

		ret = trace_gpr(pid);
		if (ret) {
			kill(pid, SIGTERM);
			shmdt((void *)pptr);
			shmctl(shm_id, IPC_RMID, NULL);
			return TEST_FAIL;
		}

		/* Release the child so it can validate and exit. */
		pptr[0] = 1;
		shmdt((void *)pptr);

		ret = wait(&status);
		shmctl(shm_id, IPC_RMID, NULL);
		if (ret != pid) {
			printf("Child's exit status not captured\n");
			return TEST_FAIL;
		}

		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
			TEST_PASS;
	}
	return TEST_PASS;
}
/* Entry point: run the GPR/FPR ptrace test under the selftest harness. */
int main(int argc, char *argv[])
{
	return test_harness(ptrace_gpr, "ptrace_gpr");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* perf events self profiling example test case for hw breakpoints.
*
* This tests perf PERF_TYPE_BREAKPOINT parameters
* 1) tests all variants of the break on read/write flags
* 2) tests exclude_user == 0 and 1
 * 3) test array matches (if DAWR is supported)
* 4) test different numbers of breakpoints matches
*
* Configure this breakpoint, then read and write the data a number of
* times. Then check the output count from perf is as expected.
*
* Based on:
* http://ozlabs.org/~anton/junkcode/perf_events_example1.c
*
* Copyright (C) 2018 Michael Neuling, IBM Corporation.
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <assert.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/sysinfo.h>
#include <asm/ptrace.h>
#include <elf.h>
#include <pthread.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include "utils.h"
#ifndef PPC_DEBUG_FEATURE_DATA_BP_ARCH_31
#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x20
#endif
#define MAX_LOOPS 10000
#define DAWR_LENGTH_MAX ((0x3f + 1) * 8)
int nprocs;
static volatile int a = 10;
static volatile int b = 10;
static volatile char c[512 + 8] __attribute__((aligned(512)));
/*
 * Initialise @attr for a hardware-breakpoint perf event: watch @len bytes
 * at @addr for the access @type.  Kernel/hypervisor/guest activity is
 * always excluded; user activity only when @exclude_user is set.  The
 * event starts disabled so callers enable it explicitly.
 */
static void perf_event_attr_set(struct perf_event_attr *attr,
				__u32 type, __u64 addr, __u64 len,
				bool exclude_user)
{
	memset(attr, 0, sizeof(*attr));

	attr->type		= PERF_TYPE_BREAKPOINT;
	attr->size		= sizeof(*attr);
	attr->disabled		= 1;

	attr->bp_type		= type;
	attr->bp_addr		= addr;
	attr->bp_len		= len;

	attr->exclude_kernel	= 1;
	attr->exclude_hv	= 1;
	attr->exclude_guest	= 1;
	attr->exclude_user	= exclude_user;
}
/* Open a per-process (any CPU) breakpoint event on ourselves, with an
 * explicit exclude_user choice.  Returns the event fd or -1. */
static int
perf_process_event_open_exclude_user(__u32 type, __u64 addr, __u64 len, bool exclude_user)
{
	struct perf_event_attr ev_attr;

	perf_event_attr_set(&ev_attr, type, addr, len, exclude_user);
	return syscall(__NR_perf_event_open, &ev_attr, getpid(), -1, -1, 0);
}
/* Open a per-process (any CPU) breakpoint event counting user accesses. */
static int perf_process_event_open(__u32 type, __u64 addr, __u64 len)
{
	struct perf_event_attr ev_attr;

	perf_event_attr_set(&ev_attr, type, addr, len, 0);
	return syscall(__NR_perf_event_open, &ev_attr, getpid(), -1, -1, 0);
}
/* Open a CPU-wide (all tasks) breakpoint event bound to @cpu. */
static int perf_cpu_event_open(long cpu, __u32 type, __u64 addr, __u64 len)
{
	struct perf_event_attr ev_attr;

	perf_event_attr_set(&ev_attr, type, addr, len, 0);
	return syscall(__NR_perf_event_open, &ev_attr, -1, cpu, -1, 0);
}
/* Close each of the n descriptors in fd[]. */
static void close_fds(int *fd, int n)
{
	int i = 0;

	while (i < n)
		close(fd[i++]);
}
/*
 * Read and sum the counter values of n perf event fds; each read(2) of a
 * counting event yields one u64.
 *
 * Fixes: the value was read into an `unsigned long` while the read size
 * was checked against sizeof(unsigned long long) — only coincidentally
 * equal on LP64, a stack overflow on ILP32.  Use an explicit 64-bit
 * local (also renamed so it no longer shadows the file-scope `c`).
 */
static unsigned long read_fds(int *fd, int n)
{
	int i;
	unsigned long long val = 0;
	unsigned long count = 0;
	size_t res;

	for (i = 0; i < n; i++) {
		res = read(fd[i], &val, sizeof(val));
		assert(res == sizeof(val));
		count += val;
	}
	return count;
}
/* Zero the counter of each of the n events. */
static void reset_fds(int *fd, int n)
{
	int i = 0;

	while (i < n)
		ioctl(fd[i++], PERF_EVENT_IOC_RESET);
}
/* Start counting on each of the n events. */
static void enable_fds(int *fd, int n)
{
	int i = 0;

	while (i < n)
		ioctl(fd[i++], PERF_EVENT_IOC_ENABLE);
}
/* Stop counting on each of the n events. */
static void disable_fds(int *fd, int n)
{
	int i = 0;

	while (i < n)
		ioctl(fd[i++], PERF_EVENT_IOC_DISABLE);
}
/*
 * Open one CPU-wide breakpoint event per CPU in our affinity mask,
 * storing the fds in fd[] (sized for the global nprocs).  Raises the
 * open-file limit first since one fd is needed per CPU.  Returns 0 on
 * success; on failure closes any fds already opened and returns <0.
 */
static int perf_systemwide_event_open(int *fd, __u32 type, __u64 addr, __u64 len)
{
	int i, ncpus, cpu, ret = 0;
	struct rlimit rlim;
	cpu_set_t *mask;
	size_t size;

	/* One event fd per CPU — make sure the fd limit is high enough. */
	if (getrlimit(RLIMIT_NOFILE, &rlim)) {
		perror("getrlimit");
		return -1;
	}
	rlim.rlim_cur = 65536;
	if (setrlimit(RLIMIT_NOFILE, &rlim)) {
		perror("setrlimit");
		return -1;
	}

	ncpus = get_nprocs_conf();
	size = CPU_ALLOC_SIZE(ncpus);
	mask = CPU_ALLOC(ncpus);
	if (!mask) {
		perror("malloc");
		return -1;
	}

	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(0, size, mask)) {
		perror("sched_getaffinity");
		ret = -1;
		goto done;
	}

	/* Open an event on each CPU we are allowed to run on. */
	for (i = 0, cpu = 0; i < nprocs && cpu < ncpus; cpu++) {
		if (!CPU_ISSET_S(cpu, size, mask))
			continue;
		fd[i] = perf_cpu_event_open(cpu, type, addr, len);
		if (fd[i] < 0) {
			perror("perf_systemwide_event_open");
			close_fds(fd, i);
			ret = fd[i];
			goto done;
		}
		i++;
	}

	if (i < nprocs) {
		printf("Error: Number of online cpus reduced since start of test: %d < %d\n", i, nprocs);
		close_fds(fd, i);
		ret = -1;
	}

done:
	CPU_FREE(mask);
	return ret;
}
/* Probe whether the kernel accepts a hardware read watchpoint of @len bytes. */
static inline bool breakpoint_test(int len)
{
	int fd;

	/* bp_addr can point anywhere but needs to be aligned */
	fd = perf_process_event_open(HW_BREAKPOINT_R,
				     (__u64)(&fd) & 0xfffffffffffff800, len);
	if (fd >= 0) {
		close(fd);
		return true;
	}

	return false;
}
/* True if the kernel supports a basic 4-byte hardware read watchpoint. */
static inline bool perf_breakpoint_supported(void)
{
	return breakpoint_test(4);
}
/* True if a maximum-length (DAWR-sized) watchpoint can be created,
 * i.e. the DAWR facility is available. */
static inline bool dawr_supported(void)
{
	return breakpoint_test(DAWR_LENGTH_MAX);
}
/*
 * Arm one breakpoint of the given read/write type over either a single
 * int or a DAWR-sized array, run a loop of reads and writes over the
 * watched region, and check the perf count matches expectations.
 * Returns 0 on success, 1 on a count mismatch; exits on setup failure.
 *
 * Fixes: the event fd was leaked on the mismatch path; the alignment
 * scratch buffer is now large enough that the 0x800-aligned,
 * DAWR_LENGTH_MAX-sized window always fits inside it (the original
 * 2*DAWR_LENGTH_MAX buffer could be overrun after alignment).
 */
static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
{
	int i,j;
	size_t res;
	unsigned long long breaks, needed;
	int readint;
	int readintarraybig[(0x800 + DAWR_LENGTH_MAX)/sizeof(int)];
	int *readintalign;
	volatile int *ptr;
	int break_fd;
	int loop_num = MAX_LOOPS - (rand() % 100);	/* provide some variability */
	volatile int *k;
	__u64 len;

	/* round up to the next 0x800 boundary (comfortably aligned for DAWR) */
	readintalign = (int *)(((unsigned long)readintarraybig + 0x7ff) &
			       0xfffffffffffff800);

	ptr = &readint;
	if (arraytest)
		ptr = &readintalign[0];

	len = arraytest ? DAWR_LENGTH_MAX : sizeof(int);
	break_fd = perf_process_event_open_exclude_user(readwriteflag, (__u64)ptr,
							len, exclude_user);
	if (break_fd < 0) {
		perror("perf_process_event_open_exclude_user");
		exit(1);
	}

	/* start counters */
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);

	/* Test a bunch of reads and writes */
	k = &readint;
	for (i = 0; i < loop_num; i++) {
		if (arraytest)
			k = &(readintalign[i % (DAWR_LENGTH_MAX/sizeof(int))]);

		j = *k;
		*k = j;
	}

	/* stop counters */
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);

	/* read and check counters */
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));

	/* we read and write each loop, so subtract the ones we are counting */
	needed = 0;
	if (readwriteflag & HW_BREAKPOINT_R)
		needed += loop_num;
	if (readwriteflag & HW_BREAKPOINT_W)
		needed += loop_num;
	needed = needed * (1 - exclude_user);
	printf("TESTED: addr:0x%lx brks:% 8lld loops:% 8i rw:%i !user:%i array:%i\n",
	       (unsigned long int)ptr, breaks, loop_num, readwriteflag, exclude_user, arraytest);
	if (breaks != needed) {
		printf("FAILED: 0x%lx brks:%lld needed:%lli %i %i %i\n\n",
		       (unsigned long int)ptr, breaks, needed, loop_num, readwriteflag, exclude_user);
		close(break_fd);	/* don't leak the event fd on failure */
		return 1;
	}
	close(break_fd);

	return 0;
}
/*
 * Watch only the middle 4 bytes of an 8-byte heap buffer and verify that
 * 2-byte and 8-byte accesses at various offsets count (or don't count)
 * according to their overlap with the watched range.  Access widths are
 * deliberate: the typed volatile loads/stores determine what the hardware
 * sees, so these must not be restructured into memcpy() etc.
 * Returns 0 if every case behaved as expected, 1 otherwise.
 */
static int runtest_dar_outside(void)
{
	void *target;
	volatile __u16 temp16;
	volatile __u64 temp64;
	int break_fd;
	unsigned long long breaks;
	int fail = 0;
	size_t res;

	target = malloc(8);
	if (!target) {
		perror("malloc failed");
		exit(EXIT_FAILURE);
	}

	/* watch middle half of target array */
	break_fd = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)(target + 2), 4);
	if (break_fd < 0) {
		free(target);
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	/* Bytes 0-1: entirely before the watched range. Shouldn't hit. */
	ioctl(break_fd, PERF_EVENT_IOC_RESET);
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
	temp16 = *((__u16 *)target);
	*((__u16 *)target) = temp16;
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));
	if (breaks == 0) {
		printf("TESTED: No overlap\n");
	} else {
		printf("FAILED: No overlap: %lld != 0\n", breaks);
		fail = 1;
	}

	/* Bytes 1-2: straddles the start of the range. Hit (read + write). */
	ioctl(break_fd, PERF_EVENT_IOC_RESET);
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
	temp16 = *((__u16 *)(target + 1));
	*((__u16 *)(target + 1)) = temp16;
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));
	if (breaks == 2) {
		printf("TESTED: Partial overlap\n");
	} else {
		printf("FAILED: Partial overlap: %lld != 2\n", breaks);
		fail = 1;
	}

	/* Bytes 5-6: straddles the end of the range. Hit. */
	ioctl(break_fd, PERF_EVENT_IOC_RESET);
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
	temp16 = *((__u16 *)(target + 5));
	*((__u16 *)(target + 5)) = temp16;
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));
	if (breaks == 2) {
		printf("TESTED: Partial overlap\n");
	} else {
		printf("FAILED: Partial overlap: %lld != 2\n", breaks);
		fail = 1;
	}

	/* Bytes 6-7: entirely after the watched range. Shouldn't hit. */
	ioctl(break_fd, PERF_EVENT_IOC_RESET);
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
	temp16 = *((__u16 *)(target + 6));
	*((__u16 *)(target + 6)) = temp16;
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));
	if (breaks == 0) {
		printf("TESTED: No overlap\n");
	} else {
		printf("FAILED: No overlap: %lld != 0\n", breaks);
		fail = 1;
	}

	/* Bytes 0-7: covers the whole watched range. Hit. */
	ioctl(break_fd, PERF_EVENT_IOC_RESET);
	ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
	temp64 = *((__u64 *)target);
	*((__u64 *)target) = temp64;
	ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
	res = read(break_fd, &breaks, sizeof(unsigned long long));
	assert(res == sizeof(unsigned long long));
	if (breaks == 2) {
		printf("TESTED: Full overlap\n");
	} else {
		printf("FAILED: Full overlap: %lld != 2\n", breaks);
		fail = 1;
	}

	free(target);
	close(break_fd);
	return fail;
}
/* Read-modify-write each watched global exactly once, so an armed RW
 * breakpoint on any of them counts one read plus one write. */
static void multi_dawr_workload(void)
{
	a += 10;
	b += 10;
	c[512 + 1] += 'a';
}
/*
 * Two process-scoped RW breakpoints on different addresses (a and b);
 * the workload's read-modify-write of each should count 2 hits per event.
 */
static int test_process_multi_diff_addr(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int fd1, fd2;
	char *desc = "Process specific, Two events, diff addr";
	size_t res;

	fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (fd1 < 0) {
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
	if (fd2 < 0) {
		close(fd1);
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	ioctl(fd1, PERF_EVENT_IOC_RESET);
	ioctl(fd2, PERF_EVENT_IOC_RESET);
	ioctl(fd1, PERF_EVENT_IOC_ENABLE);
	ioctl(fd2, PERF_EVENT_IOC_ENABLE);
	multi_dawr_workload();
	ioctl(fd1, PERF_EVENT_IOC_DISABLE);
	ioctl(fd2, PERF_EVENT_IOC_DISABLE);

	res = read(fd1, &breaks1, sizeof(breaks1));
	assert(res == sizeof(unsigned long long));
	res = read(fd2, &breaks2, sizeof(breaks2));
	assert(res == sizeof(unsigned long long));

	close(fd1);
	close(fd2);

	if (breaks1 != 2 || breaks2 != 2) {
		printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * Two process-scoped RW breakpoints on the SAME address (a); both events
 * should count 2 hits from the single read-modify-write.
 */
static int test_process_multi_same_addr(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int fd1, fd2;
	char *desc = "Process specific, Two events, same addr";
	size_t res;

	fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (fd1 < 0) {
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (fd2 < 0) {
		close(fd1);
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	ioctl(fd1, PERF_EVENT_IOC_RESET);
	ioctl(fd2, PERF_EVENT_IOC_RESET);
	ioctl(fd1, PERF_EVENT_IOC_ENABLE);
	ioctl(fd2, PERF_EVENT_IOC_ENABLE);
	multi_dawr_workload();
	ioctl(fd1, PERF_EVENT_IOC_DISABLE);
	ioctl(fd2, PERF_EVENT_IOC_DISABLE);

	res = read(fd1, &breaks1, sizeof(breaks1));
	assert(res == sizeof(unsigned long long));
	res = read(fd2, &breaks2, sizeof(breaks2));
	assert(res == sizeof(unsigned long long));

	close(fd1);
	close(fd2);

	if (breaks1 != 2 || breaks2 != 2) {
		printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * Two process-scoped breakpoints on different addresses, one write-only
 * (on a) and one read-only (on b); each should count exactly 1 hit from
 * the read-modify-write workload.
 */
static int test_process_multi_diff_addr_ro_wo(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int fd1, fd2;
	char *desc = "Process specific, Two events, diff addr, one is RO, other is WO";
	size_t res;

	fd1 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
	if (fd1 < 0) {
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	fd2 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
	if (fd2 < 0) {
		close(fd1);
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	ioctl(fd1, PERF_EVENT_IOC_RESET);
	ioctl(fd2, PERF_EVENT_IOC_RESET);
	ioctl(fd1, PERF_EVENT_IOC_ENABLE);
	ioctl(fd2, PERF_EVENT_IOC_ENABLE);
	multi_dawr_workload();
	ioctl(fd1, PERF_EVENT_IOC_DISABLE);
	ioctl(fd2, PERF_EVENT_IOC_DISABLE);

	res = read(fd1, &breaks1, sizeof(breaks1));
	assert(res == sizeof(unsigned long long));
	res = read(fd2, &breaks2, sizeof(breaks2));
	assert(res == sizeof(unsigned long long));

	close(fd1);
	close(fd2);

	if (breaks1 != 1 || breaks2 != 1) {
		printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * Process-bound test: two watchpoints on the same address, one read-only
 * and one write-only.  The workload is expected to fire each exactly once.
 *
 * Returns 0 on pass, 1 on count mismatch; exits on perf_event_open failure.
 */
static int test_process_multi_same_addr_ro_wo(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int fd1, fd2;
	char *desc = "Process specific, Two events, same addr, one is RO, other is WO";
	ssize_t res;	/* read(2) returns ssize_t; size_t would hide a -1 error */

	fd1 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
	if (fd1 < 0) {
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	fd2 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
	if (fd2 < 0) {
		close(fd1);
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	ioctl(fd1, PERF_EVENT_IOC_RESET);
	ioctl(fd2, PERF_EVENT_IOC_RESET);
	ioctl(fd1, PERF_EVENT_IOC_ENABLE);
	ioctl(fd2, PERF_EVENT_IOC_ENABLE);
	multi_dawr_workload();
	ioctl(fd1, PERF_EVENT_IOC_DISABLE);
	ioctl(fd2, PERF_EVENT_IOC_DISABLE);

	res = read(fd1, &breaks1, sizeof(breaks1));
	assert(res == sizeof(unsigned long long));
	res = read(fd2, &breaks2, sizeof(breaks2));
	assert(res == sizeof(unsigned long long));

	close(fd1);
	close(fd2);

	if (breaks1 != 1 || breaks2 != 1) {
		printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * System-wide test: one RW watchpoint per CPU on 'a' and one per CPU on
 * 'b'.  The workload should fire each event set twice in total.
 *
 * Returns 0 on pass, 1 on count mismatch; exits on setup failure.
 */
static int test_syswide_multi_diff_addr(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int *fd1 = malloc(nprocs * sizeof(int));
	int *fd2 = malloc(nprocs * sizeof(int));
	char *desc = "Systemwide, Two events, diff addr";
	int ret;

	/* malloc can fail; dereferencing NULL below would crash confusingly */
	if (!fd1 || !fd2) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (ret)
		exit(EXIT_FAILURE);

	ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
	if (ret) {
		close_fds(fd1, nprocs);
		exit(EXIT_FAILURE);
	}

	reset_fds(fd1, nprocs);
	reset_fds(fd2, nprocs);
	enable_fds(fd1, nprocs);
	enable_fds(fd2, nprocs);
	multi_dawr_workload();
	disable_fds(fd1, nprocs);
	disable_fds(fd2, nprocs);

	breaks1 = read_fds(fd1, nprocs);
	breaks2 = read_fds(fd2, nprocs);

	close_fds(fd1, nprocs);
	close_fds(fd2, nprocs);

	free(fd1);
	free(fd2);

	if (breaks1 != 2 || breaks2 != 2) {
		printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * System-wide test: two per-CPU RW watchpoint sets on the same address.
 * The workload should fire each set twice in total.
 *
 * Returns 0 on pass, 1 on count mismatch; exits on setup failure.
 */
static int test_syswide_multi_same_addr(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int *fd1 = malloc(nprocs * sizeof(int));
	int *fd2 = malloc(nprocs * sizeof(int));
	char *desc = "Systemwide, Two events, same addr";
	int ret;

	/* malloc can fail; dereferencing NULL below would crash confusingly */
	if (!fd1 || !fd2) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (ret)
		exit(EXIT_FAILURE);

	ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
	if (ret) {
		close_fds(fd1, nprocs);
		exit(EXIT_FAILURE);
	}

	reset_fds(fd1, nprocs);
	reset_fds(fd2, nprocs);
	enable_fds(fd1, nprocs);
	enable_fds(fd2, nprocs);
	multi_dawr_workload();
	disable_fds(fd1, nprocs);
	disable_fds(fd2, nprocs);

	breaks1 = read_fds(fd1, nprocs);
	breaks2 = read_fds(fd2, nprocs);

	close_fds(fd1, nprocs);
	close_fds(fd2, nprocs);

	free(fd1);
	free(fd2);

	if (breaks1 != 2 || breaks2 != 2) {
		printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * System-wide test: per-CPU write-only watchpoints on 'a' and per-CPU
 * read-only watchpoints on 'b'.  The workload should fire each set once.
 *
 * Returns 0 on pass, 1 on count mismatch; exits on setup failure.
 */
static int test_syswide_multi_diff_addr_ro_wo(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int *fd1 = malloc(nprocs * sizeof(int));
	int *fd2 = malloc(nprocs * sizeof(int));
	char *desc = "Systemwide, Two events, diff addr, one is RO, other is WO";
	int ret;

	/* malloc can fail; dereferencing NULL below would crash confusingly */
	if (!fd1 || !fd2) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
	if (ret)
		exit(EXIT_FAILURE);

	ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
	if (ret) {
		close_fds(fd1, nprocs);
		exit(EXIT_FAILURE);
	}

	reset_fds(fd1, nprocs);
	reset_fds(fd2, nprocs);
	enable_fds(fd1, nprocs);
	enable_fds(fd2, nprocs);
	multi_dawr_workload();
	disable_fds(fd1, nprocs);
	disable_fds(fd2, nprocs);

	breaks1 = read_fds(fd1, nprocs);
	breaks2 = read_fds(fd2, nprocs);

	close_fds(fd1, nprocs);
	close_fds(fd2, nprocs);

	free(fd1);
	free(fd2);

	if (breaks1 != 1 || breaks2 != 1) {
		printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/*
 * System-wide test: per-CPU write-only and read-only watchpoints on the
 * same address.  The workload should fire each set once.
 *
 * Returns 0 on pass, 1 on count mismatch; exits on setup failure.
 */
static int test_syswide_multi_same_addr_ro_wo(void)
{
	unsigned long long breaks1 = 0, breaks2 = 0;
	int *fd1 = malloc(nprocs * sizeof(int));
	int *fd2 = malloc(nprocs * sizeof(int));
	char *desc = "Systemwide, Two events, same addr, one is RO, other is WO";
	int ret;

	/* malloc can fail; dereferencing NULL below would crash confusingly */
	if (!fd1 || !fd2) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
	if (ret)
		exit(EXIT_FAILURE);

	ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
	if (ret) {
		close_fds(fd1, nprocs);
		exit(EXIT_FAILURE);
	}

	reset_fds(fd1, nprocs);
	reset_fds(fd2, nprocs);
	enable_fds(fd1, nprocs);
	enable_fds(fd2, nprocs);
	multi_dawr_workload();
	disable_fds(fd1, nprocs);
	disable_fds(fd2, nprocs);

	breaks1 = read_fds(fd1, nprocs);
	breaks2 = read_fds(fd2, nprocs);

	close_fds(fd1, nprocs);
	close_fds(fd2, nprocs);

	free(fd1);
	free(fd2);

	if (breaks1 != 1 || breaks2 != 1) {
		printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
static int runtest_multi_dawr(void)
{
int ret = 0;
ret |= test_process_multi_diff_addr();
ret |= test_process_multi_same_addr();
ret |= test_process_multi_diff_addr_ro_wo();
ret |= test_process_multi_same_addr_ro_wo();
ret |= test_syswide_multi_diff_addr();
ret |= test_syswide_multi_same_addr();
ret |= test_syswide_multi_diff_addr_ro_wo();
ret |= test_syswide_multi_same_addr_ro_wo();
return ret;
}
/*
 * ISA 3.1 watchpoints can cover up to 512 bytes; check an unaligned
 * 512-byte range starting 8 bytes into 'c'.  The workload is expected
 * to hit it twice.
 */
static int runtest_unaligned_512bytes(void)
{
	unsigned long long breaks = 0;
	int fd;
	char *desc = "Process specific, 512 bytes, unaligned";
	__u64 addr = (__u64)&c + 8;
	ssize_t res;	/* read(2) returns ssize_t; size_t would hide a -1 error */

	fd = perf_process_event_open(HW_BREAKPOINT_RW, addr, 512);
	if (fd < 0) {
		perror("perf_process_event_open");
		exit(EXIT_FAILURE);
	}

	ioctl(fd, PERF_EVENT_IOC_RESET);
	ioctl(fd, PERF_EVENT_IOC_ENABLE);
	multi_dawr_workload();
	ioctl(fd, PERF_EVENT_IOC_DISABLE);

	res = read(fd, &breaks, sizeof(breaks));
	assert(res == sizeof(unsigned long long));

	close(fd);

	if (breaks != 2) {
		printf("FAILED: %s: %lld != 2\n", desc, breaks);
		return 1;
	}

	printf("TESTED: %s\n", desc);
	return 0;
}
/* There is no perf api to find number of available watchpoints. Use ptrace. */
static int get_nr_wps(bool *arch_31)
{
	struct ppc_debug_info dbginfo;
	int child_pid;

	child_pid = fork();
	if (!child_pid) {
		/* Child: allow the parent to trace us, then stop ourselves */
		int ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
		if (ret) {
			perror("PTRACE_TRACEME failed\n");
			exit(EXIT_FAILURE);
		}
		kill(getpid(), SIGUSR1);

		sleep(1);
		exit(EXIT_SUCCESS);
	}

	/* Parent: wait until the child has stopped on its SIGUSR1 */
	wait(NULL);
	if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo)) {
		perror("Can't get breakpoint info");
		exit(EXIT_FAILURE);
	}

	/*
	 * The ARCH_31 feature flag is used by callers to gate the 512-byte
	 * watchpoint test (presumably ISA v3.1 support — see runtest()).
	 */
	*arch_31 = !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_ARCH_31);
	return dbginfo.num_data_bps;
}
/*
 * Top-level test driver: sweep the read/write flag and exclude_user
 * combinations, then run the DAR-outside, multi-DAWR and unaligned
 * 512-byte tests when the hardware supports them.
 */
static int runtest(void)
{
	bool dawr = dawr_supported();
	bool arch_31 = false;
	int nr_wps = get_nr_wps(&arch_31);
	int rwflag, exclude_user;
	int ret;

	/*
	 * perf defines rwflag as two bits read and write and at least
	 * one must be set. So range 1-3.
	 */
	for (rwflag = 1; rwflag <= 3; rwflag++) {
		for (exclude_user = 0; exclude_user <= 1; exclude_user++) {
			ret = runtestsingle(rwflag, exclude_user, 0);
			if (ret)
				return ret;

			/* The array variant requires the DAWR */
			if (!dawr)
				continue;

			ret = runtestsingle(rwflag, exclude_user, 1);
			if (ret)
				return ret;
		}
	}

	ret = runtest_dar_outside();
	if (ret)
		return ret;

	/* Multiple watchpoints need both the DAWR and >1 data breakpoint */
	if (dawr && nr_wps > 1) {
		nprocs = get_nprocs();
		ret = runtest_multi_dawr();
		if (ret)
			return ret;
	}

	if (dawr && arch_31)
		ret = runtest_unaligned_512bytes();

	return ret;
}
/* Harness entry point: seed the PRNG, check support, then run everything. */
static int perf_hwbreak(void)
{
	srand(time(NULL));

	SKIP_IF_MSG(!perf_breakpoint_supported(), "Perf breakpoints not supported");

	return runtest();
}
int main(int argc, char *argv[], char **envp)
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(perf_hwbreak, "perf_hwbreak");
}
| linux-master | tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs interface test
*
* This test updates to system wide DSCR default through the sysfs interface
* and then verifies that all the CPU specific DSCR defaults are updated as
* well verified from their sysfs interfaces.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#include "dscr.h"
/*
 * Read one CPU's dscr sysfs file (hex) and compare it against the
 * expected system-wide default.  Returns 0 on match, non-zero otherwise.
 */
static int check_cpu_dscr_default(char *file, unsigned long val)
{
	unsigned long cpu_dscr;
	int err;

	err = read_ulong(file, &cpu_dscr, 16);
	if (err)
		return err;

	if (cpu_dscr != val) {
		/* %lu: both values are unsigned long (%ld was a format mismatch) */
		printf("DSCR match failed: %lu (system) %lu (cpu)\n",
					val, cpu_dscr);
		return 1;
	}

	return 0;
}
/*
 * Walk /sys/devices/system/cpu and verify every cpuN/dscr file reports
 * the expected default.  Returns 0 if all match, 1 on any mismatch or
 * if the sysfs directory cannot be opened.
 */
static int check_all_cpu_dscr_defaults(unsigned long val)
{
	DIR *sysfs;
	struct dirent *dp;
	char file[LEN_MAX];

	sysfs = opendir(CPU_PATH);
	if (!sysfs) {
		perror("opendir() failed");
		return 1;
	}

	while ((dp = readdir(sysfs))) {
		int len;

		/*
		 * d_type is an enumeration, not a bitmask: testing with
		 * "& DT_DIR" also matched DT_BLK (6 & 4).  Compare for
		 * equality instead.
		 */
		if (dp->d_type != DT_DIR)
			continue;
		if (!strcmp(dp->d_name, "cpuidle"))
			continue;
		if (!strstr(dp->d_name, "cpu"))
			continue;

		len = snprintf(file, LEN_MAX, "%s%s/dscr", CPU_PATH, dp->d_name);
		if (len >= LEN_MAX)
			continue;	/* truncated path, skip */
		if (access(file, F_OK))
			continue;	/* this cpu dir has no dscr file */

		if (check_cpu_dscr_default(file, val)) {
			closedir(sysfs);
			return 1;
		}
	}
	closedir(sysfs);
	return 0;
}
/*
 * Sweep the system-wide default DSCR through all values and check every
 * CPU's sysfs file follows.  The original default is always restored.
 */
int dscr_sysfs(void)
{
	unsigned long saved_dscr;
	int rc = 0;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	saved_dscr = get_default_dscr();

	for (int val = 0; val < DSCR_MAX && !rc; val++) {
		set_default_dscr(val);
		rc = check_all_cpu_dscr_defaults(val);
	}

	set_default_dscr(saved_dscr);
	return rc;
}
int main(int argc, char *argv[])
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(dscr_sysfs, "dscr_sysfs_test");
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) explicit test
*
* This test modifies the DSCR value using mtspr instruction and
* verifies the change with mfspr instruction. It uses both the
* privilege state SPR and the problem state SPR for this purpose.
*
* When using the privilege state SPR, the instructions such as
* mfspr or mtspr are privileged and the kernel emulates them
* for us. Instructions using problem state SPR can be executed
* directly without any emulation if the HW supports them. Else
* they also get emulated by the kernel.
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#define _GNU_SOURCE
#include "dscr.h"
#include "utils.h"
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
/*
 * Companion thread for dscr_explicit_lockstep_test(): alternates with the
 * main thread via a semaphore pair, verifying after each hand-off that its
 * own DSCR value (counting upwards) survived the context switch, via both
 * the privileged and problem state SPRs.
 *
 * Note: a stray srand(gettid()) call was removed — this function never
 * calls rand(), so the seeding was dead code.
 */
void *dscr_explicit_lockstep_thread(void *args)
{
	sem_t *prev = (sem_t *)args;
	sem_t *next = (sem_t *)args + 1;
	unsigned long expected_dscr = 0;

	set_dscr(expected_dscr);

	for (int i = 0; i < COUNT; i++) {
		FAIL_IF_EXIT(sem_wait(prev));

		FAIL_IF_EXIT(expected_dscr != get_dscr());
		FAIL_IF_EXIT(expected_dscr != get_dscr_usr());

		expected_dscr = (expected_dscr + 1) % DSCR_MAX;
		set_dscr(expected_dscr);

		FAIL_IF_EXIT(sem_post(next));
	}

	return NULL;
}
/*
 * Lockstep DSCR test: this thread and a companion thread run on one CPU
 * (the new thread inherits our affinity) and alternate via semaphores.
 * This thread counts its DSCR value downwards while the companion counts
 * upwards, so a value leaking across a context switch is detected.
 *
 * Note: a stray srand(gettid()) call was removed — no rand() is used here.
 */
int dscr_explicit_lockstep_test(void)
{
	pthread_t thread;
	sem_t semaphores[2];
	sem_t *prev = &semaphores[1];	/* reversed prev/next than for the other thread */
	sem_t *next = &semaphores[0];
	unsigned long expected_dscr = 0;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	set_dscr(expected_dscr);

	FAIL_IF(sem_init(prev, 0, 0));
	FAIL_IF(sem_init(next, 0, 1));	/* other thread starts first */

	/* Pin to one CPU so the two threads genuinely context switch */
	FAIL_IF(bind_to_cpu(BIND_CPU_ANY) < 0);
	FAIL_IF(pthread_create(&thread, NULL, dscr_explicit_lockstep_thread, (void *)semaphores));

	for (int i = 0; i < COUNT; i++) {
		FAIL_IF(sem_wait(prev));

		FAIL_IF(expected_dscr != get_dscr());
		FAIL_IF(expected_dscr != get_dscr_usr());

		/*
		 * Unsigned wrap-around is well defined; this acts as a
		 * modular decrement assuming DSCR_MAX is a power of two
		 * (TODO confirm against dscr.h).
		 */
		expected_dscr = (expected_dscr - 1) % DSCR_MAX;
		set_dscr(expected_dscr);

		FAIL_IF(sem_post(next));
	}

	FAIL_IF(pthread_join(thread, NULL));
	FAIL_IF(sem_destroy(prev));
	FAIL_IF(sem_destroy(next));

	return 0;
}
/* Per-thread arguments for dscr_explicit_random_thread(). */
struct random_thread_args {
	pthread_t thread_id;		/* filled in by pthread_create() */
	bool do_yields;			/* if true, randomly sched_yield() between checks */
	pthread_barrier_t *barrier;	/* start barrier shared by all threads */
};
/*
 * Worker for dscr_explicit_random_test(): repeatedly picks a random DSCR
 * value, writes it through either the privileged or the problem state SPR,
 * and re-reads it via both SPRs a random number of times, optionally
 * yielding the CPU in between so context switches are exercised.
 */
void *dscr_explicit_random_thread(void *in)
{
	struct random_thread_args *args = (struct random_thread_args *)in;
	unsigned long expected_dscr = 0;
	int err;

	/* Per-thread seed so the threads follow different random sequences */
	srand(gettid());

	/* Start all threads together to maximise scheduler interference */
	err = pthread_barrier_wait(args->barrier);
	FAIL_IF_EXIT(err != 0 && err != PTHREAD_BARRIER_SERIAL_THREAD);

	for (int i = 0; i < COUNT; i++) {
		/* Write via the privileged (emulated) SPR ... */
		expected_dscr = rand() % DSCR_MAX;
		set_dscr(expected_dscr);

		for (int j = rand() % 5; j > 0; --j) {
			FAIL_IF_EXIT(get_dscr() != expected_dscr);
			FAIL_IF_EXIT(get_dscr_usr() != expected_dscr);

			if (args->do_yields && rand() % 2)
				sched_yield();
		}

		/* ... then via the problem state SPR */
		expected_dscr = rand() % DSCR_MAX;
		set_dscr_usr(expected_dscr);

		for (int j = rand() % 5; j > 0; --j) {
			FAIL_IF_EXIT(get_dscr() != expected_dscr);
			FAIL_IF_EXIT(get_dscr_usr() != expected_dscr);

			if (args->do_yields && rand() % 2)
				sched_yield();
		}
	}

	return NULL;
}
/*
 * Spawn THREADS workers that each randomly set and verify their own DSCR
 * (alternate threads also yield the CPU), released together by a barrier.
 */
int dscr_explicit_random_test(void)
{
	struct random_thread_args workers[THREADS];
	pthread_barrier_t start_barrier;
	int i;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	FAIL_IF(pthread_barrier_init(&start_barrier, NULL, THREADS));

	for (i = 0; i < THREADS; i++) {
		workers[i].do_yields = (i % 2 == 0);
		workers[i].barrier = &start_barrier;

		FAIL_IF(pthread_create(&workers[i].thread_id, NULL,
				       dscr_explicit_random_thread, (void *)&workers[i]));
	}

	for (i = 0; i < THREADS; i++)
		FAIL_IF(pthread_join(workers[i].thread_id, NULL));

	FAIL_IF(pthread_barrier_destroy(&start_barrier));

	return 0;
}
int main(int argc, char *argv[])
{
	unsigned long orig_dscr_default = 0;
	int err = 0;

	/* Save the system-wide default so it can be restored after the tests */
	if (have_hwcap2(PPC_FEATURE2_DSCR))
		orig_dscr_default = get_default_dscr();

	err |= test_harness(dscr_explicit_lockstep_test, "dscr_explicit_lockstep_test");
	err |= test_harness(dscr_explicit_random_test, "dscr_explicit_random_test");

	if (have_hwcap2(PPC_FEATURE2_DSCR))
		set_default_dscr(orig_dscr_default);

	return err;
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork test
*
* This testcase modifies the DSCR using mtspr, forks and then
* verifies that the child process has the correct changed DSCR
* value using mfspr.
*
* When using the privilege state SPR, the instructions such as
* mfspr or mtspr are privileged and the kernel emulates them
* for us. Instructions using problem state SPR can be executed
* directly without any emulation if the HW supports them. Else
* they also get emulated by the kernel.
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#include "dscr.h"
/*
 * Set the DSCR (alternating between the problem state and privileged
 * SPRs), fork, and verify the child inherited the value via both SPRs.
 *
 * Changes vs. original: removed a dead srand(getpid()) (rand() is never
 * called), distinguished the "exited with non-zero status" message from
 * the "didn't exit cleanly" one, and used %lu for unsigned long values.
 */
int dscr_inherit(void)
{
	unsigned long i, dscr = 0;
	pid_t pid;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	set_dscr(dscr);

	for (i = 0; i < COUNT; i++) {
		unsigned long cur_dscr, cur_dscr_usr;

		dscr++;
		if (dscr > DSCR_MAX)
			dscr = 0;

		if (i % 2 == 0)
			set_dscr_usr(dscr);
		else
			set_dscr(dscr);

		pid = fork();
		if (pid == -1) {
			perror("fork() failed");
			exit(1);
		} else if (pid) {
			int status;

			if (waitpid(pid, &status, 0) == -1) {
				perror("waitpid() failed");
				exit(1);
			}

			if (!WIFEXITED(status)) {
				fprintf(stderr, "Child didn't exit cleanly\n");
				exit(1);
			}

			if (WEXITSTATUS(status) != 0) {
				fprintf(stderr, "Child reported failure\n");
				return 1;
			}
		} else {
			cur_dscr = get_dscr();
			if (cur_dscr != dscr) {
				fprintf(stderr, "Kernel DSCR should be %lu "
					"but is %lu\n", dscr, cur_dscr);
				exit(1);
			}

			cur_dscr_usr = get_dscr_usr();
			if (cur_dscr_usr != dscr) {
				fprintf(stderr, "User DSCR should be %lu "
					"but is %lu\n", dscr, cur_dscr_usr);
				exit(1);
			}
			exit(0);
		}
	}

	return 0;
}
int main(int argc, char *argv[])
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(dscr_inherit, "dscr_inherit_test");
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) default test
*
* This test modifies the system wide default DSCR through
* it's sysfs interface and then verifies that all threads
* see the correct changed DSCR value immediately.
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#define _GNU_SOURCE
#include "dscr.h"
#include <pthread.h>
#include <semaphore.h>
#include <unistd.h>
/*
 * Writer half of the lockstep test: each time the reader releases us,
 * bump the system-wide default DSCR to the next value (mod DSCR_MAX)
 * and hand control back to the reader.
 */
static void *dscr_default_lockstep_writer(void *arg)
{
	sem_t *sems = (sem_t *)arg;
	sem_t *reader_sem = &sems[0];
	sem_t *writer_sem = &sems[1];
	unsigned long next_dscr = 0;
	int i;

	for (i = 0; i < COUNT; i++) {
		FAIL_IF_EXIT(sem_wait(writer_sem));

		set_default_dscr(next_dscr);
		next_dscr = (next_dscr + 1) % DSCR_MAX;

		FAIL_IF_EXIT(sem_post(reader_sem));
	}

	return NULL;
}
/*
 * Lockstep default-DSCR test: a writer thread updates the system-wide
 * default via sysfs, then this reader verifies that both the privileged
 * and problem state SPRs immediately reflect the new value.  The pair of
 * semaphores alternate the two threads strictly.
 */
int dscr_default_lockstep_test(void)
{
	pthread_t writer;
	sem_t rw_semaphores[2];
	sem_t *reader_sem = &rw_semaphores[0];
	sem_t *writer_sem = &rw_semaphores[1];
	unsigned long expected_dscr = 0;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	FAIL_IF(sem_init(reader_sem, 0, 0));
	FAIL_IF(sem_init(writer_sem, 0, 1));	/* writer starts first */
	/* Share a CPU with the writer so context switches are exercised */
	FAIL_IF(bind_to_cpu(BIND_CPU_ANY) < 0);
	FAIL_IF(pthread_create(&writer, NULL, dscr_default_lockstep_writer, (void *)rw_semaphores));

	for (int i = 0; i < COUNT ; i++) {
		FAIL_IF(sem_wait(reader_sem));

		FAIL_IF(get_dscr() != expected_dscr);
		FAIL_IF(get_dscr_usr() != expected_dscr);

		/* Track the writer's next value (it increments mod DSCR_MAX) */
		expected_dscr = (expected_dscr + 1) % DSCR_MAX;

		FAIL_IF(sem_post(writer_sem));
	}

	FAIL_IF(pthread_join(writer, NULL));
	FAIL_IF(sem_destroy(reader_sem));
	FAIL_IF(sem_destroy(writer_sem));

	return 0;
}
/* Per-thread arguments for dscr_default_random_thread(). */
struct random_thread_args {
	pthread_t thread_id;			/* filled in by pthread_create() */
	unsigned long *expected_system_dscr;	/* shared expected default, guarded by rw_lock */
	pthread_rwlock_t *rw_lock;		/* read to check, write to change the default */
	pthread_barrier_t *barrier;		/* start barrier shared by all threads */
};
/*
 * Worker for dscr_default_random_test(): under a read lock, compare both
 * DSCR SPRs against the shared expected system default; occasionally take
 * the write lock and advance the system-wide default itself.
 */
static void *dscr_default_random_thread(void *in)
{
	struct random_thread_args *args = (struct random_thread_args *)in;
	unsigned long *expected_dscr_p = args->expected_system_dscr;
	pthread_rwlock_t *rw_lock = args->rw_lock;
	int err;

	/* Per-thread seed so threads follow different random sequences */
	srand(gettid());

	/* Release all threads together */
	err = pthread_barrier_wait(args->barrier);
	FAIL_IF_EXIT(err != 0 && err != PTHREAD_BARRIER_SERIAL_THREAD);

	for (int i = 0; i < COUNT; i++) {
		unsigned long expected_dscr;
		unsigned long current_dscr;
		unsigned long current_dscr_usr;

		/* Snapshot expectation and SPRs while no writer can intervene */
		FAIL_IF_EXIT(pthread_rwlock_rdlock(rw_lock));
		expected_dscr = *expected_dscr_p;
		current_dscr = get_dscr();
		current_dscr_usr = get_dscr_usr();
		FAIL_IF_EXIT(pthread_rwlock_unlock(rw_lock));

		FAIL_IF_EXIT(current_dscr != expected_dscr);
		FAIL_IF_EXIT(current_dscr_usr != expected_dscr);

		/* ~10% of iterations: advance the system-wide default */
		if (rand() % 10 == 0) {
			unsigned long next_dscr;

			FAIL_IF_EXIT(pthread_rwlock_wrlock(rw_lock));
			next_dscr = (*expected_dscr_p + 1) % DSCR_MAX;
			set_default_dscr(next_dscr);
			*expected_dscr_p = next_dscr;
			FAIL_IF_EXIT(pthread_rwlock_unlock(rw_lock));
		}
	}

	pthread_exit((void *)0);
}
/*
 * Spawn THREADS workers that concurrently verify and randomly change the
 * system-wide default DSCR, synchronised by a writer-preferring rwlock.
 *
 * Fix: the original called pthread_rwlockattr_setkind_np() on an
 * uninitialized attribute object; POSIX requires pthread_rwlockattr_init()
 * first (and the attribute can be destroyed once the lock is created).
 */
int dscr_default_random_test(void)
{
	struct random_thread_args threads[THREADS];
	unsigned long expected_system_dscr = 0;
	pthread_rwlockattr_t rwlock_attr;
	pthread_rwlock_t rw_lock;
	pthread_barrier_t barrier;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	FAIL_IF(pthread_rwlockattr_init(&rwlock_attr));
	/* Prefer writers so default-DSCR updates are not starved by readers */
	FAIL_IF(pthread_rwlockattr_setkind_np(&rwlock_attr,
					      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP));
	FAIL_IF(pthread_rwlock_init(&rw_lock, &rwlock_attr));
	FAIL_IF(pthread_rwlockattr_destroy(&rwlock_attr));
	FAIL_IF(pthread_barrier_init(&barrier, NULL, THREADS));

	set_default_dscr(expected_system_dscr);

	for (int i = 0; i < THREADS; i++) {
		threads[i].expected_system_dscr = &expected_system_dscr;
		threads[i].rw_lock = &rw_lock;
		threads[i].barrier = &barrier;

		FAIL_IF(pthread_create(&threads[i].thread_id, NULL,
				       dscr_default_random_thread, (void *)&threads[i]));
	}

	for (int i = 0; i < THREADS; i++)
		FAIL_IF(pthread_join(threads[i].thread_id, NULL));

	FAIL_IF(pthread_barrier_destroy(&barrier));
	FAIL_IF(pthread_rwlock_destroy(&rw_lock));

	return 0;
}
int main(int argc, char *argv[])
{
	unsigned long orig_dscr_default = 0;
	int err = 0;

	/* Save the system-wide default so it can be restored after the tests */
	if (have_hwcap2(PPC_FEATURE2_DSCR))
		orig_dscr_default = get_default_dscr();

	err |= test_harness(dscr_default_lockstep_test, "dscr_default_lockstep_test");
	err |= test_harness(dscr_default_random_test, "dscr_default_random_test");

	if (have_hwcap2(PPC_FEATURE2_DSCR))
		set_default_dscr(orig_dscr_default);

	return err;
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_default_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) SPR test
*
* This test modifies the DSCR value through both the SPR number
* based mtspr instruction and then makes sure that the same is
* reflected through mfspr instruction using either of the SPR
* numbers.
*
* When using the privilege state SPR, the instructions such as
* mfspr or mtspr are privileged and the kernel emulates them
* for us. Instructions using problem state SPR can be executed
* directly without any emulation if the HW supports them. Else
* they also get emulated by the kernel.
*
* Copyright 2013, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#include "dscr.h"
/*
 * Compare the DSCR as read through the privileged SPR and the problem
 * state SPR.  Returns 0 if they agree, 1 (after logging) otherwise.
 */
static int check_dscr(char *str)
{
	unsigned long kernel_val = get_dscr();
	unsigned long user_val = get_dscr_usr();

	if (kernel_val == user_val)
		return 0;

	printf("%s set, kernel get %lx != user get %lx\n",
	       str, kernel_val, user_val);
	return 1;
}
/*
 * Write every value 0..COUNT-1 to the DSCR, first via the privileged SPR
 * then via the problem state SPR, verifying after each write that both
 * SPR numbers read back the same value.
 */
int dscr_user(void)
{
	int i;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	/* Initial sanity check; its result is deliberately ignored */
	check_dscr("");

	for (i = 0; i < COUNT; i++) {
		set_dscr(i);
		if (check_dscr("kernel"))
			return 1;
	}

	for (i = 0; i < COUNT; i++) {
		set_dscr_usr(i);
		if (check_dscr("user"))
			return 1;
	}

	return 0;
}
int main(int argc, char *argv[])
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(dscr_user, "dscr_user_test");
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_user_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs thread test
*
* This test updates the system wide DSCR default value through
* sysfs interface which should then update all the CPU specific
* DSCR default values which must also be then visible to threads
* executing on individual CPUs on the system.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#define _GNU_SOURCE
#include "dscr.h"
/*
 * Verify the current thread sees the expected DSCR default through both
 * the privileged and problem state SPRs.  Returns 0 on match, 1 otherwise.
 * (%lu is used — val and the read values are unsigned long, so %ld was a
 * format-specifier mismatch.)
 */
static int test_thread_dscr(unsigned long val)
{
	unsigned long cur_dscr, cur_dscr_usr;

	cur_dscr = get_dscr();
	cur_dscr_usr = get_dscr_usr();

	if (val != cur_dscr) {
		printf("[cpu %d] Kernel DSCR should be %lu but is %lu\n",
		       sched_getcpu(), val, cur_dscr);
		return 1;
	}

	if (val != cur_dscr_usr) {
		printf("[cpu %d] User DSCR should be %lu but is %lu\n",
		       sched_getcpu(), val, cur_dscr_usr);
		return 1;
	}

	return 0;
}
/*
 * Migrate this thread onto each CPU in turn and verify it observes the
 * expected default DSCR there.  CPUs we cannot run on are skipped.
 */
static int check_cpu_dscr_thread(unsigned long val)
{
	int cpu;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);

		/* Offline or disallowed CPU: move on */
		if (sched_setaffinity(0, sizeof(mask), &mask) != 0)
			continue;

		if (test_thread_dscr(val) != 0)
			return 1;
	}

	return 0;
}
/*
 * Repeatedly sweep the system-wide default DSCR through all values and
 * check that a thread migrated across every CPU observes each value.
 * The original default is always restored before returning.
 */
int dscr_sysfs_thread(void)
{
	unsigned long saved_dscr;
	int rc = 0;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	saved_dscr = get_default_dscr();

	for (int pass = 0; pass < COUNT && !rc; pass++) {
		for (int val = 0; val < DSCR_MAX && !rc; val++) {
			set_default_dscr(val);
			rc = check_cpu_dscr_thread(val);
		}
	}

	set_default_dscr(saved_dscr);
	return rc;
}
int main(int argc, char *argv[])
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(dscr_sysfs_thread, "dscr_sysfs_thread_test");
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork exec test
*
* This testcase modifies the DSCR using mtspr, forks & execs and
* verifies that the child is using the changed DSCR using mfspr.
*
* When using the privilege state SPR, the instructions such as
* mfspr or mtspr are privileged and the kernel emulates them
* for us. Instructions using problem state SPR can be executed
* directly without any emulation if the HW supports them. Else
* they also get emulated by the kernel.
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
*/
#include "dscr.h"
static char *prog;
/*
 * Post-exec child path: verify both DSCR SPRs still hold the value the
 * parent set before exec.  Never returns — exits 0 on success, 1 on
 * mismatch.  (%lu is used: parent_dscr is unsigned long, so %ld was a
 * format-specifier mismatch.)
 */
static void do_exec(unsigned long parent_dscr)
{
	unsigned long cur_dscr, cur_dscr_usr;

	cur_dscr = get_dscr();
	cur_dscr_usr = get_dscr_usr();

	if (cur_dscr != parent_dscr) {
		fprintf(stderr, "Parent DSCR %lu was not inherited "
			"over exec (kernel value)\n", parent_dscr);
		exit(1);
	}

	if (cur_dscr_usr != parent_dscr) {
		fprintf(stderr, "Parent DSCR %lu was not inherited "
			"over exec (user value)\n", parent_dscr);
		exit(1);
	}

	exit(0);
}
/*
 * Set the DSCR (alternating between SPRs), fork and re-exec ourselves,
 * then verify in the exec'd child (see do_exec()) that the value was
 * inherited across exec.  Values matching the system default are skipped
 * since inheritance could not be distinguished from defaulting.
 *
 * Fixes vs. original: snprintf with %lu (dscr is unsigned long and the
 * buffer is only 16 bytes — sprintf("%ld") risked overflow), and the
 * non-zero-exit message no longer claims the child "didn't exit cleanly".
 */
int dscr_inherit_exec(void)
{
	unsigned long i, dscr = 0;
	pid_t pid;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));

	for (i = 0; i < COUNT; i++) {
		dscr++;
		if (dscr > DSCR_MAX)
			dscr = 0;

		if (dscr == get_default_dscr())
			continue;

		if (i % 2 == 0)
			set_dscr_usr(dscr);
		else
			set_dscr(dscr);

		pid = fork();
		if (pid == -1) {
			perror("fork() failed");
			exit(1);
		} else if (pid) {
			int status;

			if (waitpid(pid, &status, 0) == -1) {
				perror("waitpid() failed");
				exit(1);
			}

			if (!WIFEXITED(status)) {
				fprintf(stderr, "Child didn't exit cleanly\n");
				exit(1);
			}

			if (WEXITSTATUS(status) != 0) {
				fprintf(stderr, "Child reported failure\n");
				return 1;
			}
		} else {
			char dscr_str[16];

			snprintf(dscr_str, sizeof(dscr_str), "%lu", dscr);
			execlp(prog, prog, "exec", dscr_str, NULL);
			exit(1);	/* only reached if execlp() failed */
		}
	}

	return 0;
}
int main(int argc, char *argv[])
{
	/* "exec" mode: we are the re-exec'd child checking inheritance */
	if (argc == 3 && !strcmp(argv[1], "exec")) {
		unsigned long parent_dscr;

		/* strtoul: atoi() cannot represent the unsigned long range */
		parent_dscr = strtoul(argv[2], NULL, 10);
		do_exec(parent_dscr);
	} else if (argc != 1) {
		fprintf(stderr, "Usage: %s\n", argv[0]);
		exit(1);
	}

	prog = argv[0];
	return test_harness(dscr_inherit_exec, "dscr_inherit_exec_test");
}
| linux-master | tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE
#include <elf.h>
#include <limits.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sys/prctl.h>
#include "event.h"
#include "lib.h"
#include "utils.h"
/*
* Test that per-event excludes work.
*/
/*
 * Open one unrestricted instruction counter plus three restricted ones
 * (kernel-only, hypervisor-only, user-only) in a single group, run a spin
 * loop, and verify the group scheduled as a unit (enabled == running for
 * all) with the unrestricted count >= each restricted count.
 */
static int per_event_excludes(void)
{
	struct event *e, events[4];
	int i;

	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	/*
	 * We need to create the events disabled, otherwise the running/enabled
	 * counts don't match up.
	 */
	e = &events[0];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions");
	e->attr.disabled = 1;

	/* Kernel-only: exclude user and hypervisor */
	e = &events[1];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(k)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_hv = 1;

	/* Hypervisor-only: exclude user and kernel */
	e = &events[2];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(h)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_kernel = 1;

	/* User-only: exclude hypervisor and kernel */
	e = &events[3];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(u)");
	e->attr.disabled = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_kernel = 1;

	FAIL_IF(event_open(&events[0]));

	/*
	 * The open here will fail if we don't have per event exclude support,
	 * because the second event has an incompatible set of exclude settings
	 * and we're asking for the events to be in a group.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(event_open_with_group(&events[i], events[0].fd));

	/*
	 * Even though the above will fail without per-event excludes we keep
	 * testing in order to be thorough.
	 */
	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	/* Spin for a while */
	for (i = 0; i < INT_MAX; i++)
		asm volatile("" : : : "memory");

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	for (i = 0; i < 4; i++) {
		FAIL_IF(event_read(&events[i]));
		event_report(&events[i]);
	}

	/*
	 * We should see that all events have enabled == running. That
	 * shows that they were all on the PMU at once.
	 */
	for (i = 0; i < 4; i++)
		FAIL_IF(events[i].result.running != events[i].result.enabled);

	/*
	 * We can also check that the result for instructions is >= all the
	 * other counts. That's because it is counting all instructions while
	 * the others are counting a subset.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(events[0].result.value < events[i].result.value);

	for (i = 0; i < 4; i++)
		event_close(&events[i]);

	return 0;
}
int main(void)
{
	/* Run via the selftest harness, which handles reporting and timeouts */
	return test_harness(per_event_excludes, "per_event_excludes");
}
| linux-master | tools/testing/selftests/powerpc/pmu/per_event_excludes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
#include <errno.h>
#include <sched.h>
#include <setjmp.h>
#include <stdlib.h>
#include <sys/wait.h>
#include "utils.h"
#include "lib.h"
#define PARENT_TOKEN 0xAA
#define CHILD_TOKEN 0x55
/*
 * Send the parent token to the child and wait for its reply.
 * Returns 0 when the child answered with CHILD_TOKEN, 1 otherwise
 * (a mismatched token is a normal outcome for some tests, not a FAIL).
 */
int sync_with_child(union pipe read_pipe, union pipe write_pipe)
{
	char token = PARENT_TOKEN;

	FAIL_IF(write(write_pipe.write_fd, &token, 1) != 1);
	FAIL_IF(read(read_pipe.read_fd, &token, 1) != 1);

	return token == CHILD_TOKEN ? 0 : 1;
}
/* Block until the parent sends its token; fail on a short read or bad token. */
int wait_for_parent(union pipe read_pipe)
{
	char token;

	FAIL_IF(read(read_pipe.read_fd, &token, 1) != 1);
	FAIL_IF(token != PARENT_TOKEN);

	return 0;
}
/* Tell the parent we are ready by sending the child token. */
int notify_parent(union pipe write_pipe)
{
	char token = CHILD_TOKEN;

	FAIL_IF(write(write_pipe.write_fd, &token, 1) != 1);

	return 0;
}
/* Signal failure to the parent by sending the inverted child token. */
int notify_parent_of_error(union pipe write_pipe)
{
	char token = ~CHILD_TOKEN;

	FAIL_IF(write(write_pipe.write_fd, &token, 1) != 1);

	return 0;
}
/*
 * Reap a child process.  Returns the child's exit status on a normal
 * exit, or 1 if waitpid() failed or the child died from a signal.
 */
int wait_for_child(pid_t child_pid)
{
	int status;

	if (waitpid(child_pid, &status, 0) == -1) {
		perror("waitpid");
		return 1;
	}

	return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}
/* Ask the child to terminate with SIGTERM, then reap it. */
int kill_child_and_wait(pid_t child_pid)
{
	kill(child_pid, SIGTERM);

	return wait_for_child(child_pid);
}
/*
 * Child body for eat_cpu(): sync with the parent, then spin on the CPU
 * forever until killed.
 */
static int eat_cpu_child(union pipe read_pipe, union pipe write_pipe)
{
	volatile int spin = 0;

	/*
	 * We are just here to eat cpu and die. So make sure we can be killed,
	 * and also don't do any custom SIGTERM handling.
	 */
	signal(SIGTERM, SIG_DFL);

	notify_parent(write_pipe);
	wait_for_parent(read_pipe);

	/* Soak up cpu forever */
	for (;;)
		spin++;

	return 0;
}
/*
 * Bind to a CPU, fork a child that spins on it, run test_function() in
 * the parent, then kill the child.  Returns the test result, or -1 on
 * setup failure.
 *
 * Fix: fork() failure was previously unchecked — pid stayed -1, the
 * parent blocked in sync_with_child() and, had it reached the cleanup,
 * kill(-1, SIGKILL) would have signalled every process we may signal.
 * Also close the first pipe's fds if creating the second pipe fails.
 */
pid_t eat_cpu(int (test_function)(void))
{
	union pipe read_pipe, write_pipe;
	int rc;
	pid_t pid;

	FAIL_IF(bind_to_cpu(BIND_CPU_ANY) < 0);

	if (pipe(read_pipe.fds) == -1)
		return -1;

	if (pipe(write_pipe.fds) == -1) {
		close(read_pipe.read_fd);
		close(read_pipe.write_fd);
		return -1;
	}

	pid = fork();
	if (pid == -1) {
		perror("fork");
		return -1;
	}
	if (pid == 0)
		exit(eat_cpu_child(write_pipe, read_pipe));

	if (sync_with_child(read_pipe, write_pipe)) {
		rc = -1;
		goto out;
	}

	printf("main test running as pid %d\n", getpid());

	rc = test_function();
out:
	kill(pid, SIGKILL);

	return rc;
}
struct addr_range libc, vdso;
/*
 * Scan /proc/self/maps for the executable mappings of libc and the vdso,
 * recording their [first, last] address ranges in the global 'libc' and
 * 'vdso' structs.  Returns 0 on success, -1 if the file can't be opened.
 */
int parse_proc_maps(void)
{
	unsigned long start, end;
	char execute, name[128];
	FILE *f;
	int rc;

	f = fopen("/proc/self/maps", "r");
	if (!f) {
		perror("fopen");
		return -1;
	}

	do {
		/* This skips line with no executable which is what we want */
		rc = fscanf(f, "%lx-%lx %*c%*c%c%*c %*x %*d:%*d %*d %127s\n",
			    &start, &end, &execute, name);
		if (rc <= 0)
			break;

		/* Only executable mappings are of interest */
		if (execute != 'x')
			continue;

		if (strstr(name, "libc")) {
			libc.first = start;
			libc.last = end - 1;
		} else if (strstr(name, "[vdso]")) {
			vdso.first = start;
			vdso.last = end - 1;
		}
	} while(1);

	fclose(f);

	return 0;
}
#define PARANOID_PATH "/proc/sys/kernel/perf_event_paranoid"
/*
 * Return true when perf_event_paranoid is strictly below the given level,
 * i.e. the current settings are permissive enough for the caller's test.
 */
bool require_paranoia_below(int level)
{
	long value;

	if (read_long(PARANOID_PATH, &value, 10)) {
		printf("Couldn't parse " PARANOID_PATH "?\n");
		return false;
	}

	return value < level;
}
| linux-master | tools/testing/selftests/powerpc/pmu/lib.c |
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sys/prctl.h>
#include "event.h"
#include "utils.h"
#include "lib.h"
extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
/*
 * Initialise an event that counts user mode only and starts disabled,
 * so the whole group can be enabled atomically via prctl() later.
 */
static void setup_event(struct event *e, u64 config, int type, char *name)
{
	event_init_opts(e, config, type, name);

	e->attr.disabled = 1;
	e->attr.exclude_idle = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_kernel = 1;
}
/*
 * Run the 32-instruction ll/sc loop for 'instructions' instructions with
 * the event group enabled, then compare the measured instruction count
 * against the expectation.  The expectation includes the measured loop
 * overhead plus 10 instructions per stcx failure (presumably the cost of
 * each retry — TODO confirm against the asm loop).  Returns 0 when the
 * delta is below 0.0001%, -1 otherwise.
 */
static int do_count_loop(struct event *events, u64 instructions,
			 u64 overhead, bool report)
{
	s64 difference, expected;
	double percentage;
	u64 dummy;

	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	/* Run for 1M instructions */
	thirty_two_instruction_loop_with_ll_sc(instructions >> 5, &dummy);

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	event_read(&events[0]);
	event_read(&events[1]);
	event_read(&events[2]);

	expected = instructions + overhead + (events[2].result.value * 10);
	difference = events[0].result.value - expected;
	percentage = (double)difference / events[0].result.value * 100;

	if (report) {
		printf("-----\n");
		event_report(&events[0]);
		event_report(&events[1]);
		event_report(&events[2]);

		printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead);
		printf("Expected %llu\n", expected);
		printf("Actual   %llu\n", events[0].result.value);
		printf("Delta    %lld, %f%%\n", difference, percentage);
	}

	event_reset(&events[0]);
	event_reset(&events[1]);
	event_reset(&events[2]);

	if (difference < 0)
		difference = -difference;

	/* Tolerate a difference below 0.0001 % */
	difference *= 10000 * 100;
	if (difference / events[0].result.value)
		return -1;

	return 0;
}
/* Count how many instructions it takes to do a null loop */
static u64 determine_overhead(struct event *events)
{
	u64 lowest, sample;
	int i;

	/* Take an initial measurement, then keep the minimum of 100 runs */
	do_count_loop(events, 0, 0, false);
	lowest = events[0].result.value;

	for (i = 0; i < 100; i++) {
		do_count_loop(events, 0, 0, false);
		sample = events[0].result.value;

		if (sample < lowest) {
			printf("Replacing overhead %llu with %llu\n", lowest, sample);
			lowest = sample;
		}
	}

	return lowest;
}
/* Raw event codes for marked/unmarked stcx failures (Power8 onwards) */
#define PM_MRK_STCX_FAIL 0x03e158
#define PM_STCX_FAIL 0x01e058

/*
 * Count instructions, cycles and stcx failures over loops of known
 * length and check the instruction count matches expectations.
 */
static int test_body(void)
{
	struct event events[3];
	u64 overhead;

	// The STCX_FAIL event we use works on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "instructions");
	setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "cycles");
	setup_event(&events[2], PM_STCX_FAIL, PERF_TYPE_RAW, "stcx_fail");

	if (event_open(&events[0])) {
		perror("perf_event_open");
		return -1;
	}

	if (event_open_with_group(&events[1], events[0].fd)) {
		perror("perf_event_open");
		return -1;
	}

	if (event_open_with_group(&events[2], events[0].fd)) {
		perror("perf_event_open");
		return -1;
	}

	overhead = determine_overhead(events);
	printf("Overhead of null loop: %llu instructions\n", overhead);

	/* Run for 1Mi instructions */
	FAIL_IF(do_count_loop(events, 1000000, overhead, true));

	/* Run for 10Mi instructions */
	FAIL_IF(do_count_loop(events, 10000000, overhead, true));

	/* Run for 100Mi instructions */
	FAIL_IF(do_count_loop(events, 100000000, overhead, true));

	/* Run for 1Bi instructions */
	FAIL_IF(do_count_loop(events, 1000000000, overhead, true));

	/* Run for 16Bi instructions */
	FAIL_IF(do_count_loop(events, 16000000000, overhead, true));

	/* Run for 64Bi instructions */
	FAIL_IF(do_count_loop(events, 64000000000, overhead, true));

	/* Close all three events, including the stcx_fail group member,
	 * which was previously leaked. */
	event_close(&events[0]);
	event_close(&events[1]);
	event_close(&events[2]);

	return 0;
}
/* Run test_body with a cpu-eater companion thread on the same CPU, to
 * provoke context switches while counting. */
static int count_ll_sc(void)
{
return eat_cpu(test_body);
}

/* Register the test with the powerpc selftest harness. */
int main(void)
{
return test_harness(count_ll_sc, "count_ll_sc");
}
| linux-master | tools/testing/selftests/powerpc/pmu/count_stcx_fail.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/ioctl.h>
#include "event.h"
/* glibc provides no perf_event_open() wrapper, so invoke the raw syscall. */
int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
int group_fd, unsigned long flags)
{
return syscall(__NR_perf_event_open, attr, pid, cpu,
group_fd, flags);
}
/*
 * Common initialisation: zero the event, record name/type/config and
 * request enabled+running times in the read format. Sampling events
 * additionally get a fixed period of 1000 and start disabled.
 */
static void __event_init_opts(struct event *e, u64 config,
			      int type, char *name, bool sampling)
{
	memset(e, 0, sizeof(*e));

	e->name = name;

	e->attr.size = sizeof(e->attr);
	e->attr.type = type;
	e->attr.config = config;
	/* This has to match the structure layout in the header */
	e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING;

	if (!sampling)
		return;

	e->attr.disabled = 1;
	e->attr.sample_period = 1000;
	e->attr.sample_type = PERF_SAMPLE_REGS_INTR;
}
/* Initialise a counting event with explicit type, config and name. */
void event_init_opts(struct event *e, u64 config, int type, char *name)
{
__event_init_opts(e, config, type, name, false);
}

/* Initialise a named raw (PERF_TYPE_RAW) counting event. */
void event_init_named(struct event *e, u64 config, char *name)
{
event_init_opts(e, config, PERF_TYPE_RAW, name);
}

/* Initialise an anonymous ("event") raw counting event. */
void event_init(struct event *e, u64 config)
{
event_init_opts(e, config, PERF_TYPE_RAW, "event");
}

/* Initialise a raw sampling event (period 1000, starts disabled). */
void event_init_sampling(struct event *e, u64 config)
{
__event_init_opts(e, config, PERF_TYPE_RAW, "event", true);
}
/* Convenience values for perf_event_open()'s pid/cpu/group_fd arguments */
#define PERF_CURRENT_PID 0
#define PERF_NO_PID -1
#define PERF_NO_CPU -1
#define PERF_NO_GROUP -1

/* Open the event; returns 0 on success, -1 (after perror) on failure. */
int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
{
e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0);
if (e->fd == -1) {
perror("perf_event_open");
return -1;
}
return 0;
}
/* Open on the current process, any CPU, as a member of group_fd's group. */
int event_open_with_group(struct event *e, int group_fd)
{
return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd);
}

/* Open on a specific pid, any CPU, no group. */
int event_open_with_pid(struct event *e, pid_t pid)
{
return event_open_with_options(e, pid, PERF_NO_CPU, PERF_NO_GROUP);
}

/* Open on a specific CPU, all pids, no group. */
int event_open_with_cpu(struct event *e, int cpu)
{
return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP);
}

/* Open on the current process, any CPU, no group. */
int event_open(struct event *e)
{
return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP);
}
/* Close the event fd; the kernel destroys the counter on last close. */
void event_close(struct event *e)
{
close(e->fd);
}

/* ioctl wrappers; each returns 0 on success, -1 on error (per ioctl(2)). */
int event_enable(struct event *e)
{
return ioctl(e->fd, PERF_EVENT_IOC_ENABLE);
}

int event_disable(struct event *e)
{
return ioctl(e->fd, PERF_EVENT_IOC_DISABLE);
}

int event_reset(struct event *e)
{
return ioctl(e->fd, PERF_EVENT_IOC_RESET);
}
/*
 * Read the counter value plus enabled/running times into e->result.
 * Returns 0 on success, -1 on error or short read.
 */
int event_read(struct event *e)
{
	ssize_t rc;	/* read(2) returns ssize_t, not int */

	rc = read(e->fd, &e->result, sizeof(e->result));
	if (rc != sizeof(e->result)) {
		fprintf(stderr, "read error on event %p!\n", e);
		return -1;
	}

	return 0;
}
/* Print name and value with field widths, for column-aligned output.
 * If the event was multiplexed (running != enabled) print both times. */
void event_report_justified(struct event *e, int name_width, int result_width)
{
printf("%*s: result %*llu ", name_width, e->name, result_width,
e->result.value);

if (e->result.running == e->result.enabled)
printf("running/enabled %llu\n", e->result.running);
else
printf("running %llu enabled %llu\n", e->result.running,
e->result.enabled);
}

/* Print the event's result with no alignment. */
void event_report(struct event *e)
{
event_report_justified(e, 0, 0);
}
| linux-master | tools/testing/selftests/powerpc/pmu/event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "event.h"
#include "utils.h"
#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */

/*
 * Tests that the L3 bank handling is correct. We fixed it in commit e9aaac1.
 */
static int l3_bank_test(void)
{
	struct event event;
	char *p;
	int i;

	// The L3 bank logic is only used on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	p = malloc(MALLOC_SIZE);
	FAIL_IF(!p);

	/* 0x84918F is a raw L3-related event code */
	event_init(&event, 0x84918F);

	FAIL_IF(event_open(&event));

	/* Touch one byte per 64K to generate cache traffic */
	for (i = 0; i < MALLOC_SIZE; i += 0x10000)
		p[i] = i;

	/* Check the read succeeded rather than ignoring its return value */
	FAIL_IF(event_read(&event));
	event_report(&event);

	FAIL_IF(event.result.running == 0);
	FAIL_IF(event.result.enabled == 0);

	event_close(&event);
	free(p);

	return 0;
}

int main(void)
{
	return test_harness(l3_bank_test, "l3_bank_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/l3_bank_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sys/prctl.h>
#include "event.h"
#include "utils.h"
#include "lib.h"
extern void thirty_two_instruction_loop(u64 loops);
/* Initialise a user-space-only hardware counting event; created disabled
 * so both group members can be started together via prctl(). */
static void setup_event(struct event *e, u64 config, char *name)
{
event_init_opts(e, config, PERF_TYPE_HARDWARE, name);
e->attr.disabled = 1;
e->attr.exclude_kernel = 1;
e->attr.exclude_hv = 1;
e->attr.exclude_idle = 1;
}
/*
 * Enable the events, execute @instructions instructions of a known asm
 * loop and compare the measured instruction count (events[0]) against
 * the expected value. Returns 0 if the delta is below 0.0001%, -1
 * otherwise.
 */
static int do_count_loop(struct event *events, u64 instructions,
			 u64 overhead, bool report)
{
	s64 difference, expected;
	double percentage;

	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	/* The asm loop executes 32 instructions per iteration */
	thirty_two_instruction_loop(instructions >> 5);

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	event_read(&events[0]);
	event_read(&events[1]);

	expected = instructions + overhead;
	difference = events[0].result.value - expected;

	/* Guard the divisions below against a zero count */
	if (events[0].result.value == 0)
		return -1;

	percentage = (double)difference / events[0].result.value * 100;

	if (report) {
		event_report(&events[0]);
		event_report(&events[1]);

		printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead);
		/* expected and difference are signed (s64), so use %lld */
		printf("Expected %lld\n", expected);
		printf("Actual %llu\n", events[0].result.value);
		printf("Delta %lld, %f%%\n", difference, percentage);
	}

	event_reset(&events[0]);
	event_reset(&events[1]);

	if (difference < 0)
		difference = -difference;

	/* Tolerate a difference below 0.0001 % */
	difference *= 10000 * 100;
	if (difference / events[0].result.value)
		return -1;

	return 0;
}
/* Count how many instructions it takes to do a null loop */
static u64 determine_overhead(struct event *events)
{
	u64 minimum, sample;
	int iter;

	/* Take one initial sample ... */
	do_count_loop(events, 0, 0, false);
	minimum = events[0].result.value;

	/* ... then keep the smallest of 100 further samples */
	for (iter = 0; iter < 100; iter++) {
		do_count_loop(events, 0, 0, false);

		sample = events[0].result.value;
		if (sample < minimum) {
			printf("Replacing overhead %llu with %llu\n", minimum, sample);
			minimum = sample;
		}
	}

	return minimum;
}
/* Count instructions and cycles over loops of known length and verify
 * the measured instruction count against expectations. */
static int test_body(void)
{
struct event events[2];
u64 overhead;
setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, "instructions");
setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, "cycles");
if (event_open(&events[0])) {
perror("perf_event_open");
return -1;
}
/* cycles is grouped under instructions so both count together */
if (event_open_with_group(&events[1], events[0].fd)) {
perror("perf_event_open");
return -1;
}
overhead = determine_overhead(events);
printf("Overhead of null loop: %llu instructions\n", overhead);
/* Run for 1Mi instructions */
FAIL_IF(do_count_loop(events, 1000000, overhead, true));
/* Run for 10Mi instructions */
FAIL_IF(do_count_loop(events, 10000000, overhead, true));
/* Run for 100Mi instructions */
FAIL_IF(do_count_loop(events, 100000000, overhead, true));
/* Run for 1Bi instructions */
FAIL_IF(do_count_loop(events, 1000000000, overhead, true));
/* Run for 16Bi instructions */
FAIL_IF(do_count_loop(events, 16000000000, overhead, true));
/* Run for 64Bi instructions */
FAIL_IF(do_count_loop(events, 64000000000, overhead, true));
event_close(&events[0]);
event_close(&events[1]);
return 0;
}
/* Run test_body with a cpu-eater companion thread on the same CPU. */
static int count_instructions(void)
{
return eat_cpu(test_body);
}

/* Register the test with the powerpc selftest harness. */
int main(void)
{
return test_harness(count_instructions, "count_instructions");
}
| linux-master | tools/testing/selftests/powerpc/pmu/count_instructions.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Test that the kernel properly handles PMAE across context switches.
*
* We test this by calling into the kernel inside our EBB handler, where PMAE
* is clear. A cpu eater companion thread is running on the same CPU as us to
* encourage the scheduler to switch us.
*
* The kernel must make sure that when it context switches us back in, it
* honours the fact that we had PMAE clear.
*
* Observed to hit the failing case on the first EBB with a broken kernel.
*/
/* Set if MMCR0 differs across the sched_yield() in the handler */
static bool mmcr0_mismatch;
/* MMCR0 values sampled before/after the yield, for reporting */
static uint64_t before, after;

/* EBB handler: after accounting the EBB, call sched_yield() to try to
 * provoke a PMU context switch, then check MMCR0 was restored intact. */
static void syscall_ebb_callee(void)
{
uint64_t val;
/* Check which bit caused the EBB; anything but PMEO is spurious */
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
count_pmc(1, sample_period);
before = mfspr(SPRN_MMCR0);
/* Try and get ourselves scheduled, to force a PMU context switch */
sched_yield();
after = mfspr(SPRN_MMCR0);
if (before != after)
mmcr0_mismatch = true;
out:
reset_ebb();
}
/* Take ~20 EBBs that each syscall from the handler, and check MMCR0 was
 * preserved across any context switch that happened in between. */
static int test_body(void)
{
struct event event;
SKIP_IF(!ebb_is_supported());
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(syscall_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 20 && !mmcr0_mismatch)
FAIL_IF(core_busy_loop());
ebb_global_disable();
ebb_freeze_pmcs();
dump_ebb_state();
if (mmcr0_mismatch)
/* NOTE(review): %lx assumes 64-bit long for uint64_t - true on ppc64 */
printf("Saw MMCR0 before 0x%lx after 0x%lx\n", before, after);
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
FAIL_IF(mmcr0_mismatch);
return 0;
}
/* Run test_body with a cpu-eater companion to encourage context switches. */
int pmae_handling(void)
{
return eat_cpu(test_body);
}

int main(void)
{
return test_harness(pmae_handling, "pmae_handling");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Basic test that counts user cycles and takes EBBs.
*/
/* Count user cycles on PMC1 and take at least 10 EBBs, then verify the
 * per-PMC count matches the sample period within tolerance. */
int cycles(void)
{
struct event event;
SKIP_IF(!ebb_is_supported());
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 10) {
FAIL_IF(core_busy_loop());
FAIL_IF(ebb_check_mmcr0());
}
ebb_global_disable();
ebb_freeze_pmcs();
dump_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
/* 100 = tolerated slop per sample period */
FAIL_IF(!ebb_check_count(1, sample_period, 100));
return 0;
}

int main(void)
{
return test_harness(cycles, "cycles");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a per-task event vs an EBB - in that order. The EBB should push the
* per-task event off the PMU.
*/
/* Attach a user-space-only instruction-count event to the child and
 * enable it. Returns 0 on success. */
static int setup_child_event(struct event *event, pid_t child_pid)
{
event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event->attr.exclude_kernel = 1;
event->attr.exclude_hv = 1;
event->attr.exclude_idle = 1;
FAIL_IF(event_open_with_pid(event, child_pid));
FAIL_IF(event_enable(event));
return 0;
}
/* Install a per-task event on the child, then have the child create an
 * EBB event; the EBB should push the task event off the PMU. */
int task_event_vs_ebb(void)
{
union pipe read_pipe, write_pipe;
struct event event;
pid_t pid;
int rc;
SKIP_IF(!ebb_is_supported());
FAIL_IF(pipe(read_pipe.fds) == -1);
FAIL_IF(pipe(write_pipe.fds) == -1);
pid = fork();
if (pid == 0) {
/* NB order of pipes looks reversed */
exit(ebb_child(write_pipe, read_pipe));
}
/* We setup the task event first */
rc = setup_child_event(&event, pid);
if (rc) {
kill_child_and_wait(pid);
return rc;
}
/* Signal the child to install its EBB event and wait */
if (sync_with_child(read_pipe, write_pipe))
/* If it fails, wait for it to exit */
goto wait;
/* Signal the child to run */
FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
/* The EBB event should push the task event off so the child should succeed */
FAIL_IF(wait_for_child(pid));
FAIL_IF(event_disable(&event));
FAIL_IF(event_read(&event));
event_report(&event);
/* The task event may have run, or not so we can't assert anything about it */
return 0;
}

int main(void)
{
return test_harness(task_event_vs_ebb, "task_event_vs_ebb");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "trace.h"
/*
 * mmap an anonymous buffer of @size bytes and initialise its header.
 * Returns NULL if @size can't even hold the header, or if mmap fails.
 */
struct trace_buffer *trace_buffer_allocate(u64 size)
{
	struct trace_buffer *buf;

	if (size < sizeof(*buf)) {
		fprintf(stderr, "Error: trace buffer too small\n");
		return NULL;
	}

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}

	/* The log grows from data[] towards the end of the mapping */
	buf->size = size;
	buf->tail = buf->data;
	buf->overflow = false;

	return buf;
}
/* True if @p is still inside the mapped buffer. */
static bool trace_check_bounds(struct trace_buffer *tb, void *p)
{
return p < ((void *)tb + tb->size);
}

/* True if an allocation ending at @p is permitted; latches overflow. */
static bool trace_check_alloc(struct trace_buffer *tb, void *p)
{
/*
* If we ever overflowed don't allow any more input. This prevents us
* from dropping a large item and then later logging a small one. The
* buffer should just stop when overflow happened, not be patchy. If
* you're overflowing, make your buffer bigger.
*/
if (tb->overflow)
return false;
if (!trace_check_bounds(tb, p)) {
tb->overflow = true;
return false;
}
return true;
}

/* Bump-allocate @bytes from the buffer; NULL if it doesn't fit. */
static void *trace_alloc(struct trace_buffer *tb, int bytes)
{
void *p, *newtail;
p = tb->tail;
newtail = tb->tail + bytes;
if (!trace_check_alloc(tb, newtail))
return NULL;
tb->tail = newtail;
return p;
}

/* Allocate an entry header plus @payload_size bytes of payload. */
static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size)
{
struct trace_entry *e;
e = trace_alloc(tb, sizeof(*e) + payload_size);
if (e)
e->length = payload_size;
return e;
}
/* Append a (reg, value) pair entry; -ENOSPC if the buffer is full. */
int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value)
{
	struct trace_entry *e;
	u64 *payload;

	e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value));
	if (!e)
		return -ENOSPC;

	e->type = TRACE_TYPE_REG;

	payload = (u64 *)e->data;
	payload[0] = reg;
	payload[1] = value;

	return 0;
}
/* Append a single u64 counter entry; -ENOSPC if the buffer is full. */
int trace_log_counter(struct trace_buffer *tb, u64 value)
{
struct trace_entry *e;
u64 *p;
e = trace_alloc_entry(tb, sizeof(value));
if (!e)
return -ENOSPC;
e->type = TRACE_TYPE_COUNTER;
p = (u64 *)e->data;
*p++ = value;
return 0;
}

/* Append a string entry; -ENOSPC if the buffer is full. */
int trace_log_string(struct trace_buffer *tb, char *str)
{
struct trace_entry *e;
char *p;
int len;
len = strlen(str);
/* We NULL terminate to make printing easier */
e = trace_alloc_entry(tb, len + 1);
if (!e)
return -ENOSPC;
e->type = TRACE_TYPE_STRING;
p = (char *)e->data;
memcpy(p, str, len);
p += len;
*p = '\0';
return 0;
}

/* Append a payload-less marker that increases the print indent. */
int trace_log_indent(struct trace_buffer *tb)
{
struct trace_entry *e;
e = trace_alloc_entry(tb, 0);
if (!e)
return -ENOSPC;
e->type = TRACE_TYPE_INDENT;
return 0;
}

/* Append a payload-less marker that decreases the print indent. */
int trace_log_outdent(struct trace_buffer *tb)
{
struct trace_entry *e;
e = trace_alloc_entry(tb, 0);
if (!e)
return -ENOSPC;
e->type = TRACE_TYPE_OUTDENT;
return 0;
}
static void trace_print_header(int seq, int prefix)
{
printf("%*s[%d]: ", prefix, "", seq);
}
/*
 * Map an SPR number to its mnemonic, or NULL if it isn't one of the
 * PMU/EBB related SPRs we trace.
 */
static char *trace_decode_reg(int reg)
{
	char *name = NULL;

	switch (reg) {
	case 768: name = "SPRN_SIER";	break;
	case 769: name = "SPRN_MMCR2";	break;
	case 770: name = "SPRN_MMCRA";	break;
	case 771: name = "SPRN_PMC1";	break;
	case 772: name = "SPRN_PMC2";	break;
	case 773: name = "SPRN_PMC3";	break;
	case 774: name = "SPRN_PMC4";	break;
	case 775: name = "SPRN_PMC5";	break;
	case 776: name = "SPRN_PMC6";	break;
	case 779: name = "SPRN_MMCR0";	break;
	case 780: name = "SPRN_SIAR";	break;
	case 781: name = "SPRN_SDAR";	break;
	case 800: name = "SPRN_BESCRS";	break;
	case 801: name = "SPRN_BESCRSU"; break;
	case 802: name = "SPRN_BESCRR";	break;
	case 803: name = "SPRN_BESCRRU"; break;
	case 804: name = "SPRN_EBBHR";	break;
	case 805: name = "SPRN_EBBRR";	break;
	case 806: name = "SPRN_BESCR";	break;
	}

	return name;
}
/* Print a REG entry's payload: two u64s, reg number then value. */
static void trace_print_reg(struct trace_entry *e)
{
u64 *p, *reg, *value;
char *name;
p = (u64 *)e->data;
reg = p++;
value = p;
name = trace_decode_reg(*reg);
if (name)
printf("register %-10s = 0x%016llx\n", name, *value);
else
printf("register %lld = 0x%016llx\n", *reg, *value);
}
/* Print a COUNTER entry's single u64 payload. */
static void trace_print_counter(struct trace_entry *e)
{
u64 *value;
value = (u64 *)e->data;
printf("counter = %lld\n", *value);
}

/* Print a STRING entry's NUL-terminated payload. */
static void trace_print_string(struct trace_entry *e)
{
char *str;
str = (char *)e->data;
puts(str);
}
/* Starting indent and indent step for pretty-printing entries */
#define BASE_PREFIX 2
#define PREFIX_DELTA 8

/* Dispatch one entry to the right printer, maintaining *prefix across
 * INDENT/OUTDENT markers. */
static void trace_print_entry(struct trace_entry *e, int seq, int *prefix)
{
switch (e->type) {
case TRACE_TYPE_REG:
trace_print_header(seq, *prefix);
trace_print_reg(e);
break;
case TRACE_TYPE_COUNTER:
trace_print_header(seq, *prefix);
trace_print_counter(e);
break;
case TRACE_TYPE_STRING:
trace_print_header(seq, *prefix);
trace_print_string(e);
break;
case TRACE_TYPE_INDENT:
trace_print_header(seq, *prefix);
puts("{");
*prefix += PREFIX_DELTA;
break;
case TRACE_TYPE_OUTDENT:
/* Outdent before printing, but never go below the base indent */
*prefix -= PREFIX_DELTA;
if (*prefix < BASE_PREFIX)
*prefix = BASE_PREFIX;
trace_print_header(seq, *prefix);
puts("}");
break;
default:
trace_print_header(seq, *prefix);
printf("entry @ %p type %d\n", e, e->type);
break;
}
}
/* Dump the buffer header and then walk and print every logged entry. */
void trace_buffer_print(struct trace_buffer *tb)
{
struct trace_entry *e;
int i, prefix;
void *p;
printf("Trace buffer dump:\n");
printf(" address %p \n", tb);
printf(" tail %p\n", tb->tail);
printf(" size %llu\n", tb->size);
printf(" overflow %s\n", tb->overflow ? "TRUE" : "false");
printf(" Content:\n");
p = tb->data;
i = 0;
prefix = BASE_PREFIX;
/* Each entry is a header plus e->length bytes of payload */
while (trace_check_bounds(tb, p) && p < tb->tail) {
e = p;
trace_print_entry(e, i, &prefix);
i++;
p = (void *)e + sizeof(*e) + e->length;
}
}

/* One-line summary of where the buffer lives and how big it is. */
void trace_print_location(struct trace_buffer *tb)
{
printf("Trace buffer 0x%llx bytes @ %p\n", tb->size, tb);
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a pinned per-task event vs an EBB - in that order. The pinned per-task
* event should prevent the EBB event from being enabled.
*/
/* Attach a *pinned* user-space-only instruction-count event to the
 * child and enable it; pinned events can't be pushed off the PMU. */
static int setup_child_event(struct event *event, pid_t child_pid)
{
event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event->attr.pinned = 1;
event->attr.exclude_kernel = 1;
event->attr.exclude_hv = 1;
event->attr.exclude_idle = 1;
FAIL_IF(event_open_with_pid(event, child_pid));
FAIL_IF(event_enable(event));
return 0;
}
/* Install a pinned per-task event on the child, then have the child try
 * to use EBB; the pinned event should prevent the EBB event counting,
 * so the child is expected to exit with status 2. */
int task_event_pinned_vs_ebb(void)
{
union pipe read_pipe, write_pipe;
struct event event;
pid_t pid;
int rc;
SKIP_IF(!ebb_is_supported());
FAIL_IF(pipe(read_pipe.fds) == -1);
FAIL_IF(pipe(write_pipe.fds) == -1);
pid = fork();
if (pid == 0) {
/* NB order of pipes looks reversed */
exit(ebb_child(write_pipe, read_pipe));
}
/* We setup the task event first */
rc = setup_child_event(&event, pid);
if (rc) {
kill_child_and_wait(pid);
return rc;
}
/* Signal the child to install its EBB event and wait */
if (sync_with_child(read_pipe, write_pipe))
/* If it fails, wait for it to exit */
goto wait;
/* Signal the child to run */
FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
/* We expect it to fail to read the event */
FAIL_IF(wait_for_child(pid) != 2);
FAIL_IF(event_disable(&event));
FAIL_IF(event_read(&event));
event_report(&event);
FAIL_IF(event.result.value == 0);
/*
* For reasons I don't understand enabled is usually just slightly
* lower than running. Would be good to confirm why.
*/
FAIL_IF(event.result.enabled == 0);
FAIL_IF(event.result.running == 0);
return 0;
}

int main(void)
{
return test_harness(task_event_pinned_vs_ebb, "task_event_pinned_vs_ebb");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Test basic access to the EBB regs, they should be user accessible with no
* kernel interaction required.
*/
/* Write then read BESCR and EBBHR from userspace and check the values
 * round-trip - no kernel interaction should be needed. */
int reg_access(void)
{
uint64_t val, expected;
SKIP_IF(!ebb_is_supported());
expected = 0x8000000100000000ull;
mtspr(SPRN_BESCR, expected);
val = mfspr(SPRN_BESCR);
FAIL_IF(val != expected);
expected = 0x0000000001000000ull;
mtspr(SPRN_EBBHR, expected);
val = mfspr(SPRN_EBBHR);
FAIL_IF(val != expected);
return 0;
}

int main(void)
{
return test_harness(reg_access, "reg_access");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
#define NUMBER_OF_EBBS 50
/*
* Test that if we overflow the counter while in the EBB handler, we take
* another EBB on exiting from the handler.
*
* We do this by counting with a stupidly low sample period, causing us to
* overflow the PMU while we're still in the EBB handler, leading to another
* EBB.
*
* We get out of what would otherwise be an infinite loop by leaving the
* counter frozen once we've taken enough EBBs.
*/
/* EBB handler that deliberately overflows PMC1 again before returning,
 * so the next EBB fires immediately on exit from the handler. The loop
 * ends by leaving the counters frozen after NUMBER_OF_EBBS EBBs. */
static void ebb_callee(void)
{
uint64_t siar, val;
/* Anything other than a PMU (PMEO) EBB is counted as spurious */
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
/* Resets the PMC */
count_pmc(1, sample_period);
out:
if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS)
/* Reset but leave counters frozen */
reset_ebb_with_clear_mask(MMCR0_PMAO);
else
/* Unfreezes */
reset_ebb();
/* Do some stuff to chew some cycles and pop the counter */
siar = mfspr(SPRN_SIAR);
trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);
val = mfspr(SPRN_PMC1);
trace_log_reg(ebb_state.trace, SPRN_PMC1, val);
val = mfspr(SPRN_MMCR0);
trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
}
/* Take NUMBER_OF_EBBS back-to-back EBBs using a tiny sample period, and
 * check we saw exactly that many. */
int back_to_back_ebbs(void)
{
struct event event;
SKIP_IF(!ebb_is_supported());
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(ebb_callee);
FAIL_IF(ebb_event_enable(&event));
/* Stupidly low period so we overflow while still in the handler */
sample_period = 5;
ebb_freeze_pmcs();
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
ebb_global_enable();
ebb_unfreeze_pmcs();
while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
FAIL_IF(core_busy_loop());
ebb_global_disable();
ebb_freeze_pmcs();
dump_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);
return 0;
}

int main(void)
{
return test_harness(back_to_back_ebbs, "back_to_back_ebbs");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "ebb.h"
/*
* Test running multiple EBB using processes at once on a single CPU. They
* should all run happily without interfering with each other.
*/
/* Set by SIGINT; tells the child's busy loop to exit. */
static bool child_should_exit;

static void sigint_handler(int signal)
{
child_should_exit = true;
}

/* Installed in each child so the parent can stop it with SIGINT. */
struct sigaction sigint_action = {
.sa_handler = sigint_handler,
};
/* Child body: count cycles and take EBBs until SIGINT arrives, then
 * report and check at least one EBB was taken. */
static int cycles_child(void)
{
struct event event;
if (sigaction(SIGINT, &sigint_action, NULL)) {
perror("sigaction");
return 1;
}
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (!child_should_exit) {
FAIL_IF(core_busy_loop());
FAIL_IF(ebb_check_mmcr0());
}
ebb_global_disable();
ebb_freeze_pmcs();
dump_summary_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
return 0;
}
#define NR_CHILDREN 4

/*
 * Fork NR_CHILDREN EBB-using children bound to a single CPU, let them
 * run for a while, then stop them with SIGINT and merge their exit
 * statuses. They should all count independently without interfering.
 */
int multi_ebb_procs(void)
{
	pid_t pids[NR_CHILDREN];
	int rc, i;

	SKIP_IF(!ebb_is_supported());

	FAIL_IF(bind_to_cpu(BIND_CPU_ANY) < 0);

	for (i = 0; i < NR_CHILDREN; i++) {
		pids[i] = fork();
		/*
		 * Check for fork() failure - otherwise pids[i] would be -1
		 * and the kill(-1, SIGINT) below would signal every process
		 * we are permitted to signal.
		 */
		FAIL_IF(pids[i] == -1);
		if (pids[i] == 0)
			exit(cycles_child());
	}

	/* Have them all run for "a while" */
	sleep(10);

	rc = 0;
	for (i = 0; i < NR_CHILDREN; i++) {
		/* Tell them to stop */
		kill(pids[i], SIGINT);
		/* And wait */
		rc |= wait_for_child(pids[i]);
	}

	return rc;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
return test_harness(multi_ebb_procs, "multi_ebb_procs");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "ebb.h"
/*
* Test that tries to trigger CPU_FTR_PMAO_BUG. Which is a hardware defect
* where an exception triggers but we context switch before it is delivered and
* lose the exception.
*/
/* Take ~1M EBBs on PMC4 while hammering sched_yield(), varying the
 * sample period, trying to race an EBB exception against a context
 * switch (CPU_FTR_PMAO_BUG). A lost EBB makes the loop spin forever,
 * which the 300s harness timeout catches. */
static int test_body(void)
{
int i, orig_period, max_period;
struct event event;
SKIP_IF(!ebb_is_supported());
/* We use PMC4 to make sure the kernel switches all counters correctly */
event_init_named(&event, 0x40002, "instructions");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(4);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
/*
* We want a low sample period, but we also want to get out of the EBB
* handler without tripping up again.
*
* This value picked after much experimentation.
*/
orig_period = max_period = sample_period = 400;
mtspr(SPRN_PMC4, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 1000000) {
/*
* We are trying to get the EBB exception to race exactly with
* us entering the kernel to do the syscall. We then need the
* kernel to decide our timeslice is up and context switch to
* the other thread. When we come back our EBB will have been
* lost and we'll spin in this while loop forever.
*/
for (i = 0; i < 100000; i++)
sched_yield();
/* Change the sample period slightly to try and hit the race */
if (sample_period >= (orig_period + 200))
sample_period = orig_period;
else
sample_period++;
if (sample_period > max_period)
max_period = sample_period;
}
ebb_freeze_pmcs();
ebb_global_disable();
mtspr(SPRN_PMC4, 0xdead);
dump_summary_ebb_state();
dump_ebb_hw_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
/* We vary our sample period so we need extra fudge here */
FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));
return 0;
}
/* Run test_body with a cpu-eater companion to force context switches. */
static int lost_exception(void)
{
return eat_cpu(test_body);
}

int main(void)
{
/* The race takes a long time to hit, so allow 5 minutes */
test_harness_set_timeout(300);
return test_harness(lost_exception, "lost_exception");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <setjmp.h>
#include "ebb.h"
/*
* Test that a fork clears the PMU state of the child. eg. BESCR/EBBHR/EBBRR
* are cleared, and MMCR0_PMCC is reset, preventing the child from accessing
* the PMU.
*/
/* Opened by the parent before fork; the child reads it after fork. */
static struct event event;

/* Child body: check fork cleared BESCR/EBBHR/EBBRR and revoked PMC
 * access, while the parent's event is still readable. */
static int child(void)
{
/* Even though we have EBE=0 we can still see the EBB regs */
FAIL_IF(mfspr(SPRN_BESCR) != 0);
FAIL_IF(mfspr(SPRN_EBBHR) != 0);
FAIL_IF(mfspr(SPRN_EBBRR) != 0);
/* Writing a PMC should SIGILL now MMCR0_PMCC has been reset */
FAIL_IF(catch_sigill(write_pmc1));
/* We can still read from the event, though it is on our parent */
FAIL_IF(event_read(&event));
return 0;
}
/* Tests that fork clears EBB state */
int fork_cleanup(void)
{
pid_t pid;
SKIP_IF(!ebb_is_supported());
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
/* Freeze the counters; we only need the EBB *state* set up */
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
/* Don't need to actually take any EBBs */
pid = fork();
if (pid == 0)
exit(child());
/* Child does the actual testing */
FAIL_IF(wait_for_child(pid));
/* After fork */
event_close(&event);
return 0;
}

int main(void)
{
return test_harness(fork_cleanup, "fork_cleanup");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests we can setup an EBB on our child. The child expects this and enables
* EBBs, which are then delivered to the child, even though the event is
* created by the parent.
*/
/* Child body: install an EBB handler, tell the parent we're ready, then
 * spin until at least 20 EBBs (delivered via the parent's event) land. */
static int victim_child(union pipe read_pipe, union pipe write_pipe)
{
FAIL_IF(wait_for_parent(read_pipe));
/* Setup our EBB handler, before the EBB event is created */
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(notify_parent(write_pipe));
while (ebb_state.stats.ebb_count < 20) {
FAIL_IF(core_busy_loop());
}
ebb_global_disable();
ebb_freeze_pmcs();
dump_ebb_state();
FAIL_IF(ebb_state.stats.ebb_count == 0);
return 0;
}
/* Tests we can setup an EBB on our child - if it's expecting it */
int ebb_on_willing_child(void)
{
union pipe read_pipe, write_pipe;
struct event event;
pid_t pid;
SKIP_IF(!ebb_is_supported());
FAIL_IF(pipe(read_pipe.fds) == -1);
FAIL_IF(pipe(write_pipe.fds) == -1);
pid = fork();
if (pid == 0) {
/* NB order of pipes looks reversed */
exit(victim_child(write_pipe, read_pipe));
}
/* Signal the child to setup its EBB handler */
FAIL_IF(sync_with_child(read_pipe, write_pipe));
/* Child is running now */
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open_with_pid(&event, pid));
FAIL_IF(ebb_event_enable(&event));
/* Child show now take EBBs and then exit */
FAIL_IF(wait_for_child(pid));
event_close(&event);
return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(ebb_on_willing_child, "ebb_on_willing_child");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests an EBB vs a cpu event - in that order. The EBB should force the cpu
* event off the PMU.
*/
/* Open and enable a per-cpu (non-pinned) event on the given cpu. */
static int setup_cpu_event(struct event *event, int cpu)
{
	event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
	event->attr.exclude_kernel = 1;
	event->attr.exclude_hv = 1;
	event->attr.exclude_idle = 1;
	/* Opening a cpu event needs perf_event_paranoid < 1 */
	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(event, cpu));
	FAIL_IF(event_enable(event));
	return 0;
}
/*
 * Child installs its EBB event first, then we install a cpu event on the
 * same cpu. The EBB (exclusive, pinned) should win the PMU, so the cpu
 * event must not have run 100% of the time.
 */
int ebb_vs_cpu_event(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;
	SKIP_IF(!ebb_is_supported());
	/* Pin ourselves (and hence the child) to one cpu */
	cpu = bind_to_cpu(BIND_CPU_ANY);
	FAIL_IF(cpu < 0);
	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);
	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}
	/* Signal the child to install its EBB event and wait */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
	/* Now try to install our CPU event */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		/* Don't leave the child blocked on the pipe */
		kill_child_and_wait(pid);
		return rc;
	}
	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
	/* .. and wait for it to complete */
	FAIL_IF(wait_for_child(pid));
	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));
	event_report(&event);
	/* The cpu event may have run, but we don't expect 100% */
	FAIL_IF(event.result.enabled >= event.result.running);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(ebb_vs_cpu_event, "ebb_vs_cpu_event");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Test that PMC5 & 6 are frozen (ie. don't overflow) when they are not being
* used. Tests the MMCR0_FC56 logic in the kernel.
*/
static int pmc56_overflowed;
/*
 * EBB handler: count EBBs from PMC2, and record whether PMC5/6 ever
 * reached the overflow threshold - they shouldn't, as they are frozen
 * via MMCR0_FC56 when unused.
 */
static void ebb_callee(void)
{
	uint64_t val;
	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		/* EBB taken without PMEO set - not ours */
		ebb_state.stats.spurious++;
		goto out;
	}
	ebb_state.stats.ebb_count++;
	count_pmc(2, sample_period);
	/* PMC5/6 should be frozen, flag if either has overflowed */
	val = mfspr(SPRN_PMC5);
	if (val >= COUNTER_OVERFLOW)
		pmc56_overflowed++;
	count_pmc(5, COUNTER_OVERFLOW);
	val = mfspr(SPRN_PMC6);
	if (val >= COUNTER_OVERFLOW)
		pmc56_overflowed++;
	count_pmc(6, COUNTER_OVERFLOW);
out:
	/* Re-arm the EBB before returning via rfebb */
	reset_ebb();
}
/*
 * Drive EBBs from PMC2 and check that PMC5/6 never overflow while they
 * are not part of any event (MMCR0_FC56 handling in the kernel).
 */
int pmc56_overflow(void)
{
	struct event event;
	SKIP_IF(!ebb_is_supported());
	/* Use PMC2 so we set PMCjCE, which enables PMC5/6 */
	event_init(&event, 0x2001e);
	event_leader_ebb_init(&event);
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;
	FAIL_IF(event_open(&event));
	setup_ebb_handler(ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));
	/* Arm PMC2 to overflow after sample_period; zero PMC5/6 */
	mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC5, 0);
	mtspr(SPRN_PMC6, 0);
	while (ebb_state.stats.ebb_count < 10)
		FAIL_IF(core_busy_loop());
	ebb_global_disable();
	ebb_freeze_pmcs();
	dump_ebb_state();
	printf("PMC5/6 overflow %d\n", pmc56_overflowed);
	event_close(&event);
	/* Pass only if we took EBBs and PMC5/6 never overflowed */
	FAIL_IF(ebb_state.stats.ebb_count == 0 || pmc56_overflowed != 0);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(pmc56_overflow, "pmc56_overflow");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
* should remain and the EBB event should fail to enable.
*/
/* Open and enable a *pinned* per-cpu event on the given cpu. */
static int setup_cpu_event(struct event *event, int cpu)
{
	event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
	/* Pinned: the scheduler must keep this event on the PMU */
	event->attr.pinned = 1;
	event->attr.exclude_kernel = 1;
	event->attr.exclude_hv = 1;
	event->attr.exclude_idle = 1;
	/* Opening a cpu event needs perf_event_paranoid < 1 */
	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(event, cpu));
	FAIL_IF(event_enable(event));
	return 0;
}
/*
 * Install a pinned cpu event first, then have the child try to use an
 * EBB event on the same cpu. The pinned event should keep the PMU and
 * the child's EBB event should fail to read (child exits with 2).
 */
int cpu_event_pinned_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;
	SKIP_IF(!ebb_is_supported());
	cpu = bind_to_cpu(BIND_CPU_ANY);
	FAIL_IF(cpu < 0);
	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);
	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}
	/* We setup the cpu event first */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		/* Don't leave the child blocked on the pipe */
		kill_child_and_wait(pid);
		return rc;
	}
	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;
	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
	/* We expect it to fail to read the event */
	FAIL_IF(wait_for_child(pid) != 2);
	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));
	event_report(&event);
	/* The cpu event should have run */
	FAIL_IF(event.result.value == 0);
	/* Pinned event must have been scheduled the whole time */
	FAIL_IF(event.result.enabled != event.result.running);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(cpu_event_pinned_vs_ebb, "cpu_event_pinned_vs_ebb");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
* event off the PMU.
*/
/* Open and enable a per-cpu (non-pinned) event on the given cpu. */
static int setup_cpu_event(struct event *event, int cpu)
{
	event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
	event->attr.exclude_kernel = 1;
	event->attr.exclude_hv = 1;
	event->attr.exclude_idle = 1;
	/* Opening a cpu event needs perf_event_paranoid < 1 */
	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(event, cpu));
	FAIL_IF(event_enable(event));
	return 0;
}
/*
 * Install a (non-pinned) cpu event first, then have the child use an
 * EBB event on the same cpu. The EBB should force the cpu event off
 * the PMU and the child should succeed.
 */
int cpu_event_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;
	SKIP_IF(!ebb_is_supported());
	cpu = bind_to_cpu(BIND_CPU_ANY);
	FAIL_IF(cpu < 0);
	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);
	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}
	/* We setup the cpu event first */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		/* Don't leave the child blocked on the pipe */
		kill_child_and_wait(pid);
		return rc;
	}
	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;
	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
	/* We expect the child to succeed */
	FAIL_IF(wait_for_child(pid));
	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));
	event_report(&event);
	/* The cpu event may have run */
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(cpu_event_vs_ebb, "cpu_event_vs_ebb");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
#include <sched.h>
#include <sys/wait.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "trace.h"
#include "ebb.h"
/* User-supplied callback invoked from the low-level EBB entry code. */
void (*ebb_user_func)(void);
/* Called from the asm EBB handler; dispatches to the user callback. */
void ebb_hook(void)
{
	if (ebb_user_func)
		ebb_user_func();
}
/* Global bookkeeping shared between the tests and the EBB handler. */
struct ebb_state ebb_state;
/* Default PMC sample period used by the tests. */
u64 sample_period = 0x40000000ull;
/*
 * Re-arm EBB/PMU state at the end of an EBB handler, clearing the given
 * MMCR0 bits (typically PMAO and/or FC). The numbered comments follow
 * the documented re-arm sequence; step 1 is the handler entry itself.
 */
void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask)
{
	u64 val;
	/* 2) clear MMCR0[PMAO] - docs say BESCR[PMEO] should do this */
	/* 3) set MMCR0[PMAE] - docs say BESCR[PME] should do this */
	val = mfspr(SPRN_MMCR0);
	mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE);
	/* 4) clear BESCR[PMEO] */
	mtspr(SPRN_BESCRR, BESCR_PMEO);
	/* 5) set BESCR[PME] */
	mtspr(SPRN_BESCRS, BESCR_PME);
	/* 6) rfebb 1 - done in our caller */
}
/* Standard end-of-EBB reset: clear PMAO and unfreeze the counters. */
void reset_ebb(void)
{
	reset_ebb_with_clear_mask(MMCR0_PMAO | MMCR0_FC);
}
/* Called outside of the EBB handler to check MMCR0 is sane */
/* Called outside of the EBB handler to check MMCR0 is sane */
int ebb_check_mmcr0(void)
{
	u64 val;
	val = mfspr(SPRN_MMCR0);
	if ((val & (MMCR0_FC | MMCR0_PMAO)) == MMCR0_FC) {
		/* It's OK if we see FC & PMAO, but not FC by itself */
		printf("Outside of loop, only FC set 0x%llx\n", val);
		return 1;
	}
	return 0;
}
/*
 * Check that a PMC's accumulated count is consistent with the number of
 * EBBs taken: each EBB should account for roughly sample_period counts,
 * within +/- fudge per EBB. Prints a diagnostic either way and returns
 * true if the count is within bounds.
 */
bool ebb_check_count(int pmc, u64 sample_period, int fudge)
{
	u64 count = ebb_state.stats.pmc_count[PMC_INDEX(pmc)];
	u64 lower = ebb_state.stats.ebb_count * (sample_period - fudge);
	u64 upper = ebb_state.stats.ebb_count * (sample_period + fudge);

	if (count < lower) {
		printf("PMC%d count (0x%llx) below lower limit 0x%llx (-0x%llx)\n",
			pmc, count, lower, lower - count);
		return false;
	}

	if (count > upper) {
		printf("PMC%d count (0x%llx) above upper limit 0x%llx (+0x%llx)\n",
			pmc, count, upper, count - upper);
		return false;
	}

	printf("PMC%d count (0x%llx) is between 0x%llx and 0x%llx delta +0x%llx/-0x%llx\n",
		pmc, count, lower, upper, count - lower, upper - count);
	return true;
}
/*
 * Default EBB callback: validate PMEO, log to the trace buffer, and
 * count/reset every PMC the test enabled via ebb_enable_pmc_counting().
 */
void standard_ebb_callee(void)
{
	int found, i;
	u64 val;
	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		/* EBB taken without PMEO set - not ours */
		ebb_state.stats.spurious++;
		goto out;
	}
	ebb_state.stats.ebb_count++;
	trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
	val = mfspr(SPRN_MMCR0);
	trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
	/* Count each enabled PMC; note whether any actually overflowed */
	found = 0;
	for (i = 1; i <= 6; i++) {
		if (ebb_state.pmc_enable[PMC_INDEX(i)])
			found += count_pmc(i, sample_period);
	}
	if (!found)
		ebb_state.stats.no_overflow++;
out:
	/* Re-arm the EBB before returning via rfebb */
	reset_ebb();
}
/* Low-level EBB entry point, implemented in asm. */
extern void ebb_handler(void);
/*
 * Point EBBHR at the asm handler and install the user callback it will
 * dispatch to. Handles both ELFv2 (entry == function address) and
 * ELFv1 (function "address" is an opd descriptor) ABIs.
 */
void setup_ebb_handler(void (*callee)(void))
{
	u64 entry;
#if defined(_CALL_ELF) && _CALL_ELF == 2
	entry = (u64)ebb_handler;
#else
	/* ELFv1: dereference the function descriptor to get the entry */
	struct opd
	{
	    u64 entry;
	    u64 toc;
	} *opd;
	opd = (struct opd *)ebb_handler;
	entry = opd->entry;
#endif
	printf("EBB Handler is at %#llx\n", entry);
	ebb_user_func = callee;
	/* Ensure ebb_user_func is set before we set the handler */
	mb();
	mtspr(SPRN_EBBHR, entry);
	/* Make sure the handler is set before we return */
	mb();
}
/* Zero the per-test EBB statistics (not the trace buffer). */
void clear_ebb_stats(void)
{
	memset(&ebb_state.stats, 0, sizeof(ebb_state.stats));
}
/* Print the accumulated EBB statistics and per-PMC counts. */
void dump_summary_ebb_state(void)
{
	printf("ebb_state:\n"			\
		" ebb_count = %d\n"		\
		" spurious = %d\n"		\
		" negative = %d\n"		\
		" no_overflow = %d\n"		\
		" pmc[1] count = 0x%llx\n"	\
		" pmc[2] count = 0x%llx\n"	\
		" pmc[3] count = 0x%llx\n"	\
		" pmc[4] count = 0x%llx\n"	\
		" pmc[5] count = 0x%llx\n"	\
		" pmc[6] count = 0x%llx\n",
		ebb_state.stats.ebb_count, ebb_state.stats.spurious,
		ebb_state.stats.negative, ebb_state.stats.no_overflow,
		ebb_state.stats.pmc_count[0], ebb_state.stats.pmc_count[1],
		ebb_state.stats.pmc_count[2], ebb_state.stats.pmc_count[3],
		ebb_state.stats.pmc_count[4], ebb_state.stats.pmc_count[5]);
}
/*
 * Render the interesting MMCR0 bits (FC, PMAE, PMAO) as a short string.
 * Returns static storage: valid only until the next call, not
 * thread-safe. Unknown bits are ignored.
 */
static char *decode_mmcr0(u32 value)
{
	static const struct {
		u32 bit;
		const char *name;
	} flags[] = {
		{ 1u << 31, "FC " },
		{ 1u << 26, "PMAE " },
		{ 1u << 7,  "PMAO " },
	};
	static char buf[16];
	size_t i;

	buf[0] = '\0';

	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
		if (value & flags[i].bit)
			strcat(buf, flags[i].name);
	}

	return buf;
}
/*
 * Render the interesting BESCR bits as a short string. Returns static
 * storage: valid only until the next call, not thread-safe. Unknown
 * bits are ignored.
 */
static char *decode_bescr(u64 value)
{
	static const struct {
		u64 bit;
		const char *name;
	} flags[] = {
		{ 1ull << 63, "GE " },
		{ 1ull << 32, "PMAE " },
		{ 1ull,       "PMAO " },
	};
	static char buf[16];
	size_t i;

	buf[0] = '\0';

	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
		if (value & flags[i].bit)
			strcat(buf, flags[i].name);
	}

	return buf;
}
/* Print the raw PMU/EBB SPR values with decoded MMCR0/BESCR flags. */
void dump_ebb_hw_state(void)
{
	u64 bescr;
	u32 mmcr0;
	mmcr0 = mfspr(SPRN_MMCR0);
	bescr = mfspr(SPRN_BESCR);
	printf("HW state:\n"		\
		"MMCR0 0x%016x %s\n"	\
		"MMCR2 0x%016lx\n"	\
		"EBBHR 0x%016lx\n"	\
		"BESCR 0x%016llx %s\n"	\
		"PMC1 0x%016lx\n"	\
		"PMC2 0x%016lx\n"	\
		"PMC3 0x%016lx\n"	\
		"PMC4 0x%016lx\n"	\
		"PMC5 0x%016lx\n"	\
		"PMC6 0x%016lx\n"	\
		"SIAR 0x%016lx\n",
		mmcr0, decode_mmcr0(mmcr0), mfspr(SPRN_MMCR2),
		mfspr(SPRN_EBBHR), bescr, decode_bescr(bescr),
		mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), mfspr(SPRN_PMC3),
		mfspr(SPRN_PMC4), mfspr(SPRN_PMC5), mfspr(SPRN_PMC6),
		mfspr(SPRN_SIAR));
}
/* Full state dump: software stats, hardware SPRs, and trace buffer. */
void dump_ebb_state(void)
{
	dump_summary_ebb_state();
	dump_ebb_hw_state();
	trace_buffer_print(ebb_state.trace);
}
/*
 * Accumulate a PMC's progress since it was last armed, then re-arm it
 * to overflow after sample_period. Returns non-zero if the PMC had
 * reached its overflow threshold.
 */
int count_pmc(int pmc, uint32_t sample_period)
{
	uint32_t start_value;
	u64 val;
	/* 0) Read PMC */
	start_value = pmc_sample_period(sample_period);
	val = read_pmc(pmc);
	if (val < start_value)
		/* PMC went backwards from its armed value - shouldn't happen */
		ebb_state.stats.negative++;
	else
		ebb_state.stats.pmc_count[PMC_INDEX(pmc)] += val - start_value;
	trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val);
	/* 1) Reset PMC */
	write_pmc(pmc, start_value);
	/* Report if we overflowed */
	return val >= COUNTER_OVERFLOW;
}
/*
 * Enable an EBB event via perf and read it once to force it onto the
 * PMU. Returns the ioctl/read result (0 on success).
 */
int ebb_event_enable(struct event *e)
{
	int rc;
	/* Ensure any SPR writes are ordered vs us */
	mb();
	rc = ioctl(e->fd, PERF_EVENT_IOC_ENABLE);
	if (rc)
		return rc;
	rc = event_read(e);
	/* Ditto */
	mb();
	return rc;
}
/* Freeze all counters by setting MMCR0[FC]. */
void ebb_freeze_pmcs(void)
{
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
	mb();
}
/* Unfreeze counters */
void ebb_unfreeze_pmcs(void)
{
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
	mb();
}
/* Enable EBBs globally (BESCR[GE]) and PMU EBBs (BESCR[PME]). */
void ebb_global_enable(void)
{
	/* Enable EBBs globally and PMU EBBs */
	mtspr(SPRN_BESCR, 0x8000000100000000ull);
	mb();
}
/* Disable further PMU EBB delivery by clearing BESCR[PME]. */
void ebb_global_disable(void)
{
	/* Disable EBBs & freeze counters, events are still scheduled */
	mtspr(SPRN_BESCRR, BESCR_PME);
	mb();
}
/* EBB support is indicated by the PPC_FEATURE2_EBB hwcap bit. */
bool ebb_is_supported(void)
{
#ifdef PPC_FEATURE2_EBB
	/* EBB requires at least POWER8 */
	return have_hwcap2(PPC_FEATURE2_EBB);
#else
	/* Toolchain headers too old to know about EBB */
	return false;
#endif
}
/* Mark the event as EBB-based (bit 63 of the raw config). */
void event_ebb_init(struct event *e)
{
	e->attr.config |= (1ull << 63);
}
/* Request BHRB recording with the given instruction filtering mode. */
void event_bhrb_init(struct event *e, unsigned ifm)
{
	e->attr.config |= (1ull << 62) | ((u64)ifm << 60);
}
/* EBB group leaders must also be exclusive and pinned. */
void event_leader_ebb_init(struct event *e)
{
	event_ebb_init(e);
	e->attr.exclusive = 1;
	e->attr.pinned = 1;
}
/*
 * Generic child used by several parent/child tests: sets up its own EBB
 * event on itself, syncs with the parent over the pipes, then spins
 * taking EBBs. Returns 0 on success, 2 if the event fails to read
 * (i.e. couldn't be scheduled), 1 on any other failure.
 */
int ebb_child(union pipe read_pipe, union pipe write_pipe)
{
	struct event event;
	uint64_t val;
	FAIL_IF(wait_for_parent(read_pipe));
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;
	FAIL_IF(event_open(&event));
	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(event_enable(&event));
	if (event_read(&event)) {
		/*
		 * Some tests expect to fail here, so don't report an error on
		 * this line, and return a distinguishable error code. Tell the
		 * parent an error happened.
		 */
		notify_parent_of_error(write_pipe);
		return 2;
	}
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	FAIL_IF(notify_parent(write_pipe));
	FAIL_IF(wait_for_parent(read_pipe));
	FAIL_IF(notify_parent(write_pipe));
	while (ebb_state.stats.ebb_count < 20) {
		FAIL_IF(core_busy_loop());
		/* To try and hit SIGILL case */
		val = mfspr(SPRN_MMCRA);
		val |= mfspr(SPRN_MMCR2);
		val |= mfspr(SPRN_MMCR0);
	}
	ebb_global_disable();
	ebb_freeze_pmcs();
	dump_ebb_state();
	event_close(&event);
	FAIL_IF(ebb_state.stats.ebb_count == 0);
	return 0;
}
/* Jump target used to recover from an expected SIGILL. */
static jmp_buf setjmp_env;
/* SIGILL handler: report and longjmp back into catch_sigill(). */
static void sigill_handler(int signal)
{
	printf("Took sigill\n");
	longjmp(setjmp_env, 1);
}
static struct sigaction sigill_action = {
	.sa_handler = sigill_handler,
};
/*
 * Run func() expecting it to raise SIGILL. Returns 0 if SIGILL was
 * taken, 1 if func() returned normally or sigaction failed.
 */
int catch_sigill(void (*func)(void))
{
	if (sigaction(SIGILL, &sigill_action, NULL)) {
		perror("sigaction");
		return 1;
	}
	if (setjmp(setjmp_env) == 0) {
		func();
		/* func() completed without faulting - that's a failure here */
		return 1;
	}
	return 0;
}
/* Helper for catch_sigill(): a bare write to PMC1. */
void write_pmc1(void)
{
	mtspr(SPRN_PMC1, 0);
}
/*
 * Write a PMC by number. A switch is required because mtspr() takes a
 * compile-time constant SPR number. Out-of-range pmc is ignored.
 */
void write_pmc(int pmc, u64 value)
{
	switch (pmc) {
	case 1: mtspr(SPRN_PMC1, value); break;
	case 2: mtspr(SPRN_PMC2, value); break;
	case 3: mtspr(SPRN_PMC3, value); break;
	case 4: mtspr(SPRN_PMC4, value); break;
	case 5: mtspr(SPRN_PMC5, value); break;
	case 6: mtspr(SPRN_PMC6, value); break;
	}
}
/* Read a PMC by number; returns 0 for an out-of-range pmc. */
u64 read_pmc(int pmc)
{
	switch (pmc) {
	case 1: return mfspr(SPRN_PMC1);
	case 2: return mfspr(SPRN_PMC2);
	case 3: return mfspr(SPRN_PMC3);
	case 4: return mfspr(SPRN_PMC4);
	case 5: return mfspr(SPRN_PMC5);
	case 6: return mfspr(SPRN_PMC6);
	}
	return 0;
}
/* On SIGTERM (e.g. harness timeout) dump state before aborting. */
static void term_handler(int signal)
{
	dump_summary_ebb_state();
	dump_ebb_hw_state();
	abort();
}
struct sigaction term_action = {
	.sa_handler = term_handler,
};
/* Runs before main(): reset stats, hook SIGTERM, allocate the trace buffer. */
static void __attribute__((constructor)) ebb_init(void)
{
	clear_ebb_stats();
	if (sigaction(SIGTERM, &term_action, NULL))
		perror("sigaction");
	ebb_state.trace = trace_buffer_allocate(1 * 1024 * 1024);
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/ebb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <signal.h>
#include "ebb.h"
/* Test that things work sanely if we have no handler */
static int no_handler_test(void)
{
struct event event;
u64 val;
int i;
SKIP_IF(!ebb_is_supported());
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
FAIL_IF(ebb_event_enable(&event));
val = mfspr(SPRN_EBBHR);
FAIL_IF(val != 0);
/* Make sure it overflows quickly */
sample_period = 1000;
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
/* Spin to make sure the event has time to overflow */
for (i = 0; i < 1000; i++)
mb();
dump_ebb_state();
/* We expect to see the PMU frozen & PMAO set */
val = mfspr(SPRN_MMCR0);
FAIL_IF(val != 0x0000000080000080);
event_close(&event);
/* The real test is that we never took an EBB at 0x0 */
return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	/* Space after the comma for consistency with the sibling tests. */
	return test_harness(no_handler_test, "no_handler_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "ebb.h"
/*
* Test counting multiple events using EBBs.
*/
/*
 * Open six EBB events as one group (leader first), arm all six PMCs,
 * and check that EBBs are delivered while all are counting.
 */
int multi_counter(void)
{
	struct event events[6];
	int i, group_fd;
	SKIP_IF(!ebb_is_supported());
	event_init_named(&events[0], 0x1001C, "PM_CMPLU_STALL_THRD");
	event_init_named(&events[1], 0x2D016, "PM_CMPLU_STALL_FXU");
	event_init_named(&events[2], 0x30006, "PM_CMPLU_STALL_OTHER_CMPL");
	event_init_named(&events[3], 0x4000A, "PM_CMPLU_STALL");
	event_init_named(&events[4], 0x600f4, "PM_RUN_CYC");
	event_init_named(&events[5], 0x500fa, "PM_RUN_INST_CMPL");
	/* events[0] is the group leader; the rest are plain EBB events */
	event_leader_ebb_init(&events[0]);
	for (i = 1; i < 6; i++)
		event_ebb_init(&events[i]);
	group_fd = -1;
	for (i = 0; i < 6; i++) {
		events[i].attr.exclude_kernel = 1;
		events[i].attr.exclude_hv = 1;
		events[i].attr.exclude_idle = 1;
		FAIL_IF(event_open_with_group(&events[i], group_fd));
		/* After the leader opens, later events join its group */
		if (group_fd == -1)
			group_fd = events[0].fd;
	}
	ebb_enable_pmc_counting(1);
	ebb_enable_pmc_counting(2);
	ebb_enable_pmc_counting(3);
	ebb_enable_pmc_counting(4);
	ebb_enable_pmc_counting(5);
	ebb_enable_pmc_counting(6);
	setup_ebb_handler(standard_ebb_callee);
	/* Enable the whole group at once via the leader */
	FAIL_IF(ioctl(events[0].fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP));
	FAIL_IF(event_read(&events[0]));
	ebb_global_enable();
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC3, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC4, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC5, pmc_sample_period(sample_period));
	mtspr(SPRN_PMC6, pmc_sample_period(sample_period));
	while (ebb_state.stats.ebb_count < 50) {
		FAIL_IF(core_busy_loop());
		FAIL_IF(ebb_check_mmcr0());
	}
	ebb_global_disable();
	ebb_freeze_pmcs();
	dump_ebb_state();
	for (i = 0; i < 6; i++)
		event_close(&events[i]);
	FAIL_IF(ebb_state.stats.ebb_count == 0);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(multi_counter, "multi_counter");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sys/prctl.h>
#include "ebb.h"
/*
* Run a calibrated instruction loop and count instructions executed using
* EBBs. Make sure the counts look right.
*/
/* Asm loop of exactly 32 instructions per iteration. */
extern void thirty_two_instruction_loop(uint64_t loops);
/* Tells the EBB callee whether to leave the counters frozen on exit. */
static bool counters_frozen = true;
/*
 * Run the calibrated loop for 'instructions' instructions with the PMU
 * unfrozen, then compare PMC4's count against instructions + overhead.
 * Returns 0 if within 0.0001%, -1 otherwise.
 */
static int do_count_loop(struct event *event, uint64_t instructions,
			 uint64_t overhead, bool report)
{
	int64_t difference, expected;
	double percentage;
	clear_ebb_stats();
	/* Unfreeze (flag first, then MMCR0, ordered by the barrier) */
	counters_frozen = false;
	mb();
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
	/* >> 5 because the loop body is 32 instructions */
	thirty_two_instruction_loop(instructions >> 5);
	counters_frozen = true;
	mb();
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
	count_pmc(4, sample_period);
	event->result.value = ebb_state.stats.pmc_count[4-1];
	expected = instructions + overhead;
	difference = event->result.value - expected;
	percentage = (double)difference / event->result.value * 100;
	if (report) {
		printf("Looped for %lu instructions, overhead %lu\n", instructions, overhead);
		printf("Expected %lu\n", expected);
		printf("Actual %llu\n", event->result.value);
		printf("Delta %ld, %f%%\n", difference, percentage);
		printf("Took %d EBBs\n", ebb_state.stats.ebb_count);
	}
	if (difference < 0)
		difference = -difference;
	/* Tolerate a difference of up to 0.0001 % */
	difference *= 10000 * 100;
	if (difference / event->result.value)
		return -1;
	return 0;
}
/* Count how many instructions it takes to do a null loop */
static uint64_t determine_overhead(struct event *event)
{
	uint64_t current, overhead;
	int i;
	do_count_loop(event, 0, 0, false);
	overhead = event->result.value;
	/* Keep the minimum over 100 runs as the best overhead estimate */
	for (i = 0; i < 100; i++) {
		do_count_loop(event, 0, 0, false);
		current = event->result.value;
		if (current < overhead) {
			printf("Replacing overhead %lu with %lu\n", overhead, current);
			overhead = current;
		}
	}
	return overhead;
}
/*
 * EBB handler counting PMC4 only. When the test has frozen the
 * counters, re-arm without clearing MMCR0[FC] so they stay frozen.
 */
static void pmc4_ebb_callee(void)
{
	uint64_t val;
	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		ebb_state.stats.spurious++;
		goto out;
	}
	ebb_state.stats.ebb_count++;
	count_pmc(4, sample_period);
out:
	if (counters_frozen)
		reset_ebb_with_clear_mask(MMCR0_PMAO);
	else
		reset_ebb();
}
/*
 * Measure instruction counts for loops from 1M up to 128G instructions
 * and check each against the calibrated overhead.
 */
int instruction_count(void)
{
	struct event event;
	uint64_t overhead;
	SKIP_IF(!ebb_is_supported());
	event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL");
	event_leader_ebb_init(&event);
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;
	FAIL_IF(event_open(&event));
	FAIL_IF(ebb_event_enable(&event));
	sample_period = COUNTER_OVERFLOW;
	setup_ebb_handler(pmc4_ebb_callee);
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
	ebb_global_enable();
	overhead = determine_overhead(&event);
	printf("Overhead of null loop: %lu instructions\n", overhead);
	/* Run for 1M instructions */
	FAIL_IF(do_count_loop(&event, 0x100000, overhead, true));
	/* Run for 10M instructions */
	FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true));
	/* Run for 100M instructions */
	FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true));
	/* Run for 1G instructions */
	FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true));
	/* Run for 16G instructions */
	FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true));
	/* Run for 64G instructions */
	FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true));
	/* Run for 128G instructions */
	FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true));
	ebb_global_disable();
	event_close(&event);
	printf("Finished OK\n");
	return 0;
}
/* Register the test with the powerpc selftest harness; the long runs need a 300s timeout. */
int main(void)
{
	test_harness_set_timeout(300);
	return test_harness(instruction_count, "instruction_count");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <signal.h>
#include "ebb.h"
/*
* Test that closing the EBB event clears MMCR0_PMCC and
* sets MMCR0_PMCCEXT preventing further read access to the
* group B PMU registers.
*/
/*
 * Take at least one EBB, close the event, then check that reading the
 * group B PMU registers faults on ISA v3.1 (MMCR0[PMCCEXT] set by the
 * kernel on close).
 */
static int regs_access_pmccext(void)
{
	struct event event;
	SKIP_IF(!ebb_is_supported());
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);
	FAIL_IF(event_open(&event));
	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	while (ebb_state.stats.ebb_count < 1)
		FAIL_IF(core_busy_loop());
	ebb_global_disable();
	event_close(&event);
	FAIL_IF(ebb_state.stats.ebb_count == 0);
	/*
	 * For ISA v3.1, verify the test takes a SIGILL when reading
	 * PMU regs after the event is closed. With the control bit
	 * in MMCR0 (PMCCEXT) restricting access to group B PMU regs,
	 * sigill is expected.
	 */
	if (have_hwcap2(PPC_FEATURE2_ARCH_3_1))
		FAIL_IF(catch_sigill(dump_ebb_state));
	else
		dump_ebb_state();
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(regs_access_pmccext, "regs_access_pmccext");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Test various attributes of the EBB event are enforced.
*/
/*
 * Exercise the kernel's validation of EBB event attributes: each open
 * below is annotated with whether it should succeed or be rejected.
 */
int event_attributes(void)
{
	struct event event, leader;
	SKIP_IF(!ebb_is_supported());
	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	/* Expected to succeed */
	FAIL_IF(event_open(&event));
	event_close(&event);
	event_init(&event, 0x001e); /* CYCLES - no PMC specified */
	event_leader_ebb_init(&event);
	/* Expected to fail, no PMC specified */
	FAIL_IF(event_open(&event) == 0);
	event_init(&event, 0x2001e);
	event_leader_ebb_init(&event);
	event.attr.exclusive = 0;
	/* Expected to fail, not exclusive */
	FAIL_IF(event_open(&event) == 0);
	event_init(&event, 0x3001e);
	event_leader_ebb_init(&event);
	event.attr.freq = 1;
	/* Expected to fail, sets freq */
	FAIL_IF(event_open(&event) == 0);
	event_init(&event, 0x4001e);
	event_leader_ebb_init(&event);
	event.attr.sample_period = 1;
	/* Expected to fail, sets sample_period */
	FAIL_IF(event_open(&event) == 0);
	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	event.attr.enable_on_exec = 1;
	/* Expected to fail, sets enable_on_exec */
	FAIL_IF(event_open(&event) == 0);
	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	event.attr.inherit = 1;
	/* Expected to fail, sets inherit */
	FAIL_IF(event_open(&event) == 0);
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	FAIL_IF(event_open(&leader));
	event_init(&event, 0x20002);
	event_ebb_init(&event);
	/* Expected to succeed */
	FAIL_IF(event_open_with_group(&event, leader.fd));
	event_close(&leader);
	event_close(&event);
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	FAIL_IF(event_open(&leader));
	event_init(&event, 0x20002);
	/* Expected to fail, event doesn't request EBB, leader does */
	FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
	event_close(&leader);
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	/* Clear the EBB flag */
	leader.attr.config &= ~(1ull << 63);
	FAIL_IF(event_open(&leader));
	event_init(&event, 0x20002);
	event_ebb_init(&event);
	/* Expected to fail, leader doesn't request EBB */
	FAIL_IF(event_open_with_group(&event, leader.fd) == 0);
	event_close(&leader);
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	leader.attr.exclusive = 0;
	/* Expected to fail, leader isn't exclusive */
	FAIL_IF(event_open(&leader) == 0);
	event_init(&leader, 0x1001e);
	event_leader_ebb_init(&leader);
	leader.attr.pinned = 0;
	/* Expected to fail, leader isn't pinned */
	FAIL_IF(event_open(&leader) == 0);
	event_init(&event, 0x1001e);
	event_leader_ebb_init(&event);
	/* Expected to fail, not a task event */
	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(&event, 0) == 0);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(event_attributes, "event_attributes");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests we can setup an EBB on our child. Nothing interesting happens, because
* even though the event is enabled and running the child hasn't enabled the
* actual delivery of the EBBs.
*/
/*
 * Child side: syncs with the parent around event creation, pokes PMC1
 * to prove the PMU is accessible, then just spins and exits. It never
 * enables EBB delivery, so no EBBs should actually fire.
 */
static int victim_child(union pipe read_pipe, union pipe write_pipe)
{
	int i;
	FAIL_IF(wait_for_parent(read_pipe));
	FAIL_IF(notify_parent(write_pipe));
	/* Parent creates EBB event */
	FAIL_IF(wait_for_parent(read_pipe));
	FAIL_IF(notify_parent(write_pipe));
	/* Check the EBB is enabled by writing PMC1 */
	write_pmc1();
	/* EBB event is enabled here */
	for (i = 0; i < 1000000; i++) ;
	return 0;
}
/*
 * Create and enable an EBB event on the child. Nothing interesting
 * happens because the child never enables EBB delivery itself.
 */
int ebb_on_child(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	pid_t pid;
	SKIP_IF(!ebb_is_supported());
	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);
	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(victim_child(write_pipe, read_pipe));
	}
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
	/* Child is running now */
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;
	FAIL_IF(event_open_with_pid(&event, pid));
	FAIL_IF(ebb_event_enable(&event));
	FAIL_IF(sync_with_child(read_pipe, write_pipe));
	/* Child should just exit happily */
	FAIL_IF(wait_for_child(pid));
	event_close(&event);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(ebb_on_child, "ebb_on_child");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <signal.h>
#include "ebb.h"
/*
* Test that closing the EBB event clears MMCR0_PMCC, preventing further access
* by userspace to the PMU hardware.
*/
/*
 * Take at least one EBB, close the event, then verify userspace PMU
 * register writes fault (MMCR0[PMCC] cleared) while EBB registers
 * remain readable.
 */
int close_clears_pmcc(void)
{
	struct event event;
	SKIP_IF(!ebb_is_supported());
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);
	FAIL_IF(event_open(&event));
	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	while (ebb_state.stats.ebb_count < 1)
		FAIL_IF(core_busy_loop());
	ebb_global_disable();
	event_close(&event);
	FAIL_IF(ebb_state.stats.ebb_count == 0);
	/* The real test is here, do we take a SIGILL when writing PMU regs now
	 * that we have closed the event. We expect that we will. */
	FAIL_IF(catch_sigill(write_pmc1));
	/* We should still be able to read EBB regs though */
	mfspr(SPRN_EBBHR);
	mfspr(SPRN_EBBRR);
	mfspr(SPRN_BESCR);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(close_clears_pmcc, "close_clears_pmcc");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "ebb.h"
/*
* Test of counting cycles while manipulating the user accessible bits in MMCR2.
*/
/* We use two values because the first freezes PMC1 and so we would get no EBBs */
#define MMCR2_EXPECTED_1 0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
#define MMCR2_EXPECTED_2 0x0020100804020000UL /* ( FC2P|FC3P|FC4P|FC5P|FC6P) */
/*
 * Alternate MMCR2 between two freeze masks while taking EBBs, and
 * check the value we wrote is the value we read back each iteration.
 */
int cycles_with_mmcr2(void)
{
	struct event event;
	uint64_t val, expected[2], actual;
	int i;
	bool bad_mmcr2;
	SKIP_IF(!ebb_is_supported());
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;
	FAIL_IF(event_open(&event));
	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	/* XXX Set of MMCR2 must be after enable */
	expected[0] = MMCR2_EXPECTED_1;
	expected[1] = MMCR2_EXPECTED_2;
	i = 0;
	bad_mmcr2 = false;
	actual = 0;
	/* Make sure we loop until we take at least one EBB */
	while ((ebb_state.stats.ebb_count < 20 && !bad_mmcr2) ||
		ebb_state.stats.ebb_count < 1)
	{
		/* Alternate between the two masks each iteration */
		mtspr(SPRN_MMCR2, expected[i % 2]);
		FAIL_IF(core_busy_loop());
		val = mfspr(SPRN_MMCR2);
		if (val != expected[i % 2]) {
			bad_mmcr2 = true;
			actual = val;
		}
		i++;
	}
	ebb_global_disable();
	ebb_freeze_pmcs();
	dump_ebb_state();
	event_close(&event);
	FAIL_IF(ebb_state.stats.ebb_count == 0);
	if (bad_mmcr2)
		printf("Bad MMCR2 value seen is 0x%lx\n", actual);
	FAIL_IF(bad_mmcr2);
	return 0;
}
/* Register the test with the powerpc selftest harness. */
int main(void)
{
	return test_harness(cycles_with_mmcr2, "cycles_with_mmcr2");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "ebb.h"
/*
* Test of counting cycles while using MMCR0_FC (freeze counters) to only count
* parts of the code. This is complicated by the fact that FC is set by the
* hardware when the event overflows. We may take the EBB after we have set FC,
* so we have to be careful about whether we clear FC at the end of the EBB
* handler or not.
*/
/* Set by the main loop to tell the EBB handler the counters are (meant to be) frozen */
static bool counters_frozen = false;
/* Number of EBBs that arrived while the counters were frozen */
static int ebbs_while_frozen = 0;

/*
 * EBB handler: count EBBs and, when the main loop has frozen the counters,
 * leave MMCR0_FC set on exit so that counting stays stopped.
 */
static void ebb_callee(void)
{
	uint64_t mask, val;

	/* Bits we will clear from MMCR0 when resetting at the end of the handler */
	mask = MMCR0_PMAO | MMCR0_FC;

	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		/* No PMU exception pending - this EBB is spurious */
		ebb_state.stats.spurious++;
		goto out;
	}

	ebb_state.stats.ebb_count++;
	trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);

	val = mfspr(SPRN_MMCR0);
	trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);

	if (counters_frozen) {
		trace_log_string(ebb_state.trace, "frozen");
		ebbs_while_frozen++;
		/* Don't clear FC - the main loop wants the counters to stay frozen */
		mask &= ~MMCR0_FC;
	}

	count_pmc(1, sample_period);
out:
	reset_ebb_with_clear_mask(mask);
}
/*
 * Count cycles while repeatedly freezing/unfreezing the counters with
 * MMCR0_FC, checking after each re-freeze that FC is still set - i.e.
 * that the EBB handler did not clear it behind our back.
 */
int cycles_with_freeze(void)
{
	struct event event;
	uint64_t val;
	bool fc_cleared;

	SKIP_IF(!ebb_is_supported());

	/* 0x1001e is the "cycles" event code used throughout these EBB tests */
	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	/* Count user mode only - EBBs are a userspace feature */
	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	setup_ebb_handler(ebb_callee);
	ebb_global_enable();

	FAIL_IF(ebb_event_enable(&event));

	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));

	fc_cleared = false;

	/* Make sure we loop until we take at least one EBB */
	while ((ebb_state.stats.ebb_count < 20 && !fc_cleared) ||
	       ebb_state.stats.ebb_count < 1)
	{
		/* Flag must be visible to the EBB handler before we unfreeze */
		counters_frozen = false;
		mb();
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);

		FAIL_IF(core_busy_loop());

		/* Flag must be visible to the EBB handler before we freeze */
		counters_frozen = true;
		mb();
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);

		/* FC should still be set here - the handler must not have cleared it */
		val = mfspr(SPRN_MMCR0);
		if (! (val & MMCR0_FC)) {
			printf("Outside of loop, FC NOT set MMCR0 0x%lx\n", val);
			fc_cleared = true;
		}
	}

	ebb_global_disable();
	ebb_freeze_pmcs();

	dump_ebb_state();

	printf("EBBs while frozen %d\n", ebbs_while_frozen);

	event_close(&event);

	/* The test is only meaningful if we took at least one EBB */
	FAIL_IF(ebb_state.stats.ebb_count == 0);

	FAIL_IF(fc_cleared);

	return 0;
}

int main(void)
{
	return test_harness(cycles_with_freeze, "cycles_with_freeze");
}
| linux-master | tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/* All successful D-side store dispatches for this thread that were L2 Miss */
#define EventCode 0x46880
extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
/*
* A perf sampling test for mmcr1
* fields : comb.
*/
static int mmcr1_comb(void)
{
struct event event;
u64 *intr_regs;
u64 dummy;
/* Check for platform support for the test */
SKIP_IF(check_pvr_for_sampling_tests());
/* Init the event for the sampling test */
event_init_sampling(&event, EventCode);
event.attr.sample_regs_intr = platform_extended_mask;
FAIL_IF(event_open(&event));
event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
FAIL_IF(event_enable(&event));
/* workload to make the event overflow */
thirty_two_instruction_loop_with_ll_sc(10000000, &dummy);
FAIL_IF(event_disable(&event));
/* Check for sample count */
FAIL_IF(!collect_samples(event.mmap_buffer));
intr_regs = get_intr_regs(&event, event.mmap_buffer);
/* Check for intr_regs */
FAIL_IF(!intr_regs);
/*
* Verify that comb field match with
* corresponding event code fields
*/
FAIL_IF(EV_CODE_EXTRACT(event.attr.config, comb) !=
get_mmcr1_comb(get_reg_value(intr_regs, "MMCR1"), 4));
event_close(&event);
return 0;
}
/* Harness entry point: run the test and propagate its result as the exit code */
int main(void)
{
	return test_harness(mmcr1_comb, "mmcr1_comb");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
 * A perf sampling test for mmcr0
 * field: cc56run.
 * Verifies the cc56run bit is set in the MMCR0 value captured at the
 * sampling interrupt.
 */
static int mmcr0_cc56run(void)
{
	struct event event;
	u64 *intr_regs;

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());
	/* This test requires ISA v3.1 (power10) support */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the event for the sampling test */
	event_init_sampling(&event, 0x500fa);
	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop(10000);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/* Verify that cc56run bit is set in MMCR0 */
	FAIL_IF(!get_mmcr0_cc56run(get_reg_value(intr_regs, "MMCR0"), 5));

	event_close(&event);
	return 0;
}

int main(void)
{
	return test_harness(mmcr0_cc56run, "mmcr0_cc56run");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/*
* Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0)
* Threshold event selection used is issue to complete for cycles
* Sampling criteria is Load only sampling
*/
#define EventCode 0x35340401e0
extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
/*
 * A perf sampling test to verify that the thresholding, marked-sample,
 * random-sampling-eligibility, sample-mode and sdar-mode fields of the
 * sampled MMCRA match the corresponding event code fields.
 */
static int mmcra_thresh_marked_sample(void)
{
	struct event event;
	u64 *intr_regs;
	u64 dummy;	/* store target for the ll/sc workload */

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());

	/* Init the event for the sampling test */
	event_init_sampling(&event, EventCode);
	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/*
	 * Verify that thresh sel/start/stop, marked, random sample
	 * eligibility, sdar mode and sample mode fields match with
	 * the corresponding event code fields
	 */
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_sel) !=
		get_mmcra_thd_sel(get_reg_value(intr_regs, "MMCRA"), 4));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_start) !=
		get_mmcra_thd_start(get_reg_value(intr_regs, "MMCRA"), 4));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_stop) !=
		get_mmcra_thd_stop(get_reg_value(intr_regs, "MMCRA"), 4));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, marked) !=
		get_mmcra_marked(get_reg_value(intr_regs, "MMCRA"), 4));
	/* upper bits of the sample field = random sampling eligibility */
	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) >> 2) !=
		get_mmcra_rand_samp_elig(get_reg_value(intr_regs, "MMCRA"), 4));
	/* low two bits of the sample field = sample mode */
	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) & 0x3) !=
		get_mmcra_sample_mode(get_reg_value(intr_regs, "MMCRA"), 4));
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sm) !=
		get_mmcra_sm(get_reg_value(intr_regs, "MMCRA"), 4));

	event_close(&event);
	return 0;
}

int main(void)
{
	return test_harness(mmcra_thresh_marked_sample, "mmcra_thresh_marked_sample");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
/* The data cache was reloaded from local core's L3 due to a demand load */
#define EventCode 0x1340000001c040
/*
 * A perf sampling test for mmcr3
 * fields: verify that the src field of the sampled MMCR3 matches the
 * corresponding field of the event code.
 */
static int mmcr3_src(void)
{
	struct event event;
	u64 *intr_regs;
	u64 dummy;	/* store target for the ll/sc workload */

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());
	/* This test requires ISA v3.1 (power10) support */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the event for the sampling test */
	event_init_sampling(&event, EventCode);
	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make event overflow */
	thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/*
	 * Verify that src field of MMCR3 match with
	 * corresponding event code field
	 */
	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, mmcr3_src) !=
		get_mmcr3_src(get_reg_value(intr_regs, "MMCR3"), 1));

	event_close(&event);
	return 0;
}

int main(void)
{
	return test_harness(mmcr3_src, "mmcr3_src");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/* Instructions */
#define EventCode 0x500fa
/* ifm field for conditional branch mode */
#define IFM_COND_BRANCH 0x3
/*
 * A perf sampling test for mmcra
 * field: ifm for bhrb cond call.
 * Verifies that programming PERF_SAMPLE_BRANCH_COND sets the ifm field
 * of the sampled MMCRA to the conditional-branch mode.
 */
static int mmcra_bhrb_cond_test(void)
{
	struct event event;
	u64 *intr_regs;

	/*
	 * Check for platform support for the test.
	 * This test is only applicable on power10
	 */
	SKIP_IF(check_pvr_for_sampling_tests());
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the event for the sampling test */
	event_init_sampling(&event, EventCode);
	event.attr.sample_regs_intr = platform_extended_mask;
	event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
	event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND;
	event.attr.exclude_kernel = 1;

	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop(10000);

	FAIL_IF(event_disable(&event));

	/*
	 * Check for sample count before parsing the registers, consistent
	 * with the other sampling tests in this directory: parsing the
	 * interrupt registers is only meaningful once at least one sample
	 * has been recorded.
	 */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/* Verify that ifm bit is set properly in MMCRA */
	FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_COND_BRANCH);

	event_close(&event);
	return 0;
}
/* Harness entry point: run the test and propagate its result as the exit code */
int main(void)
{
	return test_harness(mmcra_bhrb_cond_test, "mmcra_bhrb_cond_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Kajol Jain, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/*
* Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0)
* Threshold event selection used is issue to complete for cycles
* Sampling criteria is Load only sampling
*/
#define p9_EventCode 0x13E35340401e0
#define p10_EventCode 0x35340401e0
extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
/*
 * A perf sampling test to verify that the thresh cmp field of the
 * sampled MMCRA matches the value programmed for the event.
 */
static int mmcra_thresh_cmp(void)
{
	struct event event;
	u64 *intr_regs;
	u64 dummy;	/* store target for the ll/sc workload */

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());

	/* Skip for compat mode */
	SKIP_IF(check_for_compat_mode());

	/*
	 * Init the event for the sampling test.
	 * Pre-ISA-v3.1 CPUs take the full event code; on ISA v3.1
	 * (power10) the threshold compare value is passed via config1.
	 */
	if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
		event_init_sampling(&event, p9_EventCode);
	} else {
		event_init_sampling(&event, p10_EventCode);
		event.attr.config1 = 1000;
	}

	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/* Verify that thresh cmp match with the corresponding event code fields */
	FAIL_IF(get_thresh_cmp_val(event) !=
		get_mmcra_thd_cmp(get_reg_value(intr_regs, "MMCRA"), 4));

	event_close(&event);
	return 0;
}
int main(void)
{
	/*
	 * Return the harness result directly instead of wrapping it in
	 * FAIL_IF(): FAIL_IF() would collapse any non-zero result
	 * (including a skip code) into exit status 1 and otherwise fall
	 * off the end of main(). This also matches every other test in
	 * this directory.
	 */
	return test_harness(mmcra_thresh_cmp, "mmcra_thresh_cmp");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
 * A perf sampling test for mmcr0
 * fields : pmae, pmao.
 */
static int mmcr0_exceptionbits(void)
{
	struct event event;
	u64 *intr_regs;

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());

	/* Init the event for the sampling test */
	event_init_sampling(&event, 0x500fa);
	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop(10000);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/*
	 * In the MMCR0 captured at the sampling interrupt, pmae must be
	 * cleared and pmao must be set.
	 */
	FAIL_IF(get_mmcr0_pmae(get_reg_value(intr_regs, "MMCR0"), 5));
	FAIL_IF(!get_mmcr0_pmao(get_reg_value(intr_regs, "MMCR0"), 5));

	event_close(&event);
	return 0;
}

int main(void)
{
	return test_harness(mmcr0_exceptionbits, "mmcr0_exceptionbits");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
extern void thirty_two_instruction_loop(int loops);
/*
 * A perf sampling test for mmcr0
 * field: pmccext
 */
static int mmcr0_pmccext(void)
{
	struct event event;
	u64 *intr_regs;

	/* Check for platform support for the test */
	SKIP_IF(check_pvr_for_sampling_tests());
	/* This test requires ISA v3.1 (power10) support */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	/* Init the event for the sampling test */
	event_init_sampling(&event, 0x4001e);
	event.attr.sample_regs_intr = platform_extended_mask;
	FAIL_IF(event_open(&event));
	event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);

	FAIL_IF(event_enable(&event));

	/* workload to make the event overflow */
	thirty_two_instruction_loop(10000);

	FAIL_IF(event_disable(&event));

	/* Check for sample count */
	FAIL_IF(!collect_samples(event.mmap_buffer));

	intr_regs = get_intr_regs(&event, event.mmap_buffer);

	/* Check for intr_regs */
	FAIL_IF(!intr_regs);

	/* Verify that pmccext field is set in MMCR0 */
	FAIL_IF(!get_mmcr0_pmccext(get_reg_value(intr_regs, "MMCR0"), 4));

	event_close(&event);
	return 0;
}

int main(void)
{
	return test_harness(mmcr0_pmccext, "mmcr0_pmccext");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022, Athira Rajeev, IBM Corp.
*/
#include <stdio.h>
#include <stdlib.h>
#include "../event.h"
#include "misc.h"
#include "utils.h"
/*
* A perf sampling test to check bhrb filter
* map. All the branch filters are not supported
* in powerpc. Supported filters in:
* power10: any, any_call, ind_call, cond
* power9: any, any_call
*
* Testcase checks event open for invalid bhrb filter
* types should fail and valid filter types should pass.
* Testcase does validity check for these branch
* sample types.
*/
/* Invalid types for powerpc */
/* Valid bhrb filters in power9/power10 */
int bhrb_filter_map_valid_common[] = {
	PERF_SAMPLE_BRANCH_ANY,
	PERF_SAMPLE_BRANCH_ANY_CALL,
};

/* Valid bhrb filters in power10 only (expected to be rejected on power9) */
int bhrb_filter_map_valid_p10[] = {
	PERF_SAMPLE_BRANCH_IND_CALL,
	PERF_SAMPLE_BRANCH_COND,
};

/* Event code used to open the test event */
#define EventCode 0x1001e
/*
 * Walk every branch_sample_type bit: event_open() must fail for the
 * branch filters powerpc does not support and succeed for the supported
 * ones (per-platform lists in the file header comment above).
 */
static int bhrb_filter_map_test(void)
{
	struct event event;
	int i;

	/* Check for platform support for the test */
	SKIP_IF(platform_check_for_tests());

	/*
	 * Skip for Generic compat PMU since
	 * bhrb filters is not supported
	 */
	SKIP_IF(check_for_generic_compat_pmu());

	/* Init the event for the sampling test */
	event_init(&event, EventCode);
	event.attr.sample_period = 1000;
	event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	event.attr.disabled = 1;

	/* Invalid filter maps which are expected to fail in event_open */
	for (i = PERF_SAMPLE_BRANCH_USER_SHIFT; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		/* Skip the valid branch sample type */
		if (i == PERF_SAMPLE_BRANCH_ANY_SHIFT || i == PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT \
			|| i == PERF_SAMPLE_BRANCH_IND_CALL_SHIFT || i == PERF_SAMPLE_BRANCH_COND_SHIFT)
			continue;
		event.attr.branch_sample_type = 1U << i;
		/* Unsupported filter: the open itself is expected to fail */
		FAIL_IF(!event_open(&event));
	}

	/* valid filter maps for power9/power10 which are expected to pass in event_open */
	for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_common); i++) {
		event.attr.branch_sample_type = bhrb_filter_map_valid_common[i];
		FAIL_IF(event_open(&event));
		event_close(&event);
	}

	/*
	 * filter maps which are valid in power10 and invalid in power9.
	 * PVR check is used here since PMU specific data like bhrb filter
	 * alternative tests is handled by respective PMU driver code and
	 * using PVR will work correctly for all cases including generic
	 * compat mode.
	 */
	if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
		for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
			event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
			FAIL_IF(event_open(&event));
			event_close(&event);
		}
	} else {
		/* On non-power10 CPUs these filters must be rejected */
		for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
			event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
			FAIL_IF(!event_open(&event));
		}
	}

	/*
	 * Combine filter maps which includes a valid branch filter and an invalid branch
	 * filter. Example: any ( PERF_SAMPLE_BRANCH_ANY) and any_call
	 * (PERF_SAMPLE_BRANCH_ANY_CALL).
	 * The perf_event_open should fail in this case.
	 */
	event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL;
	FAIL_IF(!event_open(&event));

	return 0;
}

int main(void)
{
	return test_harness(bhrb_filter_map_test, "bhrb_filter_map_test");
}
| linux-master | tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c |
Subsets and Splits