/* criu-master/compel/arch/ppc64/src/lib/include/uapi/asm/sigframe.h */
#ifndef UAPI_COMPEL_ASM_SIGFRAME_H__
#define UAPI_COMPEL_ASM_SIGFRAME_H__
#include <asm/ptrace.h>
#include <asm/elf.h>
#include <asm/types.h>
/*
* sigcontext structure defined in file
* /usr/include/powerpc64le-linux-gnu/bits/sigcontext.h,
* included from /usr/include/signal.h
*
* Kernel definition can be found in arch/powerpc/include/uapi/asm/sigcontext.h
*/
#include <signal.h>
// XXX: the identifier rt_sigcontext is expected to be a struct by the CRIU code
#define rt_sigcontext sigcontext
#include <compel/sigframe-common.h>
#define RT_SIGFRAME_OFFSET(rt_sigframe) 0
/* Copied from the Linux kernel header arch/powerpc/include/asm/ptrace.h */
#define USER_REDZONE_SIZE 512
#if _CALL_ELF != 2
#error Only supporting ABIv2.
#else
#define STACK_FRAME_MIN_SIZE 32
#endif
/* Copied from the Linux kernel source file arch/powerpc/kernel/signal_64.c */
#define TRAMP_SIZE 6
/*
* ucontext_t defined in /usr/include/powerpc64le-linux-gnu/sys/ucontext.h
*/
struct rt_sigframe {
/* sys_rt_sigreturn requires the ucontext be the first field */
ucontext_t uc;
ucontext_t uc_transact; /* Transactional state */
unsigned long _unused[2];
unsigned int tramp[TRAMP_SIZE];
struct rt_siginfo *pinfo;
void *puc;
struct rt_siginfo info;
/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
char abigap[USER_REDZONE_SIZE];
} __attribute__((aligned(16)));
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"mr 1, %0 \n" \
"li 0, "__stringify(__NR_rt_sigreturn)" \n" \
"sc \n" \
: \
: "r"(new_sp) \
: "memory")
/* clang-format on */
#if _CALL_ELF != 2
#error Only supporting ABIv2.
#else
#define FRAME_MIN_SIZE_PARM 96
#endif
#define RT_SIGFRAME_UC(rt_sigframe) (&(rt_sigframe)->uc)
#define RT_SIGFRAME_REGIP(rt_sigframe) ((long unsigned int)(rt_sigframe)->uc.uc_mcontext.gp_regs[PT_NIP])
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (1)
#define RT_SIGFRAME_FPU(rt_sigframe) (&(rt_sigframe)->uc.uc_mcontext)
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define MSR_TMA (1UL << 34) /* bit 29 Trans Mem state: Transactional */
#define MSR_TMS (1UL << 33) /* bit 30 Trans Mem state: Suspended */
#define MSR_TM (1UL << 32) /* bit 31 Trans Mem Available */
#define MSR_VEC (1UL << 25)
#define MSR_VSX (1UL << 23)
#define MSR_TM_ACTIVE(x) ((((x)&MSR_TM) && ((x) & (MSR_TMA | MSR_TMS))) != 0)
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */
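/*
 * Illustrative sketch (not part of the original header, guarded by a
 * hypothetical COMPEL_EXAMPLE macro): MSR_TM_ACTIVE() is true only when
 * the TM-available bit is set together with one of the two state bits.
 */
#ifdef COMPEL_EXAMPLE
#include <assert.h>
static void msr_tm_example(void)
{
	unsigned long suspended = MSR_TM | MSR_TMS; /* facility on, suspended */
	unsigned long facility_only = MSR_TM;	    /* facility on, no transaction */

	assert(MSR_TM_ACTIVE(suspended));
	assert(!MSR_TM_ACTIVE(facility_only));
}
#endif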

/* criu-master/compel/arch/s390/plugins/include/asm/syscall-types.h */
#ifndef COMPEL_ARCH_SYSCALL_TYPES_H__
#define COMPEL_ARCH_SYSCALL_TYPES_H__
#define SA_RESTORER 0x04000000U
typedef void rt_signalfn_t(int, siginfo_t *, void *);
typedef rt_signalfn_t *rt_sighandler_t;
typedef void rt_restorefn_t(void);
typedef rt_restorefn_t *rt_sigrestore_t;
#define _KNSIG 64
#define _NSIG_BPW 64
#define _KNSIG_WORDS (_KNSIG / _NSIG_BPW)
typedef struct {
unsigned long sig[_KNSIG_WORDS];
} k_rtsigset_t;
/*
* Used for rt_sigaction() system call - see kernel "struct sigaction" in
* include/linux/signal.h.
*/
typedef struct {
rt_sighandler_t rt_sa_handler;
unsigned long rt_sa_flags;
rt_sigrestore_t rt_sa_restorer;
k_rtsigset_t rt_sa_mask;
} rt_sigaction_t;
struct mmap_arg_struct;
#endif /* COMPEL_ARCH_SYSCALL_TYPES_H__ */

/* criu-master/compel/arch/s390/plugins/std/syscalls/syscalls-s390.c */
#include "asm/infect-types.h"
/*
* Declare the prototype here because including uapi/std/syscall.h
* would cause a compile error.
*/
long sys_old_mmap(struct mmap_arg_struct *);
/*
* On s390 __ARCH_WANT_SYS_OLD_MMAP is defined, so the mmap system call
* is implemented with a single "mmap_arg_struct" parameter.
*/
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset)
{
struct mmap_arg_struct arg_struct;
arg_struct.addr = (unsigned long)addr;
arg_struct.len = len;
arg_struct.prot = prot;
arg_struct.flags = flags;
arg_struct.fd = fd;
arg_struct.offset = offset;
return sys_old_mmap(&arg_struct);
}
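/*
 * Hypothetical usage sketch (not in the original file): parasite code
 * calls the wrapper above exactly like a regular mmap(); only the way
 * the arguments travel to the kernel differs. The protection and flag
 * values are spelled out numerically to keep the sketch self-contained.
 */
#ifdef COMPEL_EXAMPLE
static unsigned long example_map_anon_page(void)
{
	/* 0x3 = PROT_READ|PROT_WRITE, 0x22 = MAP_PRIVATE|MAP_ANONYMOUS */
	return sys_mmap((void *)0, 4096, 0x3, 0x22, -1UL, 0);
}
#endif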

/* criu-master/compel/arch/s390/src/lib/infect.c */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <elf.h>
#include <compel/plugins/std/syscall-codes.h>
#include "uapi/compel/asm/infect-types.h"
#include "errno.h"
#include "log.h"
#include "common/bug.h"
#include "infect.h"
#include "ptrace.h"
#include "infect-priv.h"
#define NT_PRFPREG 2
#define NT_S390_VXRS_LOW 0x309
#define NT_S390_VXRS_HIGH 0x30a
#define NT_S390_GS_CB 0x30b
#define NT_S390_GS_BC 0x30c
#define NT_S390_RI_CB 0x30d
/*
* Print general purpose and access registers
*/
static void print_user_regs_struct(const char *msg, int pid, user_regs_struct_t *regs)
{
int i;
pr_debug("%s: Registers for pid=%d\n", msg, pid);
pr_debug("system_call %08lx\n", (unsigned long)regs->system_call);
pr_debug(" psw %016lx %016lx\n", regs->prstatus.psw.mask, regs->prstatus.psw.addr);
pr_debug(" orig_gpr2 %016lx\n", regs->prstatus.orig_gpr2);
for (i = 0; i < 16; i++)
pr_debug(" g%02d %016lx\n", i, regs->prstatus.gprs[i]);
for (i = 0; i < 16; i++)
pr_debug(" a%02d %08x\n", i, regs->prstatus.acrs[i]);
}
/*
* Print vector registers
*/
static void print_vxrs(user_fpregs_struct_t *fpregs)
{
int i;
if (!(fpregs->flags & USER_FPREGS_VXRS)) {
pr_debug(" No VXRS\n");
return;
}
for (i = 0; i < 16; i++)
pr_debug(" vx_low%02d %016lx\n", i, fpregs->vxrs_low[i]);
for (i = 0; i < 16; i++)
pr_debug(" vx_high%02d %016lx %016lx\n", i, fpregs->vxrs_high[i].part1, fpregs->vxrs_high[i].part2);
}
/*
* Print guarded-storage control block
*/
static void print_gs_cb(user_fpregs_struct_t *fpregs)
{
int i;
if (!(fpregs->flags & USER_GS_CB)) {
pr_debug(" No GS_CB\n");
return;
}
for (i = 0; i < 4; i++)
pr_debug(" gs_cb%02d %016lx\n", i, fpregs->gs_cb[i]);
}
/*
* Print guarded-storage broadcast control block
*/
static void print_gs_bc(user_fpregs_struct_t *fpregs)
{
int i;
if (!(fpregs->flags & USER_GS_BC)) {
pr_debug(" No GS_BC\n");
return;
}
for (i = 0; i < 4; i++)
pr_debug(" gs_bc%02d %016lx\n", i, fpregs->gs_bc[i]);
}
/*
* Print runtime-instrumentation control block
*/
static void print_ri_cb(user_fpregs_struct_t *fpregs)
{
int i;
if (!(fpregs->flags & USER_RI_CB)) {
pr_debug(" No RI_CB\n");
return;
}
for (i = 0; i < 8; i++)
pr_debug(" ri_cb%02d %016lx\n", i, fpregs->ri_cb[i]);
}
/*
* Print FP registers, VX registers, guarded-storage, and
* runtime-instrumentation
*/
static void print_user_fpregs_struct(const char *msg, int pid, user_fpregs_struct_t *fpregs)
{
int i;
pr_debug("%s: FP registers for pid=%d\n", msg, pid);
pr_debug(" fpc %08x\n", fpregs->prfpreg.fpc);
for (i = 0; i < 16; i++)
pr_debug(" f%02d %016lx\n", i, fpregs->prfpreg.fprs[i]);
print_vxrs(fpregs);
print_gs_cb(fpregs);
print_gs_bc(fpregs);
print_ri_cb(fpregs);
}
int sigreturn_prep_regs_plain(struct rt_sigframe *sigframe, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{
_sigregs_ext *dst_ext = &sigframe->uc.uc_mcontext_ext;
_sigregs *dst = &sigframe->uc.uc_mcontext;
memcpy(dst->regs.gprs, regs->prstatus.gprs, sizeof(regs->prstatus.gprs));
memcpy(dst->regs.acrs, regs->prstatus.acrs, sizeof(regs->prstatus.acrs));
memcpy(&dst->regs.psw, &regs->prstatus.psw, sizeof(regs->prstatus.psw));
memcpy(&dst->fpregs.fpc, &fpregs->prfpreg.fpc, sizeof(fpregs->prfpreg.fpc));
memcpy(&dst->fpregs.fprs, &fpregs->prfpreg.fprs, sizeof(fpregs->prfpreg.fprs));
if (fpregs->flags & USER_FPREGS_VXRS) {
memcpy(&dst_ext->vxrs_low, &fpregs->vxrs_low, sizeof(fpregs->vxrs_low));
memcpy(&dst_ext->vxrs_high, &fpregs->vxrs_high, sizeof(fpregs->vxrs_high));
} else {
memset(&dst_ext->vxrs_low, 0, sizeof(dst_ext->vxrs_low));
memset(&dst_ext->vxrs_high, 0, sizeof(dst_ext->vxrs_high));
}
return 0;
}
int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe)
{
return 0;
}
/*
* Rewind the psw by 'bytes' bytes
*/
static inline void rewind_psw(psw_t *psw, unsigned long bytes)
{
unsigned long mask;
pr_debug("Rewind psw: %016lx bytes=%lu\n", psw->addr, bytes);
mask = (psw->mask & PSW_MASK_EA) ? -1UL : (psw->mask & PSW_MASK_BA) ? (1UL << 31) - 1 : (1UL << 24) - 1;
psw->addr = (psw->addr - bytes) & mask;
}
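/*
 * Worked example (an illustrative sketch, not original code): the mask
 * chosen in rewind_psw() truncates the rewound address to the current
 * addressing mode - 64-bit (EA set), 31-bit (BA only) or 24-bit.
 */
#ifdef COMPEL_EXAMPLE
static void rewind_psw_example(void)
{
	psw_t psw = { .mask = PSW_MASK_EA | PSW_MASK_BA, .addr = 0x1000 };

	rewind_psw(&psw, 2); /* undo one 2-byte svc: psw.addr becomes 0xffe */
}
#endif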
/*
* Get vector registers
*/
int get_vx_regs(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct iovec iov;
fpregs->flags &= ~USER_FPREGS_VXRS;
iov.iov_base = &fpregs->vxrs_low;
iov.iov_len = sizeof(fpregs->vxrs_low);
if (ptrace(PTRACE_GETREGSET, pid, NT_S390_VXRS_LOW, &iov) < 0) {
/*
* If the kernel does not support vector registers, we get
* EINVAL. With kernel support and old hardware, we get ENODEV.
*/
if (errno == EINVAL || errno == ENODEV) {
memset(fpregs->vxrs_low, 0, sizeof(fpregs->vxrs_low));
memset(fpregs->vxrs_high, 0, sizeof(fpregs->vxrs_high));
pr_debug("VXRS registers not supported\n");
return 0;
}
pr_perror("Couldn't get VXRS_LOW");
return -1;
}
iov.iov_base = &fpregs->vxrs_high;
iov.iov_len = sizeof(fpregs->vxrs_high);
if (ptrace(PTRACE_GETREGSET, pid, NT_S390_VXRS_HIGH, &iov) < 0) {
pr_perror("Couldn't get VXRS_HIGH");
return -1;
}
fpregs->flags |= USER_FPREGS_VXRS;
return 0;
}
/*
* Get guarded-storage control block
*/
int get_gs_cb(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct iovec iov;
fpregs->flags &= ~(USER_GS_CB | USER_GS_BC);
iov.iov_base = &fpregs->gs_cb;
iov.iov_len = sizeof(fpregs->gs_cb);
if (ptrace(PTRACE_GETREGSET, pid, NT_S390_GS_CB, &iov) < 0) {
switch (errno) {
case EINVAL:
case ENODEV:
memset(&fpregs->gs_cb, 0, sizeof(fpregs->gs_cb));
memset(&fpregs->gs_bc, 0, sizeof(fpregs->gs_bc));
pr_debug("GS_CB not supported\n");
return 0;
case ENODATA:
pr_debug("GS_CB not set\n");
break;
default:
return -1;
}
} else {
fpregs->flags |= USER_GS_CB;
}
iov.iov_base = &fpregs->gs_bc;
iov.iov_len = sizeof(fpregs->gs_bc);
if (ptrace(PTRACE_GETREGSET, pid, NT_S390_GS_BC, &iov) < 0) {
if (errno == ENODATA) {
pr_debug("GS_BC not set\n");
return 0;
}
pr_perror("Couldn't get GS_BC");
return -1;
}
fpregs->flags |= USER_GS_BC;
return 0;
}
/*
* Get runtime-instrumentation control block
*/
int get_ri_cb(pid_t pid, user_fpregs_struct_t *fpregs)
{
user_regs_struct_t regs;
struct iovec iov;
psw_t *psw;
fpregs->flags &= ~(USER_RI_CB | USER_RI_ON);
iov.iov_base = &fpregs->ri_cb;
iov.iov_len = sizeof(fpregs->ri_cb);
if (ptrace(PTRACE_GETREGSET, pid, NT_S390_RI_CB, &iov) < 0) {
switch (errno) {
case EINVAL:
case ENODEV:
memset(&fpregs->ri_cb, 0, sizeof(fpregs->ri_cb));
pr_debug("RI_CB not supported\n");
return 0;
case ENODATA:
pr_debug("RI_CB not set\n");
return 0;
default:
pr_perror("Couldn't get RI_CB");
return -1;
}
}
fpregs->flags |= USER_RI_CB;
/* Get PSW and check if runtime-instrumentation bit is enabled */
iov.iov_base = &regs.prstatus;
iov.iov_len = sizeof(regs.prstatus);
if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0)
return -1;
psw = &regs.prstatus.psw;
if (psw->mask & PSW_MASK_RI)
fpregs->flags |= USER_RI_ON;
return 0;
}
/*
* Disable runtime-instrumentation bit
*/
static int s390_disable_ri_bit(pid_t pid, user_regs_struct_t *regs)
{
struct iovec iov;
psw_t *psw;
iov.iov_base = &regs->prstatus;
iov.iov_len = sizeof(regs->prstatus);
psw = &regs->prstatus.psw;
psw->mask &= ~PSW_MASK_RI;
return ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}
/*
* Prepare task registers for restart
*/
int compel_get_task_regs(pid_t pid, user_regs_struct_t *regs, user_fpregs_struct_t *ext_regs, save_regs_t save,
void *arg, __maybe_unused unsigned long flags)
{
user_fpregs_struct_t tmp, *fpregs = ext_regs ? ext_regs : &tmp;
struct iovec iov;
int rewind;
print_user_regs_struct("compel_get_task_regs", pid, regs);
memset(fpregs, 0, sizeof(*fpregs));
iov.iov_base = &fpregs->prfpreg;
iov.iov_len = sizeof(fpregs->prfpreg);
if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) < 0) {
pr_perror("Couldn't get floating-point registers");
return -1;
}
if (get_vx_regs(pid, fpregs)) {
pr_perror("Couldn't get vector registers");
return -1;
}
if (get_gs_cb(pid, fpregs)) {
pr_perror("Couldn't get guarded-storage");
return -1;
}
if (get_ri_cb(pid, fpregs)) {
pr_perror("Couldn't get runtime-instrumentation");
return -1;
}
/*
* If the runtime-instrumentation bit is set, we have to disable it
* before we execute parasite code. Otherwise parasite operations
* would be recorded.
*/
if (fpregs->flags & USER_RI_ON)
s390_disable_ri_bit(pid, regs);
print_user_fpregs_struct("compel_get_task_regs", pid, fpregs);
/* Check for system call restarting. */
if (regs->system_call) {
rewind = regs->system_call >> 16;
/* see arch/s390/kernel/signal.c: do_signal() */
switch ((long)regs->prstatus.gprs[2]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->prstatus.gprs[2] = regs->prstatus.orig_gpr2;
rewind_psw(&regs->prstatus.psw, rewind);
pr_debug("New gpr2: %016lx\n", regs->prstatus.gprs[2]);
break;
case -ERESTART_RESTARTBLOCK:
pr_warn("Will restore %d with interrupted system call\n", pid);
regs->prstatus.gprs[2] = -EINTR;
break;
}
}
/* Call save_task_regs() */
return save(arg, regs, fpregs);
}
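/*
 * Worked example (a sketch based on the code above; the exact field
 * layout is our assumption): the NT_S390_SYSTEM_CALL value appears to
 * keep the syscall number in the low halfword and the length of the
 * interrupted instruction in the high halfword, which is what
 * "system_call >> 16" extracts as the rewind distance.
 */
#ifdef COMPEL_EXAMPLE
static void restart_rewind_example(void)
{
	uint32_t system_call = (2u << 16) | 173; /* 2-byte svc, syscall 173 */
	int rewind = system_call >> 16;		 /* rewind == 2 */

	(void)rewind;
}
#endif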
int compel_set_task_ext_regs(pid_t pid, user_fpregs_struct_t *ext_regs)
{
struct iovec iov;
int ret = 0;
iov.iov_base = &ext_regs->prfpreg;
iov.iov_len = sizeof(ext_regs->prfpreg);
if (ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov) < 0) {
pr_perror("Couldn't set floating-point registers");
ret = -1;
}
if (ext_regs->flags & USER_FPREGS_VXRS) {
iov.iov_base = &ext_regs->vxrs_low;
iov.iov_len = sizeof(ext_regs->vxrs_low);
if (ptrace(PTRACE_SETREGSET, pid, NT_S390_VXRS_LOW, &iov) < 0) {
pr_perror("Couldn't set VXRS_LOW");
ret = -1;
}
iov.iov_base = &ext_regs->vxrs_high;
iov.iov_len = sizeof(ext_regs->vxrs_high);
if (ptrace(PTRACE_SETREGSET, pid, NT_S390_VXRS_HIGH, &iov) < 0) {
pr_perror("Couldn't set VXRS_HIGH");
ret = -1;
}
}
if (ext_regs->flags & USER_GS_CB) {
iov.iov_base = &ext_regs->gs_cb;
iov.iov_len = sizeof(ext_regs->gs_cb);
if (ptrace(PTRACE_SETREGSET, pid, NT_S390_GS_CB, &iov) < 0) {
pr_perror("Couldn't set GS_CB");
ret = -1;
}
iov.iov_base = &ext_regs->gs_bc;
iov.iov_len = sizeof(ext_regs->gs_bc);
if (ptrace(PTRACE_SETREGSET, pid, NT_S390_GS_BC, &iov) < 0) {
pr_perror("Couldn't set GS_BC");
ret = -1;
}
}
if (ext_regs->flags & USER_RI_CB) {
iov.iov_base = &ext_regs->ri_cb;
iov.iov_len = sizeof(ext_regs->ri_cb);
if (ptrace(PTRACE_SETREGSET, pid, NT_S390_RI_CB, &iov) < 0) {
pr_perror("Couldn't set RI_CB");
ret = -1;
}
}
return ret;
}
/*
* Injected syscall instruction
*/
const char code_syscall[] = {
0x0a, 0x00, /* svc 0 */
0x00, 0x01, /* S390_BREAKPOINT_U16 */
0x00, 0x01, /* S390_BREAKPOINT_U16 */
0x00, 0x01, /* S390_BREAKPOINT_U16 */
};
static inline void __check_code_syscall(void)
{
BUILD_BUG_ON(sizeof(code_syscall) != BUILTIN_SYSCALL_SIZE);
BUILD_BUG_ON(!is_log2(sizeof(code_syscall)));
}
/*
* Issue s390 system call
*/
int compel_syscall(struct parasite_ctl *ctl, int nr, long *ret, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5, unsigned long arg6)
{
user_regs_struct_t regs = ctl->orig.regs;
int err;
/* Load syscall number into %r1 */
regs.prstatus.gprs[1] = (unsigned long)nr;
/* Load parameter registers %r2-%r7 */
regs.prstatus.gprs[2] = arg1;
regs.prstatus.gprs[3] = arg2;
regs.prstatus.gprs[4] = arg3;
regs.prstatus.gprs[5] = arg4;
regs.prstatus.gprs[6] = arg5;
regs.prstatus.gprs[7] = arg6;
err = compel_execute_syscall(ctl, &regs, (char *)code_syscall);
/* Return code from the system call is in %r2 */
if (ret)
*ret = regs.prstatus.gprs[2];
return err;
}
/*
* Issue s390 mmap call
*/
void *remote_mmap(struct parasite_ctl *ctl, void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
void *where = (void *)ctl->ictx.syscall_ip + BUILTIN_SYSCALL_SIZE;
struct mmap_arg_struct arg_struct;
pid_t pid = ctl->rpid;
long map = 0;
int err;
/* Setup s390 mmap data */
arg_struct.addr = (unsigned long)addr;
arg_struct.len = length;
arg_struct.prot = prot;
arg_struct.flags = flags;
arg_struct.fd = fd;
arg_struct.offset = offset;
/* Move args to process */
if (ptrace_swap_area(pid, where, &arg_struct, sizeof(arg_struct))) {
pr_err("Can't inject memfd args (pid: %d)\n", pid);
return NULL;
}
/* Do syscall */
err = compel_syscall(ctl, __NR_mmap, &map, (unsigned long)where, 0, 0, 0, 0, 0);
if (err < 0 || (long)map < 0)
map = 0;
/* Restore data */
if (ptrace_poke_area(pid, &arg_struct, where, sizeof(arg_struct))) {
pr_err("Can't restore mmap args (pid: %d)\n", pid);
if (map != 0) {
err = compel_syscall(ctl, __NR_munmap, NULL, map, length, 0, 0, 0, 0);
if (err)
pr_err("Can't munmap %d\n", err);
map = 0;
}
}
return (void *)map;
}
/*
* Setup registers for parasite call
*/
void parasite_setup_regs(unsigned long new_ip, void *stack, user_regs_struct_t *regs)
{
regs->prstatus.psw.addr = new_ip;
if (!stack)
return;
regs->prstatus.gprs[15] = ((unsigned long)stack) - STACK_FRAME_OVERHEAD;
}
/*
* Check if we have all kernel and CRIU features to dump the task
*/
bool arch_can_dump_task(struct parasite_ctl *ctl)
{
user_fpregs_struct_t fpregs;
user_regs_struct_t regs;
pid_t pid = ctl->rpid;
char str[8];
psw_t *psw;
if (ptrace_get_regs(pid, &regs))
return false;
psw = &regs.prstatus.psw;
/* Check if the kernel supports RI ptrace interface */
if (psw->mask & PSW_MASK_RI) {
if (get_ri_cb(pid, &fpregs) < 0) {
pr_perror("Can't dump process with RI bit active");
return false;
}
}
/* We don't support 24 and 31 bit mode - only 64 bit */
if (psw->mask & PSW_MASK_EA) {
if (psw->mask & PSW_MASK_BA)
return true;
else
sprintf(str, "??");
} else {
if (psw->mask & PSW_MASK_BA)
sprintf(str, "31");
else
sprintf(str, "24");
}
pr_err("Pid %d is %s bit: Only 64 bit tasks are supported\n", pid, str);
return false;
}
/*
* Return current alternate signal stack
*/
int arch_fetch_sas(struct parasite_ctl *ctl, struct rt_sigframe *s)
{
long ret;
int err;
err = compel_syscall(ctl, __NR_sigaltstack, &ret, 0, (unsigned long)&s->uc.uc_stack, 0, 0, 0, 0);
return err ? err : ret;
}
/*
* Find last mapped address of current process
*/
static unsigned long max_mapped_addr(void)
{
unsigned long addr_end, addr_max = 0;
char line[128];
FILE *fp;
fp = fopen("/proc/self/maps", "r");
if (!fp)
goto out;
/* Parse lines like: 3fff415f000-3fff4180000 rw-p 00000000 00:00 0 */
while (fgets(line, sizeof(line), fp)) {
char *ptr;
/* First skip start address */
strtoul(&line[0], &ptr, 16);
addr_end = strtoul(ptr + 1, NULL, 16);
addr_max = max(addr_max, addr_end);
}
fclose(fp);
out:
return addr_max - 1;
}
/*
* Kernel task size level
*
* We have (dynamic) 4 level page tables for 64 bit since linux 2.6.25:
*
* 5a216a2083 ("[S390] Add four level page tables for CONFIG_64BIT=y.")
* 6252d702c5 ("[S390] dynamic page tables.")
*
* The code below is already prepared for future (dynamic) 5 level page tables.
*
* Besides that there is one problematic kernel bug that has been fixed for
* linux 4.11 by the following commit:
*
* ee71d16d22 ("s390/mm: make TASK_SIZE independent from the number
* of page table levels")
*
* A 64 bit process on s390x always starts with 3 levels and upgrades to 4
* levels for mmap(> 4 TB) and to 5 levels for mmap(> 16 EB).
*
* Unfortunately, before fix ee71d16d22, munmap() and mremap() fail for
* addresses > 4 TB in a 3 level process. CRIU uses the task size to
* munmap() all memory from a starting point up to the task size to get
* rid of unwanted mappings, and it uses mremap() to establish the final
* mappings - which also fails if we want to restore mappings > 4 TB
* while the initial restore process still runs with 3 levels.
*
* To support the current CRIU design on s390 we return task size = 4 TB when
* a kernel without fix ee71d16d22 is detected. In this case we can dump at
* least processes with < 4 TB which is the most likely case anyway.
*
* For kernels with fix ee71d16d22 we are fully functional.
*/
enum kernel_ts_level {
/* Kernel with 4 level page tables without fix ee71d16d22 */
KERNEL_TS_LEVEL_4_FIX_NO,
/* Kernel with 4 level page tables with fix ee71d16d22 */
KERNEL_TS_LEVEL_4_FIX_YES,
/* Kernel with 4 level page tables with or without fix ee71d16d22 */
KERNEL_TS_LEVEL_4_FIX_UNKN,
/* Kernel with 5 level page tables */
KERNEL_TS_LEVEL_5,
};
/* See arch/s390/include/asm/processor.h */
#define TASK_SIZE_LEVEL_3 0x40000000000UL /* 4 TB */
#define TASK_SIZE_LEVEL_4 0x20000000000000UL /* 8 PB */
#define TASK_SIZE_LEVEL_5 0xffffffffffffefffUL /* 16 EB - 0x1000 */
/*
* Detect the kernel's task size level
*
* We use munmap() to probe the maximum page table level the kernel supports
*/
static enum kernel_ts_level get_kernel_ts_level(void)
{
unsigned long criu_end_addr = max_mapped_addr();
/* Check for 5 levels */
if (criu_end_addr >= TASK_SIZE_LEVEL_4)
return KERNEL_TS_LEVEL_5;
else if (munmap((void *)TASK_SIZE_LEVEL_4, 0x1000) == 0)
return KERNEL_TS_LEVEL_5;
if (criu_end_addr < TASK_SIZE_LEVEL_3) {
/* Check for 4 level kernel with fix */
if (munmap((void *)TASK_SIZE_LEVEL_3, 0x1000) == 0)
return KERNEL_TS_LEVEL_4_FIX_YES;
else
return KERNEL_TS_LEVEL_4_FIX_NO;
}
/* We can't find out if kernel has the fix */
return KERNEL_TS_LEVEL_4_FIX_UNKN;
}
/*
* Log detected level
*/
static void pr_levels(const char *str)
{
pr_debug("Max user page table levels (task size): %s\n", str);
}
/*
* Return last address (+1) of biggest possible user address space for
* current kernel
*/
unsigned long compel_task_size(void)
{
switch (get_kernel_ts_level()) {
case KERNEL_TS_LEVEL_4_FIX_NO:
pr_levels("KERNEL_TS_LEVEL_4_FIX_NO");
return TASK_SIZE_LEVEL_3;
case KERNEL_TS_LEVEL_4_FIX_YES:
pr_levels("KERNEL_TS_LEVEL_4_FIX_YES");
return TASK_SIZE_LEVEL_4;
case KERNEL_TS_LEVEL_4_FIX_UNKN:
pr_levels("KERNEL_TS_LEVEL_4_FIX_UNKN");
return TASK_SIZE_LEVEL_3;
default: /* KERNEL_TS_LEVEL_5 */
pr_levels("KERNEL_TS_LEVEL_5");
return TASK_SIZE_LEVEL_5;
}
}
/*
* Get task registers (overwrites weak function)
*/
int ptrace_get_regs(int pid, user_regs_struct_t *regs)
{
struct iovec iov;
int rc;
pr_debug("ptrace_get_regs: pid=%d\n", pid);
iov.iov_base = &regs->prstatus;
iov.iov_len = sizeof(regs->prstatus);
rc = ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
if (rc != 0)
return rc;
iov.iov_base = &regs->system_call;
iov.iov_len = sizeof(regs->system_call);
return ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
}
/*
* Set task registers (overwrites weak function)
*/
int ptrace_set_regs(int pid, user_regs_struct_t *regs)
{
uint32_t system_call = 0;
struct iovec iov;
int rc;
pr_debug("ptrace_set_regs: pid=%d\n", pid);
iov.iov_base = &regs->prstatus;
iov.iov_len = sizeof(regs->prstatus);
rc = ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
if (rc)
return rc;
/*
* If we attached to an inferior that is sleeping in a restarting
* system call like futex_wait(), we have to reset the system_call
* to 0. Otherwise the kernel would try to finish the interrupted
* system call after PTRACE_CONT and we could not run the
* parasite code.
*/
iov.iov_base = &system_call;
iov.iov_len = sizeof(system_call);
return ptrace(PTRACE_SETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
}
| 19,770 | 25.862772 | 115 |
c
|
criu
|
criu-master/compel/arch/s390/src/lib/include/uapi/asm/infect-types.h
|
#ifndef UAPI_COMPEL_ASM_TYPES_H__
#define UAPI_COMPEL_ASM_TYPES_H__
#include <stdint.h>
#include <signal.h>
#include <sys/mman.h>
#include <asm/ptrace.h>
#include "common/page.h"
#define SIGMAX 64
#define SIGMAX_OLD 31
/*
* Definitions from /usr/include/asm/ptrace.h:
*
* typedef struct
* {
* __u32 fpc;
* freg_t fprs[NUM_FPRS];
* } s390_fp_regs;
*
* typedef struct
* {
* psw_t psw;
* unsigned long gprs[NUM_GPRS];
* unsigned int acrs[NUM_ACRS];
* unsigned long orig_gpr2;
* } s390_regs;
*/
typedef struct {
uint64_t part1;
uint64_t part2;
} vector128_t;
struct prfpreg {
uint32_t fpc;
uint64_t fprs[16];
};
#define USER_FPREGS_VXRS 0x000000001
/* Guarded-storage control block */
#define USER_GS_CB 0x000000002
/* Guarded-storage broadcast control block */
#define USER_GS_BC 0x000000004
/* Runtime-instrumentation control block */
#define USER_RI_CB 0x000000008
/* Runtime-instrumentation bit set */
#define USER_RI_ON 0x000000010
typedef struct {
uint32_t flags;
struct prfpreg prfpreg;
uint64_t vxrs_low[16];
vector128_t vxrs_high[16];
uint64_t gs_cb[4];
uint64_t gs_bc[4];
uint64_t ri_cb[8];
} user_fpregs_struct_t;
typedef struct {
s390_regs prstatus;
uint32_t system_call;
} user_regs_struct_t;
#define REG_RES(r) ((uint64_t)(r).prstatus.gprs[2])
#define REG_IP(r) ((uint64_t)(r).prstatus.psw.addr)
#define SET_REG_IP(r, val) ((r).prstatus.psw.addr = (val))
#define REG_SP(r) ((uint64_t)(r).prstatus.gprs[15])
/*
* We assume that REG_SYSCALL_NR() is only used for PIE code, where we
* always use svc 0 with the system call number in %r1.
*/
#define REG_SYSCALL_NR(r) ((uint64_t)(r).prstatus.gprs[1])
#define user_regs_native(pregs) true
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
#define __compel_arch_fetch_thread_area(tid, th) 0
#define compel_arch_fetch_thread_area(tctl) 0
#define compel_arch_get_tls_task(ctl, tls)
#define compel_arch_get_tls_thread(tctl, tls)
#endif /* UAPI_COMPEL_ASM_TYPES_H__ */
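/*
 * Illustrative note (not part of the original header): s390 has no
 * compat mode to dispatch on, so __NR() discards its second argument
 * and expands straight to the native syscall number.
 */
#ifdef COMPEL_EXAMPLE
#include <asm/unistd.h>
static long example_nr_mmap(void)
{
	return __NR(mmap, 0); /* expands to __NR_mmap */
}
#endif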

/* criu-master/compel/arch/s390/src/lib/include/uapi/asm/sigframe.h */
#ifndef UAPI_COMPEL_ASM_SIGFRAME_H__
#define UAPI_COMPEL_ASM_SIGFRAME_H__
#include <asm/ptrace.h>
#include <asm/types.h>
#include <signal.h>
#include <stdint.h>
// XXX: the identifier rt_sigcontext is expected to be a struct by the CRIU code
#define rt_sigcontext sigcontext
#include <compel/sigframe-common.h>
#define RT_SIGFRAME_OFFSET(rt_sigframe) 0
/*
* From /usr/include/asm/sigcontext.h
*
* Redefine _sigregs_ext to be able to compile on older systems
*/
#ifndef __NUM_VXRS_LOW
typedef struct {
__u32 u[4];
} __vector128;
typedef struct {
unsigned long long vxrs_low[16];
__vector128 vxrs_high[16];
unsigned char __reserved[128];
} _sigregs_ext;
#endif
/*
* From /usr/include/uapi/asm/ucontext.h
*/
struct ucontext_extended {
unsigned long uc_flags;
ucontext_t *uc_link;
stack_t uc_stack;
_sigregs uc_mcontext;
sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(sigset_t)];
_sigregs_ext uc_mcontext_ext;
};
/*
* Signal stack frame for RT sigreturn
*/
struct rt_sigframe {
uint8_t callee_used_stack[160];
uint8_t retcode[2];
siginfo_t info;
struct ucontext_extended uc;
};
/*
* Do rt_sigreturn SVC
*/
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"lgr %%r15,%0\n" \
"lghi %%r1,173\n" \
"svc 0\n" \
: \
: "d" (new_sp) \
: "memory")
/* clang-format on */
#define RT_SIGFRAME_UC(rt_sigframe) (&rt_sigframe->uc)
#define RT_SIGFRAME_REGIP(rt_sigframe) (rt_sigframe)->uc.uc_mcontext.regs.psw.addr
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (1)
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */

/* criu-master/compel/arch/x86/plugins/include/asm/syscall-types.h */
#ifndef COMPEL_ARCH_SYSCALL_TYPES_H__
#define COMPEL_ARCH_SYSCALL_TYPES_H__
/* Types for sigaction, sigprocmask syscalls */
typedef void rt_signalfn_t(int, siginfo_t *, void *);
typedef rt_signalfn_t *rt_sighandler_t;
typedef void rt_restorefn_t(void);
typedef rt_restorefn_t *rt_sigrestore_t;
#define SA_RESTORER 0x04000000
#define _KNSIG 64
#define _NSIG_BPW 64
#define _KNSIG_WORDS (_KNSIG / _NSIG_BPW)
/*
* Note: since k_rtsigset_t is the same size for 32-bit and 64-bit,
* sig is defined as uint64_t rather than (unsigned long), in case we
* are ever going to support native 32-bit compilation.
*/
typedef struct {
uint64_t sig[_KNSIG_WORDS];
} k_rtsigset_t;
typedef struct {
rt_sighandler_t rt_sa_handler;
unsigned long rt_sa_flags;
rt_sigrestore_t rt_sa_restorer;
k_rtsigset_t rt_sa_mask;
} rt_sigaction_t;
/*
* Note: there is unaligned access on x86_64 and it's fine.
* However, when porting this code, keep in mind possible issues
* with an unaligned rt_sa_mask.
*/
typedef struct __attribute__((packed)) {
unsigned int rt_sa_handler;
unsigned int rt_sa_flags;
unsigned int rt_sa_restorer;
k_rtsigset_t rt_sa_mask;
} rt_sigaction_t_compat;
/* Types for set_thread_area, get_thread_area syscalls */
typedef struct {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit : 1;
unsigned int contents : 2;
unsigned int read_exec_only : 1;
unsigned int limit_in_pages : 1;
unsigned int seg_not_present : 1;
unsigned int usable : 1;
unsigned int lm : 1;
} user_desc_t;
#endif /* COMPEL_ARCH_SYSCALL_TYPES_H__ */
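/*
 * Hypothetical usage sketch (not part of the original header): a
 * user_desc_t shaped the way set_thread_area() expects for a 32-bit,
 * page-granular data segment; an entry_number of -1 asks the kernel to
 * pick a free GDT slot.
 */
#ifdef COMPEL_EXAMPLE
static user_desc_t example_tls_desc(unsigned int base)
{
	user_desc_t desc = {
		.entry_number = (unsigned int)-1, /* kernel picks the slot */
		.base_addr = base,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.contents = 0, /* data segment */
		.read_exec_only = 0,
		.limit_in_pages = 1,
		.seg_not_present = 0,
		.usable = 1,
	};

	return desc;
}
#endif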

/* criu-master/compel/arch/x86/plugins/std/syscalls/syscall32.c */
#include "asm/types.h"
#include "syscall-32.h"
#define SYS_SOCKET 1 /* sys_socket(2) */
#define SYS_BIND 2 /* sys_bind(2) */
#define SYS_CONNECT 3 /* sys_connect(2) */
#define SYS_SENDTO 11 /* sys_sendto(2) */
#define SYS_RECVFROM 12 /* sys_recvfrom(2) */
#define SYS_SHUTDOWN 13 /* sys_shutdown(2) */
#define SYS_SETSOCKOPT 14 /* sys_setsockopt(2) */
#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
long sys_socket(int domain, int type, int protocol)
{
uint32_t a[] = { (uint32_t)domain, (uint32_t)type, (uint32_t)protocol };
return sys_socketcall(SYS_SOCKET, (unsigned long *)a);
}
long sys_connect(int sockfd, struct sockaddr *addr, int addrlen)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)addr, (uint32_t)addrlen };
return sys_socketcall(SYS_CONNECT, (unsigned long *)a);
}
long sys_sendto(int sockfd, void *buff, size_t len, unsigned int flags, struct sockaddr *addr, int addr_len)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)buff, (uint32_t)len,
(uint32_t)flags, (uint32_t)addr, (uint32_t)addr_len };
return sys_socketcall(SYS_SENDTO, (unsigned long *)a);
}
long sys_recvfrom(int sockfd, void *ubuf, size_t size, unsigned int flags, struct sockaddr *addr, int *addr_len)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)ubuf, (uint32_t)size,
(uint32_t)flags, (uint32_t)addr, (uint32_t)addr_len };
return sys_socketcall(SYS_RECVFROM, (unsigned long *)a);
}
long sys_sendmsg(int sockfd, const struct msghdr *msg, int flags)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)msg, (uint32_t)flags };
return sys_socketcall(SYS_SENDMSG, (unsigned long *)a);
}
long sys_recvmsg(int sockfd, struct msghdr *msg, int flags)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)msg, (uint32_t)flags };
return sys_socketcall(SYS_RECVMSG, (unsigned long *)a);
}
long sys_shutdown(int sockfd, int how)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)how };
return sys_socketcall(SYS_SHUTDOWN, (unsigned long *)a);
}
long sys_bind(int sockfd, const struct sockaddr *addr, int addrlen)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)addr, (uint32_t)addrlen };
return sys_socketcall(SYS_BIND, (unsigned long *)a);
}
long sys_setsockopt(int sockfd, int level, int optname, const void *optval, unsigned int optlen)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)level, (uint32_t)optname, (uint32_t)optval, (uint32_t)optlen };
return sys_socketcall(SYS_SETSOCKOPT, (unsigned long *)a);
}
long sys_getsockopt(int sockfd, int level, int optname, const void *optval, unsigned int *optlen)
{
uint32_t a[] = { (uint32_t)sockfd, (uint32_t)level, (uint32_t)optname, (uint32_t)optval, (uint32_t)optlen };
return sys_socketcall(SYS_GETSOCKOPT, (unsigned long *)a);
}
#define SHMAT 21
long sys_shmat(int shmid, void *shmaddr, int shmflag)
{
return sys_ipc(SHMAT, shmid, shmflag, 0, shmaddr, 0);
}
long sys_pread(unsigned int fd, char *ubuf, uint32_t count, uint64_t pos)
{
return sys_pread64(fd, ubuf, count, (uint32_t)(pos & 0xffffffffu), (uint32_t)(pos >> 32));
}
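/*
 * Worked example (illustrative, not original code): the 32-bit ABI
 * passes the 64-bit file position as two registers, low word first,
 * exactly as sys_pread() splits it above.
 */
#ifdef COMPEL_EXAMPLE
static void pread_split_example(void)
{
	uint64_t pos = 0x100000123ull;
	uint32_t lo = (uint32_t)(pos & 0xffffffffu); /* 0x00000123 */
	uint32_t hi = (uint32_t)(pos >> 32);	     /* 0x00000001 */

	(void)lo;
	(void)hi;
}
#endif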

/* criu-master/compel/arch/x86/src/lib/cpu.c */
#include <string.h>
#include <stdbool.h>
#include "compel-cpu.h"
#include "common/bitops.h"
#include "common/compiler.h"
#include "log.h"
#include "common/bug.h"
#undef LOG_PREFIX
#define LOG_PREFIX "cpu: "
static compel_cpuinfo_t rt_info;
static void fetch_rt_cpuinfo(void)
{
static bool rt_info_done = false;
if (!rt_info_done) {
compel_cpuid(&rt_info);
rt_info_done = true;
}
}
/*
* Although we spell it out in here, the Processor Trace
* xfeature is completely unused. We use other mechanisms
* to save/restore PT state in Linux.
*/
static const char *const xfeature_names[] = {
"x87 floating point registers",
"SSE registers",
"AVX registers",
"MPX bounds registers",
"MPX CSR",
"AVX-512 opmask",
"AVX-512 Hi256",
"AVX-512 ZMM_Hi256",
"Processor Trace",
"Protection Keys User registers",
"Hardware Duty Cycling",
};
static short xsave_cpuid_features[] = {
X86_FEATURE_FPU, X86_FEATURE_XMM, X86_FEATURE_AVX, X86_FEATURE_MPX,
X86_FEATURE_MPX, X86_FEATURE_AVX512F, X86_FEATURE_AVX512F, X86_FEATURE_AVX512F,
X86_FEATURE_INTEL_PT, X86_FEATURE_PKU, X86_FEATURE_HDC,
};
void compel_set_cpu_cap(compel_cpuinfo_t *c, unsigned int feature)
{
if (likely(feature < NCAPINTS_BITS))
set_bit(feature, (unsigned long *)c->x86_capability);
}
void compel_clear_cpu_cap(compel_cpuinfo_t *c, unsigned int feature)
{
if (likely(feature < NCAPINTS_BITS))
clear_bit(feature, (unsigned long *)c->x86_capability);
}
int compel_test_cpu_cap(compel_cpuinfo_t *c, unsigned int feature)
{
if (likely(feature < NCAPINTS_BITS))
return test_bit(feature, (unsigned long *)c->x86_capability);
return 0;
}
int compel_test_fpu_cap(compel_cpuinfo_t *c, unsigned int feature)
{
if (likely(feature < XFEATURE_MAX))
return (c->xfeatures_mask & (1UL << feature));
return 0;
}
static int compel_fpuid(compel_cpuinfo_t *c)
{
unsigned int last_good_offset;
uint32_t eax, ebx, ecx, edx;
size_t i;
BUILD_BUG_ON(ARRAY_SIZE(xsave_cpuid_features) != ARRAY_SIZE(xfeature_names));
if (!compel_test_cpu_cap(c, X86_FEATURE_FPU)) {
pr_err("fpu: No FPU detected\n");
return -1;
}
if (!compel_test_cpu_cap(c, X86_FEATURE_XSAVE)) {
pr_info("fpu: x87 FPU will use %s\n", compel_test_cpu_cap(c, X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
return 0;
}
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
c->xfeatures_mask = eax + ((uint64_t)edx << 32);
if ((c->xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
/*
* This indicates that something really unexpected happened
* with the enumeration.
*/
pr_err("fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx\n",
(unsigned long long)c->xfeatures_mask);
return -1;
}
/*
* Clear XSAVE features that are disabled in the normal CPUID.
*/
for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
if (!compel_test_cpu_cap(c, xsave_cpuid_features[i]))
c->xfeatures_mask &= ~(1 << i);
}
c->xfeatures_mask &= XFEATURE_MASK_USER;
c->xfeatures_mask &= ~XFEATURE_MASK_SUPERVISOR;
/*
* xsaves is not enabled in userspace, so the xsaves
* size is mostly for debugging purposes.
*/
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
c->xsave_size = ebx;
c->xsave_size_max = ecx;
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
c->xsaves_size = ebx;
pr_debug("fpu: xfeatures_mask 0x%llx xsave_size %u xsave_size_max %u xsaves_size %u\n",
(unsigned long long)c->xfeatures_mask, c->xsave_size, c->xsave_size_max, c->xsaves_size);
if (c->xsave_size_max > sizeof(struct xsave_struct))
pr_warn_once("fpu: max xsave frame exceed xsave_struct (%u %u)\n", c->xsave_size_max,
(unsigned)sizeof(struct xsave_struct));
memset(c->xstate_offsets, 0xff, sizeof(c->xstate_offsets));
memset(c->xstate_sizes, 0xff, sizeof(c->xstate_sizes));
memset(c->xstate_comp_offsets, 0xff, sizeof(c->xstate_comp_offsets));
memset(c->xstate_comp_sizes, 0xff, sizeof(c->xstate_comp_sizes));
/* start at the beginning of the "extended state" */
last_good_offset = offsetof(struct xsave_struct, extended_state_area);
/*
* The FP xstates and SSE xstates are legacy states. They are always
* in the fixed offsets in the xsave area in either compacted form
* or standard form.
*/
c->xstate_offsets[0] = 0;
c->xstate_sizes[0] = offsetof(struct i387_fxsave_struct, xmm_space);
c->xstate_offsets[1] = c->xstate_sizes[0];
c->xstate_sizes[1] = FIELD_SIZEOF(struct i387_fxsave_struct, xmm_space);
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if (!(c->xfeatures_mask & (1UL << i)))
continue;
/*
* If an xfeature is supervisor state, the offset
* in EBX is invalid. We leave it at -1.
*
* SDM says: If state component 'i' is a user state component,
* ECX[0] returns 0; if state component 'i' is a supervisor
* state component, ECX[0] returns 1.
*/
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
if (!(ecx & 1))
c->xstate_offsets[i] = ebx;
c->xstate_sizes[i] = eax;
/*
* In our xstate size checks, we assume that the
* highest-numbered xstate feature has the
* highest offset in the buffer. Ensure it does.
*/
if (last_good_offset > c->xstate_offsets[i])
pr_warn_once("fpu: misordered xstate %d %d\n", last_good_offset, c->xstate_offsets[i]);
last_good_offset = c->xstate_offsets[i];
}
BUILD_BUG_ON(sizeof(c->xstate_offsets) != sizeof(c->xstate_sizes));
BUILD_BUG_ON(sizeof(c->xstate_comp_offsets) != sizeof(c->xstate_comp_sizes));
c->xstate_comp_offsets[0] = 0;
c->xstate_comp_sizes[0] = offsetof(struct i387_fxsave_struct, xmm_space);
c->xstate_comp_offsets[1] = c->xstate_comp_sizes[0];
c->xstate_comp_sizes[1] = FIELD_SIZEOF(struct i387_fxsave_struct, xmm_space);
if (!compel_test_cpu_cap(c, X86_FEATURE_XSAVES)) {
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if ((c->xfeatures_mask & (1UL << i))) {
c->xstate_comp_offsets[i] = c->xstate_offsets[i];
c->xstate_comp_sizes[i] = c->xstate_sizes[i];
}
}
} else {
c->xstate_comp_offsets[FIRST_EXTENDED_XFEATURE] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if ((c->xfeatures_mask & (1UL << i)))
c->xstate_comp_sizes[i] = c->xstate_sizes[i];
else
c->xstate_comp_sizes[i] = 0;
if (i > FIRST_EXTENDED_XFEATURE) {
c->xstate_comp_offsets[i] = c->xstate_comp_offsets[i - 1] + c->xstate_comp_sizes[i - 1];
/*
* The value returned by ECX[1] indicates the alignment
* of state component 'i' when the compacted format
* of the extended region of an XSAVE area is used:
*/
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
if (ecx & 2)
c->xstate_comp_offsets[i] = ALIGN(c->xstate_comp_offsets[i], 64);
}
}
}
if (!pr_quelled(COMPEL_LOG_DEBUG)) {
for (i = 0; i < ARRAY_SIZE(c->xstate_offsets); i++) {
if (!(c->xfeatures_mask & (1UL << i)))
continue;
pr_debug("fpu: %-32s xstate_offsets %6d / %-6d xstate_sizes %6d / %-6d\n", xfeature_names[i],
c->xstate_offsets[i], c->xstate_comp_offsets[i], c->xstate_sizes[i],
c->xstate_comp_sizes[i]);
}
}
return 0;
}
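/*
 * Illustrative sketch (not part of the original file): in the XSAVES
 * (compacted) branch above, each component's offset is the previous
 * component's offset plus size, rounded up to 64 bytes whenever CPUID
 * reports the alignment bit in ECX[1].
 */
#ifdef COMPEL_EXAMPLE
static uint32_t next_compacted_offset(uint32_t prev_off, uint32_t prev_size, int align64)
{
	uint32_t off = prev_off + prev_size;

	return align64 ? (off + 63u) & ~63u : off;
}
#endif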
int compel_cpuid(compel_cpuinfo_t *c)
{
uint32_t eax, ebx, ecx, edx;
/*
* See cpu_detect() in the kernel; also read the CPUID
* specs not only in the general SDM but also in the
* extended instruction set references.
*/
/* Get vendor name */
cpuid(0x00000000, (unsigned int *)&c->cpuid_level, (unsigned int *)&c->x86_vendor_id[0],
(unsigned int *)&c->x86_vendor_id[8], (unsigned int *)&c->x86_vendor_id[4]);
if (!strcmp(c->x86_vendor_id, "GenuineIntel")) {
c->x86_vendor = X86_VENDOR_INTEL;
} else if (!strcmp(c->x86_vendor_id, "AuthenticAMD") || !strcmp(c->x86_vendor_id, "HygonGenuine")) {
c->x86_vendor = X86_VENDOR_AMD;
} else {
pr_err("Unsupported CPU vendor %s\n", c->x86_vendor_id);
return -1;
}
c->x86_family = 4;
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
c->x86_family = (eax >> 8) & 0xf;
c->x86_model = (eax >> 4) & 0xf;
c->x86_mask = eax & 0xf;
if (c->x86_family == 0xf)
c->x86_family += (eax >> 20) & 0xff;
if (c->x86_family >= 0x6)
c->x86_model += ((eax >> 16) & 0xf) << 4;
c->x86_capability[CPUID_1_EDX] = edx;
c->x86_capability[CPUID_1_ECX] = ecx;
}
/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
if (c->cpuid_level >= 0x00000006)
c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
/* Additional Intel-defined flags: level 0x00000007 */
if (c->cpuid_level >= 0x00000007) {
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_7_0_EBX] = ebx;
c->x86_capability[CPUID_7_0_ECX] = ecx;
c->x86_capability[CPUID_7_0_EDX] = edx;
}
/* Extended state features: level 0x0000000d */
if (c->cpuid_level >= 0x0000000d) {
cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_D_1_EAX] = eax;
}
/* Additional Intel-defined flags: level 0x0000000F */
if (c->cpuid_level >= 0x0000000F) {
/* QoS sub-leaf, EAX=0Fh, ECX=0 */
cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_F_0_EDX] = edx;
if (compel_test_cpu_cap(c, X86_FEATURE_CQM_LLC)) {
/* QoS sub-leaf, EAX=0Fh, ECX=1 */
cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_F_1_EDX] = edx;
}
}
/* AMD-defined flags: level 0x80000001 */
eax = cpuid_eax(0x80000000);
c->extended_cpuid_level = eax;
if ((eax & 0xffff0000) == 0x80000000) {
if (eax >= 0x80000001) {
cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_8000_0001_ECX] = ecx;
c->x86_capability[CPUID_8000_0001_EDX] = edx;
}
}
/*
* We don't care about scattered features for now;
* otherwise look into init_scattered_cpuid_features()
* in the kernel.
*
* Same applies to speculation control. Look into
* init_speculation_control() otherwise.
*/
if (c->extended_cpuid_level >= 0x80000004) {
unsigned int *v;
char *p, *q;
v = (unsigned int *)c->x86_model_id;
cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
c->x86_model_id[48] = 0;
/*
* Intel chips right-justify this string for some dumb reason;
* undo that brain damage:
*/
p = q = &c->x86_model_id[0];
while (*p == ' ')
p++;
if (p != q) {
while (*p)
*q++ = *p++;
while (q <= &c->x86_model_id[48])
*q++ = '\0'; /* Zero-pad the rest */
}
}
if (c->extended_cpuid_level >= 0x80000007) {
cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_8000_0007_EBX] = ebx;
c->x86_power = edx;
}
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
if (c->extended_cpuid_level >= 0x80000008)
c->x86_capability[CPUID_8000_0008_EBX] = cpuid_ebx(0x80000008);
/* On x86-64 CPUID is always present */
compel_set_cpu_cap(c, X86_FEATURE_CPUID);
/* On x86-64 NOP is always present */
compel_set_cpu_cap(c, X86_FEATURE_NOPL);
/*
* On x86-64, 32-bit syscalls are enabled, but we don't
* set the flag yet for backward-compatibility reasons.
*/
//compel_set_cpu_cap(c, X86_FEATURE_SYSCALL32);
/* See filter_cpuid_features in kernel */
if ((int32_t)c->cpuid_level < (int32_t)0x0000000d)
compel_clear_cpu_cap(c, X86_FEATURE_XSAVE);
/*
* We only care about small subset from c_early_init:
* early_init_amd and early_init_intel
*/
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
/*
* Strictly speaking we need to read MSR_IA32_MISC_ENABLE
* here but on ring3 it's impossible.
*/
if (c->x86_family == 15) {
compel_clear_cpu_cap(c, X86_FEATURE_REP_GOOD);
compel_clear_cpu_cap(c, X86_FEATURE_ERMS);
} else if (c->x86_family == 6) {
/* On x86-64 rep is fine */
compel_set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
break;
case X86_VENDOR_AMD:
/*
* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
* 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
*/
compel_clear_cpu_cap(c, 0 * 32 + 31);
if (c->x86_family >= 0x10)
compel_set_cpu_cap(c, X86_FEATURE_REP_GOOD);
if (c->x86_family == 0xf) {
uint32_t level;
/* On C+ stepping K8 rep microcode works well for copy/memset */
level = cpuid_eax(1);
if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
compel_set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}
break;
}
pr_debug("x86_family %u x86_vendor_id %s x86_model_id %s\n", c->x86_family, c->x86_vendor_id, c->x86_model_id);
return compel_fpuid(c);
}
bool compel_cpu_has_feature(unsigned int feature)
{
fetch_rt_cpuinfo();
return compel_test_cpu_cap(&rt_info, feature);
}
bool compel_fpu_has_feature(unsigned int feature)
{
fetch_rt_cpuinfo();
return compel_test_fpu_cap(&rt_info, feature);
}
uint32_t compel_fpu_feature_size(unsigned int feature)
{
fetch_rt_cpuinfo();
if (feature >= FIRST_EXTENDED_XFEATURE && feature < XFEATURE_MAX)
return rt_info.xstate_sizes[feature];
return 0;
}
uint32_t compel_fpu_feature_offset(unsigned int feature)
{
fetch_rt_cpuinfo();
if (feature >= FIRST_EXTENDED_XFEATURE && feature < XFEATURE_MAX)
return rt_info.xstate_offsets[feature];
return 0;
}
void compel_cpu_clear_feature(unsigned int feature)
{
fetch_rt_cpuinfo();
return compel_clear_cpu_cap(&rt_info, feature);
}
void compel_cpu_copy_cpuinfo(compel_cpuinfo_t *c)
{
fetch_rt_cpuinfo();
memcpy(c, &rt_info, sizeof(rt_info));
}
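/*
 * Hypothetical usage sketch (not in the original file): callers query
 * the lazily initialized runtime cpuinfo through the wrappers above.
 */
#ifdef COMPEL_EXAMPLE
static bool example_can_use_xsave(void)
{
	return compel_cpu_has_feature(X86_FEATURE_XSAVE);
}
#endif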

/* criu-master/compel/arch/x86/src/lib/handle-elf.c */
#include <string.h>
#include <errno.h>
#include "handle-elf.h"
#include "piegen.h"
#include "log.h"
static const unsigned char __maybe_unused elf_ident_64_le[EI_NIDENT] = {
0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, /* clang-format */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
int handle_binary(void *mem, size_t size)
{
if (memcmp(mem, elf_ident_64_le, sizeof(elf_ident_64_le)) == 0)
return handle_elf_x86_64(mem, size);
pr_err("Unsupported Elf format detected\n");
return -EINVAL;
}
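/*
 * Annotated sketch (illustrative, not original code) of what the ident
 * bytes matched above mean: the "\x7fELF" magic, ELFCLASS64 (0x02),
 * ELFDATA2LSB (0x01), EV_CURRENT (0x01), then zero padding.
 */
#ifdef COMPEL_EXAMPLE
#include <elf.h>
static int example_is_elf64_le(const unsigned char *ident)
{
	return ident[EI_MAG0] == 0x7f && ident[EI_MAG1] == 'E' &&
	       ident[EI_MAG2] == 'L' && ident[EI_MAG3] == 'F' &&
	       ident[EI_CLASS] == ELFCLASS64 && ident[EI_DATA] == ELFDATA2LSB;
}
#endif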

/* criu-master/compel/arch/x86/src/lib/infect.c */
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <errno.h>
#include <stdlib.h>
#include <time.h>
#include <compel/asm/fpu.h>
#include "asm/cpu.h"
#include <compel/asm/processor-flags.h>
#include <compel/cpu.h>
#include "errno.h"
#include <compel/plugins/std/syscall-codes.h>
#include <compel/plugins/std/syscall.h>
#include "common/err.h"
#include "asm/infect-types.h"
#include "ptrace.h"
#include "infect.h"
#include "infect-priv.h"
#include "log.h"
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
#endif
#ifndef NT_PRSTATUS
#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */
#endif
/*
* Injected syscall instruction
*/
const char code_syscall[] = {
0x0f, 0x05, /* syscall */
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc /* int 3, ... */
};
const char code_int_80[] = {
0xcd, 0x80, /* int $0x80 */
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc /* int 3, ... */
};
static const int code_syscall_aligned = round_up(sizeof(code_syscall), sizeof(long));
static const int code_int_80_aligned = round_up(sizeof(code_int_80), sizeof(long));
static inline __always_unused void __check_code_syscall(void)
{
BUILD_BUG_ON(code_int_80_aligned != BUILTIN_SYSCALL_SIZE);
BUILD_BUG_ON(code_syscall_aligned != BUILTIN_SYSCALL_SIZE);
BUILD_BUG_ON(!is_log2(sizeof(code_syscall)));
}
/* 10-byte legacy floating point register */
struct fpreg {
uint16_t significand[4];
uint16_t exponent;
};
/* 16-byte floating point register */
struct fpxreg {
uint16_t significand[4];
uint16_t exponent;
uint16_t padding[3];
};
#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n)*16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
#define FP_EXP_TAG_EMPTY 3
static inline uint32_t twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
struct fpxreg *st;
uint32_t tos = (fxsave->swd >> 11) & 7;
uint32_t twd = (unsigned long)fxsave->twd;
uint32_t tag;
uint32_t ret = 0xffff0000u;
int i;
for (i = 0; i < 8; i++, twd >>= 1) {
if (twd & 0x1) {
st = FPREG_ADDR(fxsave, (i - tos) & 7);
switch (st->exponent & 0x7fff) {
case 0x7fff:
tag = FP_EXP_TAG_SPECIAL;
break;
case 0x0000:
if (!st->significand[0] && !st->significand[1] && !st->significand[2] &&
!st->significand[3])
tag = FP_EXP_TAG_ZERO;
else
tag = FP_EXP_TAG_SPECIAL;
break;
default:
if (st->significand[3] & 0x8000)
tag = FP_EXP_TAG_VALID;
else
tag = FP_EXP_TAG_SPECIAL;
break;
}
} else {
tag = FP_EXP_TAG_EMPTY;
}
ret |= tag << (2 * i);
}
return ret;
}
void compel_convert_from_fxsr(struct user_i387_ia32_struct *env, struct i387_fxsave_struct *fxsave)
{
struct fpxreg *from = (struct fpxreg *)&fxsave->st_space[0];
struct fpreg *to = (struct fpreg *)env->st_space;
int i;
env->cwd = fxsave->cwd | 0xffff0000u;
env->swd = fxsave->swd | 0xffff0000u;
env->twd = twd_fxsr_to_i387(fxsave);
env->fip = fxsave->rip;
env->foo = fxsave->rdp;
/*
* should be actually ds/cs at fpu exception time, but
* that information is not available in 64bit mode.
*/
env->fcs = 0x23; /* __USER32_CS */
env->fos = 0x2b; /* __USER32_DS */
env->fos |= 0xffff0000;
for (i = 0; i < 8; ++i)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
int sigreturn_prep_regs_plain(struct rt_sigframe *sigframe, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{
bool is_native = user_regs_native(regs);
fpu_state_t *fpu_state = is_native ? &sigframe->native.fpu_state : &sigframe->compat.fpu_state;
if (is_native) {
#define cpreg64_native(d, s) sigframe->native.uc.uc_mcontext.d = regs->native.s
cpreg64_native(rdi, di);
cpreg64_native(rsi, si);
cpreg64_native(rbp, bp);
cpreg64_native(rsp, sp);
cpreg64_native(rbx, bx);
cpreg64_native(rdx, dx);
cpreg64_native(rcx, cx);
cpreg64_native(rip, ip);
cpreg64_native(rax, ax);
cpreg64_native(r8, r8);
cpreg64_native(r9, r9);
cpreg64_native(r10, r10);
cpreg64_native(r11, r11);
cpreg64_native(r12, r12);
cpreg64_native(r13, r13);
cpreg64_native(r14, r14);
cpreg64_native(r15, r15);
cpreg64_native(cs, cs);
cpreg64_native(eflags, flags);
sigframe->is_native = true;
#undef cpreg64_native
} else {
#define cpreg32_compat(d) sigframe->compat.uc.uc_mcontext.d = regs->compat.d
cpreg32_compat(gs);
cpreg32_compat(fs);
cpreg32_compat(es);
cpreg32_compat(ds);
cpreg32_compat(di);
cpreg32_compat(si);
cpreg32_compat(bp);
cpreg32_compat(sp);
cpreg32_compat(bx);
cpreg32_compat(dx);
cpreg32_compat(cx);
cpreg32_compat(ip);
cpreg32_compat(ax);
cpreg32_compat(cs);
cpreg32_compat(ss);
cpreg32_compat(flags);
#undef cpreg32_compat
sigframe->is_native = false;
}
fpu_state->has_fpu = true;
if (is_native) {
memcpy(&fpu_state->fpu_state_64.xsave, fpregs, sizeof(*fpregs));
} else {
memcpy(&fpu_state->fpu_state_ia32.xsave, fpregs, sizeof(*fpregs));
compel_convert_from_fxsr(&fpu_state->fpu_state_ia32.fregs_state.i387_ia32,
&fpu_state->fpu_state_ia32.xsave.i387);
}
return 0;
}
int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe)
{
fpu_state_t *fpu_state = (sigframe->is_native) ? &rsigframe->native.fpu_state : &rsigframe->compat.fpu_state;
if (sigframe->is_native) {
unsigned long addr = (unsigned long)(void *)&fpu_state->fpu_state_64.xsave;
if ((addr % 64ul)) {
pr_err("Unaligned address passed: %lx (native %d)\n", addr, sigframe->is_native);
return -1;
}
sigframe->native.uc.uc_mcontext.fpstate = (uint64_t)addr;
} else if (!sigframe->is_native) {
sigframe->compat.uc.uc_mcontext.fpstate = (uint32_t)(unsigned long)(void *)&fpu_state->fpu_state_ia32;
}
return 0;
}
#define get_signed_user_reg(pregs, name) \
((user_regs_native(pregs)) ? (int64_t)((pregs)->native.name) : (int32_t)((pregs)->compat.name))
static int get_task_xsave(pid_t pid, user_fpregs_struct_t *xsave)
{
struct iovec iov;
iov.iov_base = xsave;
iov.iov_len = sizeof(*xsave);
if (ptrace(PTRACE_GETREGSET, pid, (unsigned int)NT_X86_XSTATE, &iov) < 0) {
pr_perror("Can't obtain FPU registers for %d", pid);
return -1;
}
return 0;
}
static int get_task_fpregs(pid_t pid, user_fpregs_struct_t *xsave)
{
if (ptrace(PTRACE_GETFPREGS, pid, NULL, xsave)) {
pr_perror("Can't obtain FPU registers for %d", pid);
return -1;
}
return 0;
}
static inline void fixup_mxcsr(struct xsave_struct *xsave)
{
/*
* Right now xsave->i387.mxcsr is filled with random garbage;
* let's make it valid by applying a mask which allows all
* features, except the denormals-are-zero feature bit.
*
* See also fpu__init_system_mxcsr function:
* https://github.com/torvalds/linux/blob/8cb1ae19/arch/x86/kernel/fpu/init.c#L117
*/
xsave->i387.mxcsr &= 0x0000ffbf;
}
/* See arch/x86/kernel/fpu/xstate.c */
static void validate_random_xstate(struct xsave_struct *xsave)
{
struct xsave_hdr_struct *hdr = &xsave->xsave_hdr;
unsigned int i;
/* No unknown or supervisor features may be set */
hdr->xstate_bv &= XFEATURE_MASK_USER;
hdr->xstate_bv &= ~XFEATURE_MASK_SUPERVISOR;
hdr->xstate_bv &= XFEATURE_MASK_FAULTINJ;
for (i = 0; i < XFEATURE_MAX; i++) {
if (!compel_fpu_has_feature(i))
hdr->xstate_bv &= ~(1 << i);
}
/* Userspace must use the uncompacted format */
hdr->xcomp_bv = 0;
/*
* If 'reserved' is shrunken to add a new field, make sure to validate
* that new field here!
*/
BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
/* No reserved bits may be set */
memset(&hdr->reserved, 0, sizeof(hdr->reserved));
}
/*
* TODO: Put fault-injection under CONFIG_* and move
* extended regset corruption to generic code
*/
static int corrupt_extregs(pid_t pid)
{
bool use_xsave = compel_cpu_has_feature(X86_FEATURE_OSXSAVE);
user_fpregs_struct_t ext_regs;
int *rand_to = (int *)&ext_regs;
unsigned int seed, init_seed;
size_t i;
init_seed = seed = time(NULL);
for (i = 0; i < sizeof(ext_regs) / sizeof(int); i++)
*rand_to++ = rand_r(&seed);
/*
* Error log-level as:
* - not intended to be used outside of testing;
* - zdtm.py will grep it auto-magically from logs
* (and the seed will be known from automatic testing).
*/
pr_err("Corrupting %s for %d, seed %u\n", use_xsave ? "xsave" : "fpuregs", pid, init_seed);
fixup_mxcsr(&ext_regs);
if (!use_xsave) {
if (ptrace(PTRACE_SETFPREGS, pid, NULL, &ext_regs)) {
pr_perror("Can't set FPU registers for %d", pid);
return -1;
}
} else {
struct iovec iov;
validate_random_xstate((void *)&ext_regs);
iov.iov_base = &ext_regs;
iov.iov_len = sizeof(ext_regs);
if (ptrace(PTRACE_SETREGSET, pid, (unsigned int)NT_X86_XSTATE, &iov) < 0) {
pr_perror("Can't set xstate for %d", pid);
return -1;
}
}
return 0;
}
int compel_get_task_regs(pid_t pid, user_regs_struct_t *regs, user_fpregs_struct_t *ext_regs, save_regs_t save,
void *arg, unsigned long flags)
{
user_fpregs_struct_t xsave = {}, *xs = ext_regs ? ext_regs : &xsave;
int ret = -1;
pr_info("Dumping general registers for %d in %s mode\n", pid, user_regs_native(regs) ? "native" : "compat");
/* Did we come from a system call? */
if (get_signed_user_reg(regs, orig_ax) >= 0) {
/* Restart the system call */
switch (get_signed_user_reg(regs, ax)) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
set_user_reg(regs, ax, get_user_reg(regs, orig_ax));
set_user_reg(regs, ip, get_user_reg(regs, ip) - 2);
break;
case -ERESTART_RESTARTBLOCK:
pr_warn("Will restore %d with interrupted system call\n", pid);
set_user_reg(regs, ax, -EINTR);
break;
}
}
if (!compel_cpu_has_feature(X86_FEATURE_FPU))
goto out;
/*
* FPU fetched either via fxsave or via xsave,
* thus decode it accordingly.
*/
pr_info("Dumping GP/FPU registers for %d\n", pid);
if (!compel_cpu_has_feature(X86_FEATURE_OSXSAVE)) {
ret = get_task_fpregs(pid, xs);
} else if (unlikely(flags & INFECT_X86_PTRACE_MXCSR_BUG)) {
/*
* get_task_fpregs() will fill the FP state;
* get_task_xsave() will then correctly overwrite the SSE/MMX/etc. parts
*/
pr_warn("Skylake xsave fpu bug workaround used\n");
ret = get_task_fpregs(pid, xs);
if (!ret)
ret = get_task_xsave(pid, xs);
} else {
ret = get_task_xsave(pid, xs);
}
if (!ret && unlikely(flags & INFECT_CORRUPT_EXTREGS))
ret = corrupt_extregs(pid);
if (ret)
goto err;
out:
ret = save(arg, regs, xs);
err:
return ret;
}
int compel_set_task_ext_regs(pid_t pid, user_fpregs_struct_t *ext_regs)
{
struct iovec iov;
pr_info("Restoring GP/FPU registers for %d\n", pid);
if (!compel_cpu_has_feature(X86_FEATURE_OSXSAVE)) {
if (ptrace(PTRACE_SETFPREGS, pid, NULL, ext_regs)) {
pr_perror("Can't set FPU registers for %d", pid);
return -1;
}
return 0;
}
iov.iov_base = ext_regs;
iov.iov_len = sizeof(*ext_regs);
if (ptrace(PTRACE_SETREGSET, pid, (unsigned int)NT_X86_XSTATE, &iov) < 0) {
pr_perror("Can't set FPU registers for %d", pid);
return -1;
}
return 0;
}
int compel_syscall(struct parasite_ctl *ctl, int nr, long *ret, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5, unsigned long arg6)
{
user_regs_struct_t regs = ctl->orig.regs;
bool native = user_regs_native(&regs);
int err;
if (native) {
user_regs_struct64 *r = ®s.native;
r->ax = (uint64_t)nr;
r->di = arg1;
r->si = arg2;
r->dx = arg3;
r->r10 = arg4;
r->r8 = arg5;
r->r9 = arg6;
err = compel_execute_syscall(ctl, &regs, code_syscall);
} else {
user_regs_struct32 *r = ®s.compat;
r->ax = (uint32_t)nr;
r->bx = arg1;
r->cx = arg2;
r->dx = arg3;
r->si = arg4;
r->di = arg5;
r->bp = arg6;
err = compel_execute_syscall(ctl, &regs, code_int_80);
}
*ret = native ? (long)get_user_reg(&regs, ax) : (int)get_user_reg(&regs, ax);
return err;
}
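/*
 * Illustrative sketch only, not part of the original file: issuing a
 * syscall in the victim through the prepared parasite. Assumes @ctl
 * was set up via compel_prepare()/compel_infect() and that the
 * syscall-codes headers providing __NR32_getpid are in scope.
 */
#if 0
static long example_remote_getpid(struct parasite_ctl *ctl)
{
	bool compat = !user_regs_native(&ctl->orig.regs);
	long ret;
	if (compel_syscall(ctl, __NR(getpid, compat), &ret, 0, 0, 0, 0, 0, 0) < 0)
		return -1;
	return ret;
}
#endif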
void *remote_mmap(struct parasite_ctl *ctl, void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
long map;
int err;
bool compat_task = !user_regs_native(&ctl->orig.regs);
err = compel_syscall(ctl, __NR(mmap, compat_task), &map, (unsigned long)addr, length, prot, flags, fd, offset);
if (err < 0)
return NULL;
if (map == -EACCES && (prot & PROT_WRITE) && (prot & PROT_EXEC)) {
pr_warn("mmap(PROT_WRITE | PROT_EXEC) failed for %d, "
"check selinux execmem policy\n",
ctl->rpid);
return NULL;
}
if (IS_ERR_VALUE(map)) {
pr_err("remote mmap() failed: %s\n", strerror(-map));
return NULL;
}
	/*
	 * For compat tasks, the address in the foreign process
	 * must fit into 4 bytes.
	 */
if (compat_task)
map &= 0xfffffffful;
return (void *)map;
}
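/*
 * Illustrative sketch only, not part of the original file: mapping a
 * scratch page in the victim and unmapping it again via a remote
 * munmap(). Error handling is trimmed for brevity.
 */
#if 0
static int example_remote_scratch_page(struct parasite_ctl *ctl)
{
	bool compat = !user_regs_native(&ctl->orig.regs);
	long ret;
	void *rmem;
	rmem = remote_mmap(ctl, NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (!rmem)
		return -1;
	return compel_syscall(ctl, __NR(munmap, compat), &ret,
			      (unsigned long)rmem, PAGE_SIZE, 0, 0, 0, 0);
}
#endif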
/*
* regs must be inited when calling this function from original context
*/
void parasite_setup_regs(unsigned long new_ip, void *stack, user_regs_struct_t *regs)
{
set_user_reg(regs, ip, new_ip);
if (stack)
set_user_reg(regs, sp, (unsigned long)stack);
/* Avoid end of syscall processing */
set_user_reg(regs, orig_ax, -1);
/* Make sure flags are in known state */
set_user_reg(regs, flags, get_user_reg(regs, flags) & ~(X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_IF));
}
#define USER32_CS 0x23
#define USER_CS 0x33
static bool ldt_task_selectors(pid_t pid)
{
unsigned long cs;
errno = 0;
	/*
	 * The register offset must be taken from the 64-bit layout
	 * even for compat tasks. Fix this to support native i386 tasks.
	 */
cs = ptrace(PTRACE_PEEKUSER, pid, offsetof(user_regs_struct64, cs), 0);
if (errno != 0) {
pr_perror("Can't get CS register for %d", pid);
		/* Be safe: treat an unreadable CS as non-standard selectors */
		return true;
}
return cs != USER_CS && cs != USER32_CS;
}
static int arch_task_compatible(pid_t pid)
{
user_regs_struct_t r;
int ret = ptrace_get_regs(pid, &r);
if (ret)
return -1;
return !user_regs_native(&r);
}
bool arch_can_dump_task(struct parasite_ctl *ctl)
{
pid_t pid = ctl->rpid;
int ret;
ret = arch_task_compatible(pid);
if (ret < 0)
return false;
if (ret && !(ctl->ictx.flags & INFECT_COMPATIBLE)) {
pr_err("Can't dump task %d running in 32-bit mode\n", pid);
return false;
}
if (ldt_task_selectors(pid)) {
pr_err("Can't dump task %d with LDT descriptors\n", pid);
return false;
}
return true;
}
int arch_fetch_sas(struct parasite_ctl *ctl, struct rt_sigframe *s)
{
int native = compel_mode_native(ctl);
void *where = native ? (void *)&s->native.uc.uc_stack : (void *)&s->compat.uc.uc_stack;
long ret;
int err;
err = compel_syscall(ctl, __NR(sigaltstack, !native), &ret, 0, (unsigned long)where, 0, 0, 0, 0);
return err ? err : ret;
}
/* Copied from the gdb header gdb/nat/x86-dregs.h */
/* Debug registers' indices. */
#define DR_FIRSTADDR 0
#define DR_LASTADDR 3
#define DR_NADDR 4 /* The number of debug address registers. */
#define DR_STATUS 6 /* Index of debug status register (DR6). */
#define DR_CONTROL 7 /* Index of debug control register (DR7). */
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit. */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit. */
#define DR_ENABLE_SIZE 2 /* Two enable bits per debug register. */
/* Locally enable the break/watchpoint in the I'th debug register. */
#define X86_DR_LOCAL_ENABLE(i) (1 << (DR_LOCAL_ENABLE_SHIFT + DR_ENABLE_SIZE * (i)))
int ptrace_set_breakpoint(pid_t pid, void *addr)
{
k_rtsigset_t block;
int ret;
/* Set a breakpoint */
if (ptrace(PTRACE_POKEUSER, pid, offsetof(struct user, u_debugreg[DR_FIRSTADDR]), addr)) {
pr_perror("Unable to setup a breakpoint into %d", pid);
return -1;
}
/* Enable the breakpoint */
if (ptrace(PTRACE_POKEUSER, pid, offsetof(struct user, u_debugreg[DR_CONTROL]),
X86_DR_LOCAL_ENABLE(DR_FIRSTADDR))) {
pr_perror("Unable to enable the breakpoint for %d", pid);
return -1;
}
/*
* FIXME(issues/1429): SIGTRAP can't be blocked, otherwise its handler
* will be reset to the default one.
*/
ksigfillset(&block);
ksigdelset(&block, SIGTRAP);
if (ptrace(PTRACE_SETSIGMASK, pid, sizeof(k_rtsigset_t), &block)) {
pr_perror("Can't block signals for %d", pid);
return -1;
}
ret = ptrace(PTRACE_CONT, pid, NULL, NULL);
if (ret) {
pr_perror("Unable to restart the stopped tracee process %d", pid);
return -1;
}
return 1;
}
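/*
 * Illustrative sketch only, not part of the original file: the
 * expected life cycle of the breakpoint helpers -- arm DR0/DR7, let
 * the tracee run into the SIGTRAP stop, then clear DR7. Assumes
 * <sys/wait.h> for waitpid().
 */
#if 0
static int example_run_to_breakpoint(pid_t pid, void *addr)
{
	int status;
	if (ptrace_set_breakpoint(pid, addr) <= 0)
		return -1;
	if (waitpid(pid, &status, 0) != pid || !WIFSTOPPED(status))
		return -1;
	return ptrace_flush_breakpoints(pid);
}
#endif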
int ptrace_flush_breakpoints(pid_t pid)
{
/* Disable the breakpoint */
if (ptrace(PTRACE_POKEUSER, pid, offsetof(struct user, u_debugreg[DR_CONTROL]), 0)) {
pr_perror("Unable to disable the breakpoint for %d", pid);
return -1;
}
return 0;
}
int ptrace_get_regs(pid_t pid, user_regs_struct_t *regs)
{
struct iovec iov;
int ret;
iov.iov_base = ®s->native;
iov.iov_len = sizeof(user_regs_struct64);
ret = ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
if (ret == -1) {
pr_perror("PTRACE_GETREGSET failed");
return -1;
}
if (iov.iov_len == sizeof(regs->native)) {
regs->__is_native = NATIVE_MAGIC;
return ret;
}
if (iov.iov_len == sizeof(regs->compat)) {
regs->__is_native = COMPAT_MAGIC;
return ret;
}
pr_err("PTRACE_GETREGSET read %zu bytes for pid %d, but native/compat regs sizes are %zu/%zu bytes\n",
iov.iov_len, pid, sizeof(regs->native), sizeof(regs->compat));
return -1;
}
int ptrace_set_regs(pid_t pid, user_regs_struct_t *regs)
{
struct iovec iov;
if (user_regs_native(regs)) {
iov.iov_base = ®s->native;
iov.iov_len = sizeof(user_regs_struct64);
} else {
iov.iov_base = ®s->compat;
iov.iov_len = sizeof(user_regs_struct32);
}
return ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}
#define TASK_SIZE ((1UL << 47) - PAGE_SIZE)
/*
 * The ia32 task size may be limited to 3G, but we use a
 * higher limit here to stay backward compatible.
 */
#define TASK_SIZE_IA32 (0xffffe000)
unsigned long compel_task_size(void)
{
return TASK_SIZE;
}
criu-master/compel/arch/x86/src/lib/thread_area.c
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include "log.h"
#include "asm/infect-types.h"
#include "infect.h"
#include "infect-priv.h"
#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif
/*
 * For 64-bit applications, TLS (fs_base for Glibc) lives in MSRs,
 * which are dumped with the help of ptrace() and restored with
 * arch_prctl(ARCH_SET_FS/ARCH_SET_GS).
 *
 * But SET_FS_BASE will also update the GDT if the base pointer fits
 * into 4 bytes. Otherwise it sets only the MSR, which lets mixed
 * 64/32-bit code use 2 MSRs as TLS bases _and_ 3 GDT entries --
 * 5 TLS pointers in sum, 3 of them 4 bytes wide and the other
 * two 8 bytes wide:
* struct thread_struct {
* struct desc_struct tls_array[3];
* ...
* #ifdef CONFIG_X86_64
* unsigned long fsbase;
* unsigned long gsbase;
* #endif
* ...
* };
*
 * Most x86_64 applications don't use the GDT, but mixed code
 * (e.g. Wine) can. Be pessimistic and dump it for 64-bit
 * applications too.
*/
int __compel_arch_fetch_thread_area(int tid, struct thread_ctx *th)
{
bool native_mode = user_regs_native(&th->regs);
tls_t *ptls = &th->tls;
int err, i;
/* Initialise as not present by default */
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
user_desc_t *d = &ptls->desc[i];
memset(d, 0, sizeof(user_desc_t));
d->seg_not_present = 1;
d->entry_number = GDT_ENTRY_TLS_MIN + i;
}
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
user_desc_t *d = &ptls->desc[i];
err = ptrace(PTRACE_GET_THREAD_AREA, tid, GDT_ENTRY_TLS_MIN + i, d);
if (err) {
			/*
			 * Ignore the absent syscall on !CONFIG_IA32_EMULATION
			 * kernels, where such mixed code can't run.
			 * XXX: add a compile-time option, say
			 * CONFIG_X86_IGNORE_64BIT_TLS (for x86_64 systems
			 * with CONFIG_IA32_EMULATION)
			 */
if (errno == EIO && native_mode)
return 0;
pr_perror("get_thread_area failed for %d", tid);
return err;
}
}
return 0;
}
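/*
 * Illustrative counterpart only, not part of the original file: a
 * fetched GDT TLS descriptor could be written back with
 * PTRACE_SET_THREAD_AREA (request 26 on x86); entries still marked
 * not-present would typically be skipped.
 */
#if 0
#ifndef PTRACE_SET_THREAD_AREA
#define PTRACE_SET_THREAD_AREA 26
#endif
static int example_restore_tls_entry(int tid, user_desc_t *d)
{
	if (d->seg_not_present)
		return 0;
	return ptrace(PTRACE_SET_THREAD_AREA, tid, d->entry_number, d);
}
#endif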
int compel_arch_fetch_thread_area(struct parasite_thread_ctl *tctl)
{
return __compel_arch_fetch_thread_area(tctl->tid, &tctl->th);
}
void compel_arch_get_tls_task(struct parasite_ctl *ctl, tls_t *out)
{
memcpy(out, &ctl->orig.tls, sizeof(tls_t));
}
void compel_arch_get_tls_thread(struct parasite_thread_ctl *tctl, tls_t *out)
{
memcpy(out, &tctl->th.tls, sizeof(tls_t));
}
criu-master/compel/arch/x86/src/lib/include/cpu.h
#ifndef __COMPEL_ASM_CPU_H__
#define __COMPEL_ASM_CPU_H__
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid" : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) : "0"(*eax), "2"(*ecx) : "memory");
}
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
native_cpuid(eax, ebx, ecx, edx);
}
static inline void cpuid_count(unsigned int op, int count, unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx)
{
*eax = op;
*ecx = count;
native_cpuid(eax, ebx, ecx, edx);
}
static inline unsigned int cpuid_eax(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return edx;
}
#endif
| 1,302 | 21.465517 | 117 |
h
|
criu
|
criu-master/compel/arch/x86/src/lib/include/handle-elf.h
|
#ifndef COMPEL_HANDLE_ELF_H__
#define COMPEL_HANDLE_ELF_H__
#include "elf64-types.h"
#define ELF_X86_64
#ifndef R_X86_64_GOTPCRELX
#define R_X86_64_GOTPCRELX 41
#endif
#ifndef R_X86_64_REX_GOTPCRELX
#define R_X86_64_REX_GOTPCRELX 42
#endif
#define __handle_elf handle_elf_x86_64
#define arch_is_machine_supported(e_machine) (e_machine == EM_X86_64)
extern int handle_elf_x86_32(void *mem, size_t size);
extern int handle_elf_x86_64(void *mem, size_t size);
#endif /* COMPEL_HANDLE_ELF_H__ */
| 506 | 21.043478 | 69 |
h
|
criu
|
criu-master/compel/arch/x86/src/lib/include/uapi/asm/cpu.h
|
#ifndef __CR_ASM_CPU_H__
#define __CR_ASM_CPU_H__
#include <stdint.h>
#include <compel/asm/fpu.h>
/*
* Adopted from linux kernel and enhanced from Intel/AMD manuals.
 * Note these bits are not an ABI for the linux kernel, but they
 * _are_ for us, so make sure they stay at the proper positions
 * between versions.
 *
 * In particular, since we already used leaf 11, we have to keep
 * it here -- it's an ABI now.
*/
enum cpuid_leafs {
CPUID_1_EDX = 0,
CPUID_8000_0001_EDX = 1,
CPUID_8086_0001_EDX = 2,
CPUID_LNX_1 = 3,
CPUID_1_ECX = 4,
CPUID_C000_0001_EDX = 5,
CPUID_8000_0001_ECX = 6,
CPUID_LNX_2 = 7,
CPUID_LNX_3 = 8,
CPUID_7_0_EBX = 9,
CPUID_D_1_EAX = 10,
CPUID_7_0_ECX = 11,
CPUID_F_1_EDX = 12,
CPUID_8000_0008_EBX = 13,
CPUID_6_EAX = 14,
CPUID_8000_000A_EDX = 15,
CPUID_F_0_EDX = 16,
CPUID_8000_0007_EBX = 17,
CPUID_7_0_EDX = 18,
};
#define NCAPINTS_V1 12
#define NCAPINTS_V2 19
#define NCAPINTS (NCAPINTS_V2) /* N 32-bit words worth of info */
#define NCAPINTS_BITS (NCAPINTS * 32)
/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
#define X86_FEATURE_FPU (0 * 32 + 0) /* Onboard FPU */
#define X86_FEATURE_VME (0 * 32 + 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0 * 32 + 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0 * 32 + 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0 * 32 + 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0 * 32 + 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0 * 32 + 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0 * 32 + 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0 * 32 + 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0 * 32 + 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0 * 32 + 11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0 * 32 + 12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0 * 32 + 13) /* Page Global Enable */
#define X86_FEATURE_MCA (0 * 32 + 14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0 * 32 + 15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0 * 32 + 16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0 * 32 + 17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0 * 32 + 18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH (0 * 32 + 19) /* CLFLUSH instruction */
#define X86_FEATURE_DS (0 * 32 + 21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0 * 32 + 22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0 * 32 + 23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0 * 32 + 24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM (0 * 32 + 25) /* "sse" */
#define X86_FEATURE_XMM2 (0 * 32 + 26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP (0 * 32 + 27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0 * 32 + 28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0 * 32 + 29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0 * 32 + 30) /* IA-64 processor */
#define X86_FEATURE_PBE (0 * 32 + 31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1 * 32 + 11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP (1 * 32 + 19) /* MP Capable */
#define X86_FEATURE_NX (1 * 32 + 20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1 * 32 + 22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1 * 32 + 25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES (1 * 32 + 26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1 * 32 + 27) /* RDTSCP */
#define X86_FEATURE_LM (1 * 32 + 29) /* Long Mode (x86-64, 64-bit support) */
#define X86_FEATURE_3DNOWEXT (1 * 32 + 30) /* AMD 3DNow extensions */
#define X86_FEATURE_3DNOW (1 * 32 + 31) /* 3DNow */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2 * 32 + 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2 * 32 + 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2 * 32 + 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3 * 32 + 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3 * 32 + 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3 * 32 + 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3 * 32 + 3) /* Centaur MCRs (= MTRRs) */
/* CPU types for specific tunings: */
#define X86_FEATURE_K8 (3 * 32 + 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 (3 * 32 + 5) /* "" Athlon */
#define X86_FEATURE_P3 (3 * 32 + 6) /* "" P3 */
#define X86_FEATURE_P4 (3 * 32 + 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3 * 32 + 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3 * 32 + 9) /* SMP kernel running on UP */
#define X86_FEATURE_ART (3 * 32 + 10) /* Always running timer (ART) */
#define X86_FEATURE_ARCH_PERFMON (3 * 32 + 11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3 * 32 + 12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3 * 32 + 13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3 * 32 + 14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 (3 * 32 + 15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD (3 * 32 + 16) /* REP microcode works well */
#define X86_FEATURE_MFENCE_RDTSC (3 * 32 + 17) /* "" MFENCE synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3 * 32 + 18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER (3 * 32 + 19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL (3 * 32 + 20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS (3 * 32 + 21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY (3 * 32 + 22) /* CPU topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3 * 32 + 23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3 * 32 + 24) /* TSC does not stop in C states */
#define X86_FEATURE_CPUID (3 * 32 + 25) /* CPU has CPUID instruction itself */
#define X86_FEATURE_EXTD_APICID (3 * 32 + 26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3 * 32 + 27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF (3 * 32 + 28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
#define X86_FEATURE_NONSTOP_TSC_S3 (3 * 32 + 30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ (3 * 32 + 31) /* TSC has known frequency */
/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
#define X86_FEATURE_XMM3 (4 * 32 + 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ (4 * 32 + 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 (4 * 32 + 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT (4 * 32 + 3) /* "monitor" MONITOR/MWAIT support */
#define X86_FEATURE_DSCPL (4 * 32 + 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
#define X86_FEATURE_VMX (4 * 32 + 5) /* Hardware virtualization */
#define X86_FEATURE_SMX (4 * 32 + 6) /* Safer Mode eXtensions */
#define X86_FEATURE_EST (4 * 32 + 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4 * 32 + 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4 * 32 + 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4 * 32 + 10) /* Context ID */
#define X86_FEATURE_SDBG (4 * 32 + 11) /* Silicon Debug */
#define X86_FEATURE_FMA (4 * 32 + 12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4 * 32 + 13) /* CMPXCHG16B instruction */
#define X86_FEATURE_XTPR (4 * 32 + 14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4 * 32 + 15) /* Perf/Debug Capabilities MSR */
#define X86_FEATURE_PCID (4 * 32 + 17) /* Process Context Identifiers */
#define X86_FEATURE_DCA (4 * 32 + 18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4 * 32 + 19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4 * 32 + 20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC (4 * 32 + 21) /* X2APIC */
#define X86_FEATURE_MOVBE (4 * 32 + 22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4 * 32 + 23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER (4 * 32 + 24) /* TSC deadline timer */
#define X86_FEATURE_AES (4 * 32 + 25) /* AES instructions */
#define X86_FEATURE_XSAVE (4 * 32 + 26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
#define X86_FEATURE_OSXSAVE (4 * 32 + 27) /* "" XSAVE instruction enabled in the OS */
#define X86_FEATURE_AVX (4 * 32 + 28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4 * 32 + 29) /* 16-bit FP conversions */
#define X86_FEATURE_RDRAND (4 * 32 + 30) /* RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4 * 32 + 31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5 * 32 + 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN (5 * 32 + 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT (5 * 32 + 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN (5 * 32 + 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5 * 32 + 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5 * 32 + 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE (5 * 32 + 10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN (5 * 32 + 11) /* PHE enabled */
#define X86_FEATURE_PMM (5 * 32 + 12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN (5 * 32 + 13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
#define X86_FEATURE_LAHF_LM (6 * 32 + 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6 * 32 + 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM (6 * 32 + 2) /* Secure Virtual Machine */
#define X86_FEATURE_EXTAPIC (6 * 32 + 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY (6 * 32 + 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM (6 * 32 + 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A (6 * 32 + 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE (6 * 32 + 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH (6 * 32 + 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6 * 32 + 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6 * 32 + 10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP (6 * 32 + 11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6 * 32 + 12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6 * 32 + 13) /* Watchdog timer */
#define X86_FEATURE_LWP (6 * 32 + 15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6 * 32 + 16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE (6 * 32 + 17) /* Translation Cache Extension */
#define X86_FEATURE_NODEID_MSR (6 * 32 + 19) /* NodeId MSR */
#define X86_FEATURE_TBM (6 * 32 + 21) /* Trailing Bit Manipulations */
#define X86_FEATURE_TOPOEXT (6 * 32 + 22) /* Topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6 * 32 + 23) /* Core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB (6 * 32 + 24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6 * 32 + 26) /* Data breakpoint extension */
#define X86_FEATURE_PTSC (6 * 32 + 27) /* Performance time-stamp counter */
#define X86_FEATURE_PERFCTR_LLC (6 * 32 + 28) /* Last Level Cache performance counter extensions */
#define X86_FEATURE_MWAITX (6 * 32 + 29) /* MWAIT extension (MONITORX/MWAITX instructions) */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE (9 * 32 + 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST (9 * 32 + 1) /* TSC adjustment MSR 0x3B */
#define X86_FEATURE_BMI1 (9 * 32 + 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9 * 32 + 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9 * 32 + 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9 * 32 + 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9 * 32 + 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9 * 32 + 9) /* Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID (9 * 32 + 10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9 * 32 + 11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM (9 * 32 + 12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX (9 * 32 + 14) /* Memory Protection Extension */
#define X86_FEATURE_RDT_A (9 * 32 + 15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F (9 * 32 + 16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ (9 * 32 + 17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED (9 * 32 + 18) /* RDSEED instruction */
#define X86_FEATURE_ADX (9 * 32 + 19) /* ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9 * 32 + 20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_AVX512IFMA (9 * 32 + 21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT (9 * 32 + 23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB (9 * 32 + 24) /* CLWB instruction */
#define X86_FEATURE_INTEL_PT (9 * 32 + 25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF (9 * 32 + 26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER (9 * 32 + 27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD (9 * 32 + 28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI (9 * 32 + 29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW (9 * 32 + 30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL (9 * 32 + 31) /* AVX-512 VL (128/256 Vector Length) Extensions */
/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
#define X86_FEATURE_XSAVEOPT (10 * 32 + 0) /* XSAVEOPT instruction */
#define X86_FEATURE_XSAVEC (10 * 32 + 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10 * 32 + 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10 * 32 + 3) /* XSAVES/XRSTORS instructions */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 11 */
#define X86_FEATURE_PREFETCHWT1	     (11 * 32 + 0) /* PREFETCHWT1 Intel® Xeon Phi™ only */
#define X86_FEATURE_AVX512VBMI (11 * 32 + 1) /* AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_UMIP (11 * 32 + 2) /* User Mode Instruction Protection */
#define X86_FEATURE_PKU (11 * 32 + 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (11 * 32 + 4) /* OS Protection Keys Enable */
#define X86_FEATURE_AVX512_VBMI2 (11 * 32 + 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
#define X86_FEATURE_GFNI (11 * 32 + 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (11 * 32 + 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (11 * 32 + 10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (11 * 32 + 11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (11 * 32 + 12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (11 * 32 + 13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (11 * 32 + 14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (11 * 32 + 16) /* 5-level page tables */
#define X86_FEATURE_RDPID (11 * 32 + 22) /* RDPID instruction */
#define X86_FEATURE_CLDEMOTE (11 * 32 + 25) /* CLDEMOTE instruction */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12 * 32 + 0) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (12 * 32 + 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (12 * 32 + 2) /* LLC Local MBM monitoring */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13 * 32 + 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13 * 32 + 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13 * 32 + 2) /* Always save/restore FP error pointers */
#define X86_FEATURE_IBPB (13 * 32 + 12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBRS (13 * 32 + 14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_STIBP (13 * 32 + 15) /* Single Thread Indirect Branch Predictors */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14 * 32 + 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14 * 32 + 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14 * 32 + 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14 * 32 + 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14 * 32 + 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14 * 32 + 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14 * 32 + 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14 * 32 + 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14 * 32 + 10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14 * 32 + 11) /* HWP Package Level Request */
#define X86_FEATURE_HDC (14 * 32 + 13) /* HDC base registers present */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
#define X86_FEATURE_NPT (15 * 32 + 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15 * 32 + 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15 * 32 + 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15 * 32 + 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15 * 32 + 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15 * 32 + 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15 * 32 + 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15 * 32 + 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15 * 32 + 10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15 * 32 + 12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15 * 32 + 13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15 * 32 + 15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15 * 32 + 16) /* Virtual GIF */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 16 */
#define X86_FEATURE_CQM_LLC (16 * 32 + 1) /* LLC QoS if 1 */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17 * 32 + 0) /* MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17 * 32 + 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17 * 32 + 3) /* Scalable MCA */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18 * 32 + 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18 * 32 + 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_PCONFIG (18 * 32 + 18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18 * 32 + 26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18 * 32 + 27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18 * 32 + 29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18 * 32 + 31) /* "" Speculative Store Bypass Disable */
enum {
X86_VENDOR_INTEL = 0,
X86_VENDOR_AMD = 1,
X86_VENDOR_MAX
};
struct cpuinfo_x86 {
/* cpu context */
uint8_t x86_family;
uint8_t x86_vendor;
uint8_t x86_model;
uint8_t x86_mask;
uint32_t x86_capability[NCAPINTS];
uint32_t x86_power;
uint32_t extended_cpuid_level;
int cpuid_level;
char x86_vendor_id[16];
char x86_model_id[64];
/* fpu context */
uint64_t xfeatures_mask;
uint32_t xsave_size_max;
uint32_t xsave_size;
uint32_t xstate_offsets[XFEATURE_MAX];
uint32_t xstate_sizes[XFEATURE_MAX];
uint32_t xsaves_size;
uint32_t xstate_comp_offsets[XFEATURE_MAX];
uint32_t xstate_comp_sizes[XFEATURE_MAX];
};
typedef struct cpuinfo_x86 compel_cpuinfo_t;
#endif /* __CR_ASM_CPU_H__ */
criu-master/compel/arch/x86/src/lib/include/uapi/asm/fpu.h
#ifndef __CR_ASM_FPU_H__
#define __CR_ASM_FPU_H__
#include <sys/types.h>
#include <stdbool.h>
#include <stdint.h>
#include <compel/common/compiler.h>
#define FP_MIN_ALIGN_BYTES 64
#define FXSAVE_ALIGN_BYTES 16
#define FP_XSTATE_MAGIC1 0x46505853U
#define FP_XSTATE_MAGIC2 0x46505845U
#ifndef FP_XSTATE_MAGIC2_SIZE
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
#endif
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
#define FXSAVE_SIZE 512
#define XSAVE_SIZE 4096
#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/*
* List of XSAVE features Linux knows about:
*/
enum xfeature {
XFEATURE_FP,
XFEATURE_SSE,
/*
* Values above here are "legacy states".
* Those below are "extended states".
*/
XFEATURE_YMM,
XFEATURE_BNDREGS,
XFEATURE_BNDCSR,
XFEATURE_OPMASK,
XFEATURE_ZMM_Hi256,
XFEATURE_Hi16_ZMM,
XFEATURE_PT,
XFEATURE_PKRU,
XFEATURE_HDC,
XFEATURE_MAX,
};
#define XSTATE_CPUID 0x0000000d
#define XFEATURE_MASK_FP (1 << XFEATURE_FP)
#define XFEATURE_MASK_SSE (1 << XFEATURE_SSE)
#define XFEATURE_MASK_YMM (1 << XFEATURE_YMM)
#define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS)
#define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR)
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PT (1 << XFEATURE_PT)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
#define XFEATURE_MASK_HDC (1 << XFEATURE_HDC)
#define XFEATURE_MASK_MAX (1 << XFEATURE_MAX)
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK | XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)
#define FIRST_EXTENDED_XFEATURE XFEATURE_YMM
/* Supervisor features */
#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT | XFEATURE_MASK_HDC)
/* All currently supported features */
#define XFEATURE_MASK_USER \
(XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM | XFEATURE_MASK_PKRU | XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
/* xsave structure features which is safe to fill with garbage (see validate_random_xstate()) */
#define XFEATURE_MASK_FAULTINJ \
(XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM)
struct fpx_sw_bytes {
uint32_t magic1;
uint32_t extended_size;
uint64_t xstate_bv;
uint32_t xstate_size;
uint32_t padding[7];
};
struct i387_fxsave_struct {
uint16_t cwd; /* Control Word */
uint16_t swd; /* Status Word */
uint16_t twd; /* Tag Word */
uint16_t fop; /* Last Instruction Opcode */
union {
struct {
uint64_t rip; /* Instruction Pointer */
uint64_t rdp; /* Data Pointer */
};
struct {
uint32_t fip; /* FPU IP Offset */
uint32_t fcs; /* FPU IP Selector */
uint32_t foo; /* FPU Operand Offset */
uint32_t fos; /* FPU Operand Selector */
};
};
uint32_t mxcsr; /* MXCSR Register State */
uint32_t mxcsr_mask; /* MXCSR Mask */
/* 8*16 bytes for each FP-reg = 128 bytes */
uint32_t st_space[32];
/* 16*16 bytes for each XMM-reg = 256 bytes */
uint32_t xmm_space[64];
uint32_t padding[12];
union {
uint32_t padding1[12];
uint32_t sw_reserved[12];
};
} __aligned(FXSAVE_ALIGN_BYTES);
struct xsave_hdr_struct {
uint64_t xstate_bv;
uint64_t xcomp_bv;
uint64_t reserved[6];
} __packed;
/*
* xstate_header.xcomp_bv[63] indicates that the extended_state_area
* is in compacted format.
*/
#define XCOMP_BV_COMPACTED_FORMAT ((uint64_t)1 << 63)
/*
* State component 2:
*
* There are 16x 256-bit AVX registers named YMM0-YMM15.
* The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
* and are stored in 'struct fxregs_state::xmm_space[]' in the
* "legacy" area.
*
* The high 128 bits are stored here.
*/
struct ymmh_struct {
uint32_t ymmh_space[64];
} __packed;
/* Intel MPX support: */
struct mpx_bndreg {
uint64_t lower_bound;
uint64_t upper_bound;
} __packed;
/*
* State component 3 is used for the 4 128-bit bounds registers
*/
struct mpx_bndreg_state {
struct mpx_bndreg bndreg[4];
} __packed;
/*
* State component 4 is used for the 64-bit user-mode MPX
* configuration register BNDCFGU and the 64-bit MPX status
* register BNDSTATUS. We call the pair "BNDCSR".
*/
struct mpx_bndcsr {
uint64_t bndcfgu;
uint64_t bndstatus;
} __packed;
/*
* The BNDCSR state is padded out to be 64-bytes in size.
*/
struct mpx_bndcsr_state {
union {
struct mpx_bndcsr bndcsr;
uint8_t pad_to_64_bytes[64];
};
} __packed;
/* AVX-512 Components: */
/*
* State component 5 is used for the 8 64-bit opmask registers
* k0-k7 (opmask state).
*/
struct avx_512_opmask_state {
uint64_t opmask_reg[8];
} __packed;
/*
* State component 6 is used for the upper 256 bits of the
* registers ZMM0-ZMM15. These 16 256-bit values are denoted
* ZMM0_H-ZMM15_H (ZMM_Hi256 state).
*/
struct avx_512_zmm_uppers_state {
uint64_t zmm_upper[16 * 4];
} __packed;
/*
* State component 7 is used for the 16 512-bit registers
* ZMM16-ZMM31 (Hi16_ZMM state).
*/
struct avx_512_hi16_state {
uint64_t hi16_zmm[16 * 8];
} __packed;
/*
* State component 9: 32-bit PKRU register. The state is
* 8 bytes long but only 4 bytes is used currently.
*/
struct pkru_state {
uint32_t pkru;
uint32_t pad;
} __packed;
/*
* This is our most modern FPU state format, as saved by the XSAVE
* and restored by the XRSTOR instructions.
*
* It consists of a legacy fxregs portion, an xstate header and
* subsequent areas as defined by the xstate header. Not all CPUs
* support all the extensions, so the size of the extended area
* can vary quite a bit between CPUs.
*
*
* One page should be enough for the whole xsave state ;-)
*/
#define EXTENDED_STATE_AREA_SIZE (4096 - sizeof(struct i387_fxsave_struct) - sizeof(struct xsave_hdr_struct))
/*
 * The CPU requires it to be 64-byte aligned
*/
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
union {
/*
		 * This ymmh is unneeded; it is kept only
		 * for backward compatibility.
*/
struct ymmh_struct ymmh;
uint8_t extended_state_area[EXTENDED_STATE_AREA_SIZE];
};
} __aligned(FP_MIN_ALIGN_BYTES) __packed;
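/*
 * A minimal sketch, not part of the original header: checking whether
 * a saved area actually carries AVX (YMM) state -- xstate_bv has a
 * bit set for every component the CPU wrote on XSAVE.
 */
static inline bool xsave_area_has_ymm(const struct xsave_struct *x)
{
	return (x->xsave_hdr.xstate_bv & XFEATURE_MASK_YMM) != 0;
}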
struct xsave_struct_ia32 {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
union {
/*
		 * This ymmh is unneeded; it is kept only
		 * for backward compatibility.
*/
struct ymmh_struct ymmh;
uint8_t extended_state_area[EXTENDED_STATE_AREA_SIZE];
};
};
typedef struct {
/*
	 * The FPU xsave area must be contiguous and FP_MIN_ALIGN_BYTES
* aligned, thus make sure the compiler won't insert any hole here.
*/
union {
struct xsave_struct xsave;
uint8_t __pad[sizeof(struct xsave_struct) + FP_XSTATE_MAGIC2_SIZE];
};
uint8_t has_fpu;
} fpu_state_64_t;
struct user_i387_ia32_struct {
uint32_t cwd; /* FPU Control Word */
uint32_t swd; /* FPU Status Word */
uint32_t twd; /* FPU Tag Word */
uint32_t fip; /* FPU IP Offset */
uint32_t fcs; /* FPU IP Selector */
uint32_t foo; /* FPU Operand Pointer Offset */
uint32_t fos; /* FPU Operand Pointer Selector */
uint32_t st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
};
typedef struct {
struct {
struct user_i387_ia32_struct i387_ia32;
/* Software status information [not touched by FSAVE]: */
uint32_t status;
} fregs_state;
union {
struct xsave_struct_ia32 xsave;
uint8_t __pad[sizeof(struct xsave_struct) + FP_XSTATE_MAGIC2_SIZE];
} __aligned(FXSAVE_ALIGN_BYTES);
} __aligned(FXSAVE_ALIGN_BYTES) fpu_state_ia32_t;
/*
* This one is used in restorer.
*/
typedef struct {
union {
fpu_state_64_t fpu_state_64;
struct {
/* fpu_state_ia32->xsave has to be 64-byte aligned. */
uint32_t __pad[2];
fpu_state_ia32_t fpu_state_ia32;
};
};
uint8_t has_fpu;
} fpu_state_t;
extern void compel_convert_from_fxsr(struct user_i387_ia32_struct *env, struct i387_fxsave_struct *fxsave);
#endif /* __CR_ASM_FPU_H__ */
criu-master/compel/arch/x86/src/lib/include/uapi/asm/infect-types.h
#ifndef UAPI_COMPEL_ASM_TYPES_H__
#define UAPI_COMPEL_ASM_TYPES_H__
#include <stdint.h>
#include <stdbool.h>
#include <signal.h>
#include <compel/plugins/std/asm/syscall-types.h>
#define SIGMAX 64
#define SIGMAX_OLD 31
#define ARCH_HAS_PTRACE_GET_THREAD_AREA
/*
* Linux preserves three TLS segments in GDT.
* Offsets in GDT differ between 32-bit and 64-bit machines.
* For 64-bit x86 those GDT offsets are the same
* for native and compat tasks.
*/
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
#define GDT_ENTRY_TLS_NUM 3
typedef struct {
user_desc_t desc[GDT_ENTRY_TLS_NUM];
} tls_t;
struct thread_ctx;
struct parasite_ctl;
struct parasite_thread_ctl;
extern int __compel_arch_fetch_thread_area(int tid, struct thread_ctx *th);
extern int compel_arch_fetch_thread_area(struct parasite_thread_ctl *tctl);
extern void compel_arch_get_tls_thread(struct parasite_thread_ctl *tctl, tls_t *out);
extern void compel_arch_get_tls_task(struct parasite_ctl *ctl, tls_t *out);
typedef struct {
uint64_t r15;
uint64_t r14;
uint64_t r13;
uint64_t r12;
uint64_t bp;
uint64_t bx;
uint64_t r11;
uint64_t r10;
uint64_t r9;
uint64_t r8;
uint64_t ax;
uint64_t cx;
uint64_t dx;
uint64_t si;
uint64_t di;
uint64_t orig_ax;
uint64_t ip;
uint64_t cs;
uint64_t flags;
uint64_t sp;
uint64_t ss;
uint64_t fs_base;
uint64_t gs_base;
uint64_t ds;
uint64_t es;
uint64_t fs;
uint64_t gs;
} user_regs_struct64;
typedef struct {
uint32_t bx;
uint32_t cx;
uint32_t dx;
uint32_t si;
uint32_t di;
uint32_t bp;
uint32_t ax;
uint32_t ds;
uint32_t es;
uint32_t fs;
uint32_t gs;
uint32_t orig_ax;
uint32_t ip;
uint32_t cs;
uint32_t flags;
uint32_t sp;
uint32_t ss;
} user_regs_struct32;
/*
 * To make sure we never rely on an uninitialized regs->__is_native,
 * this member is a (short int) checked against magic values instead
 * of a plain (bool). The right way to check whether regs are native
 * or compat is the user_regs_native() macro. This should cost
 * nothing, as *usually* sizeof(bool) == sizeof(short).
 */
typedef struct {
union {
user_regs_struct64 native;
user_regs_struct32 compat;
};
short __is_native; /* use user_regs_native macro to check it */
} user_regs_struct_t;
#define NATIVE_MAGIC 0x0A
#define COMPAT_MAGIC 0x0C
static inline bool user_regs_native(user_regs_struct_t *pregs)
{
return pregs->__is_native == NATIVE_MAGIC;
}
#define get_user_reg(pregs, name) ((user_regs_native(pregs)) ? ((pregs)->native.name) : ((pregs)->compat.name))
#define set_user_reg(pregs, name, val) \
((user_regs_native(pregs)) ? ((pregs)->native.name = (val)) : ((pregs)->compat.name = (val)))
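/*
 * Usage sketch, not part of the original header: the accessors above
 * make register handling bitness-agnostic, so the same source line
 * serves 64-bit and ia32 tracees alike.
 */
static inline unsigned long example_get_ip(user_regs_struct_t *regs)
{
	return get_user_reg(regs, ip);
}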
#if 0
typedef struct {
unsigned short cwd;
unsigned short swd;
unsigned short twd; /* Note this is not the same as
the 32bit/x87/FSAVE twd */
unsigned short fop;
u64 rip;
u64 rdp;
u32 mxcsr;
u32 mxcsr_mask;
u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
u32 padding[24];
} user_fpregs_struct_t;
#endif
typedef struct xsave_struct user_fpregs_struct_t;
#define REG_RES(regs)	      get_user_reg(&regs, ax)
#define REG_IP(regs)	      get_user_reg(&regs, ip)
#define SET_REG_IP(regs, val) set_user_reg(&regs, ip, val)
#define REG_SP(regs)	      get_user_reg(&regs, sp)
#define REG_SYSCALL_NR(regs)  get_user_reg(&regs, orig_ax)
#define __NR(syscall, compat) ((compat) ? __NR32_##syscall : __NR_##syscall)
/*
 * For x86_32, __NR_mmap inside the kernel represents the old_mmap
 * system call. Since we don't use it, define our own alias for
 * __NR_mmap2, which lets us unify the code between the 32-bit and
 * 64-bit versions.
 */
#define __NR32_mmap __NR32_mmap2
#endif /* UAPI_COMPEL_ASM_TYPES_H__ */
criu-master/compel/arch/x86/src/lib/include/uapi/asm/processor-flags.h
#ifndef __CR_PROCESSOR_FLAGS_H__
#define __CR_PROCESSOR_FLAGS_H__
/* Taken from linux kernel headers */
/*
* EFLAGS bits
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
#endif /* __CR_PROCESSOR_FLAGS_H__ */
criu-master/compel/arch/x86/src/lib/include/uapi/asm/sigframe.h
#ifndef UAPI_COMPEL_ASM_SIGFRAME_H__
#define UAPI_COMPEL_ASM_SIGFRAME_H__
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <compel/asm/fpu.h>
#include <compel/plugins/std/syscall-codes.h>
#define SIGFRAME_MAX_OFFSET 8
struct rt_sigcontext {
uint64_t r8;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rdi;
uint64_t rsi;
uint64_t rbp;
uint64_t rbx;
uint64_t rdx;
uint64_t rax;
uint64_t rcx;
uint64_t rsp;
uint64_t rip;
uint64_t eflags;
uint16_t cs;
uint16_t gs;
uint16_t fs;
uint16_t ss;
uint64_t err;
uint64_t trapno;
uint64_t oldmask;
uint64_t cr2;
uint64_t fpstate;
uint64_t reserved1[8];
};
struct rt_sigcontext_32 {
uint32_t gs;
uint32_t fs;
uint32_t es;
uint32_t ds;
uint32_t di;
uint32_t si;
uint32_t bp;
uint32_t sp;
uint32_t bx;
uint32_t dx;
uint32_t cx;
uint32_t ax;
uint32_t trapno;
uint32_t err;
uint32_t ip;
uint32_t cs;
uint32_t flags;
uint32_t sp_at_signal;
uint32_t ss;
uint32_t fpstate;
uint32_t oldmask;
uint32_t cr2;
};
#include <compel/sigframe-common.h>
/*
* XXX: move declarations to generic sigframe.h or sigframe-compat.h
 * when (if) other architectures support compatible C/R
*/
typedef uint32_t compat_uptr_t;
typedef uint32_t compat_size_t;
typedef uint32_t compat_sigset_word;
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
int _pad[128 / sizeof(int) - 3];
} compat_siginfo_t;
typedef struct compat_sigaltstack {
compat_uptr_t ss_sp;
int ss_flags;
compat_size_t ss_size;
} compat_stack_t;
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
compat_stack_t uc_stack;
struct rt_sigcontext_32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_ia32 {
uint32_t pretcode;
int32_t sig;
uint32_t pinfo;
uint32_t puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
char retcode[8];
/* fp state follows here */
fpu_state_t fpu_state;
};
struct rt_sigframe_64 {
char *pretcode;
struct rt_ucontext uc;
struct rt_siginfo info;
/* fp state follows here */
fpu_state_t fpu_state;
};
struct rt_sigframe {
union {
struct rt_sigframe_ia32 compat;
struct rt_sigframe_64 native;
};
bool is_native;
};
static inline void rt_sigframe_copy_sigset(struct rt_sigframe *to, k_rtsigset_t *from)
{
size_t sz = sizeof(k_rtsigset_t);
BUILD_BUG_ON(sz != sizeof(compat_sigset_t));
if (to->is_native)
memcpy(&to->native.uc.uc_sigmask, from, sz);
else
memcpy(&to->compat.uc.uc_sigmask, from, sz);
}
static inline void rt_sigframe_erase_sigset(struct rt_sigframe *sigframe)
{
size_t sz = sizeof(k_rtsigset_t);
if (sigframe->is_native)
memset(&sigframe->native.uc.uc_sigmask, 0, sz);
else
memset(&sigframe->compat.uc.uc_sigmask, 0, sz);
}
#define RT_SIGFRAME_REGIP(rt_sigframe) \
((rt_sigframe->is_native) ? (rt_sigframe)->native.uc.uc_mcontext.rip : (rt_sigframe)->compat.uc.uc_mcontext.ip)
#define RT_SIGFRAME_FPU(rt_sigframe) \
((rt_sigframe->is_native) ? (&(rt_sigframe)->native.fpu_state) : (&(rt_sigframe)->compat.fpu_state))
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (RT_SIGFRAME_FPU(rt_sigframe)->has_fpu)
/*
 * The sigframe offset differs for native and compat tasks.
 * The offset calculations can be seen in the kernel:
* - compatible is in sys32_rt_sigreturn at arch/x86/ia32/ia32_signal.c
* - native is in sys_rt_sigreturn at arch/x86/kernel/signal.c
*/
#define RT_SIGFRAME_OFFSET(rt_sigframe) (((rt_sigframe)->is_native) ? 8 : 4)
#define USER32_CS 0x23
/* clang-format off */
#define ARCH_RT_SIGRETURN_NATIVE(new_sp) \
asm volatile( \
"movq %0, %%rax \n" \
"movq %%rax, %%rsp \n" \
"movl $"__stringify(__NR_rt_sigreturn)", %%eax \n" \
"syscall \n" \
: \
: "r"(new_sp) \
: "rax","memory")
#define ARCH_RT_SIGRETURN_COMPAT(new_sp) \
asm volatile( \
"pushq $"__stringify(USER32_CS)" \n" \
"xor %%rax, %%rax \n" \
"movl $1f, %%eax \n" \
"pushq %%rax \n" \
"lretq \n" \
"1: \n" \
".code32 \n" \
"movl %%edi, %%esp \n" \
"movl $"__stringify(__NR32_rt_sigreturn)",%%eax \n" \
"int $0x80 \n" \
".code64 \n" \
: \
: "rdi"(new_sp) \
: "eax", "r8", "r9", "r10", "r11", "memory")
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
do { \
if ((rt_sigframe)->is_native) \
ARCH_RT_SIGRETURN_NATIVE(new_sp); \
else \
ARCH_RT_SIGRETURN_COMPAT(new_sp); \
} while (0)
/* clang-format on */
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe,
struct rt_sigframe *rsigframe);
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */
criu-master/compel/include/infect-priv.h
#ifndef __COMPEL_INFECT_PRIV_H__
#define __COMPEL_INFECT_PRIV_H__
#include <stdbool.h>
#define BUILTIN_SYSCALL_SIZE 8
struct thread_ctx {
k_rtsigset_t sigmask;
user_regs_struct_t regs;
#ifdef ARCH_HAS_PTRACE_GET_THREAD_AREA
tls_t tls;
#endif
user_fpregs_struct_t ext_regs;
};
/* parasite control block */
struct parasite_ctl {
int rpid; /* Real pid of the victim */
void *remote_map;
void *local_map;
void *sigreturn_addr; /* A place for the breakpoint */
unsigned long map_length;
struct infect_ctx ictx;
/* thread leader data */
bool daemonized;
struct thread_ctx orig;
void *rstack; /* thread leader stack*/
struct rt_sigframe *sigframe;
struct rt_sigframe *rsigframe; /* address in a parasite */
void *r_thread_stack; /* stack for non-leader threads */
unsigned long parasite_ip; /* service routine start ip */
unsigned int *cmd; /* address for command */
void *args; /* address for arguments */
unsigned long args_size;
int tsock; /* transport socket for transferring fds */
struct parasite_blob_desc pblob;
};
struct parasite_thread_ctl {
int tid;
struct parasite_ctl *ctl;
struct thread_ctx th;
};
#define MEMFD_FNAME "CRIUMFD"
#define MEMFD_FNAME_SZ sizeof(MEMFD_FNAME)
struct ctl_msg;
int parasite_wait_ack(int sockfd, unsigned int cmd, struct ctl_msg *m);
extern void parasite_setup_regs(unsigned long new_ip, void *stack, user_regs_struct_t *regs);
extern void *remote_mmap(struct parasite_ctl *ctl, void *addr, size_t length, int prot, int flags, int fd,
off_t offset);
extern bool arch_can_dump_task(struct parasite_ctl *ctl);
/*
* @regs: general purpose registers
 * @ext_regs: extended register set (fpu/mmx/sse/etc); when NULL, the
 *            state is restored by the sigframe on rt_sigreturn()
 * @save: callback to dump all info
 * @flags: see INFECT_* in infect_ctx::flags
 * @pid: pid of the victim task
*/
extern int compel_get_task_regs(pid_t pid, user_regs_struct_t *regs, user_fpregs_struct_t *ext_regs, save_regs_t save,
void *arg, unsigned long flags);
extern int compel_set_task_ext_regs(pid_t pid, user_fpregs_struct_t *ext_regs);
extern int arch_fetch_sas(struct parasite_ctl *ctl, struct rt_sigframe *s);
extern int sigreturn_prep_regs_plain(struct rt_sigframe *sigframe, user_regs_struct_t *regs,
user_fpregs_struct_t *fpregs);
extern int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe);
extern int compel_execute_syscall(struct parasite_ctl *ctl, user_regs_struct_t *regs, const char *code_syscall);
#endif
criu-master/compel/include/log.h
#ifndef COMPEL_LOG_H__
#define COMPEL_LOG_H__
#include <errno.h>
#include <string.h>
#include "uapi/compel/log.h"
#ifndef LOG_PREFIX
#define LOG_PREFIX
#endif
static inline int pr_quelled(unsigned int loglevel)
{
return compel_log_get_loglevel() < loglevel && loglevel != COMPEL_LOG_MSG;
}
extern void compel_print_on_level(unsigned int loglevel, const char *format, ...)
__attribute__((__format__(__printf__, 2, 3)));
#define pr_msg(fmt, ...) compel_print_on_level(COMPEL_LOG_MSG, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) compel_print_on_level(COMPEL_LOG_INFO, LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) \
compel_print_on_level(COMPEL_LOG_ERROR, "Error (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_err_once(fmt, ...) \
do { \
static bool __printed; \
if (!__printed) { \
pr_err(fmt, ##__VA_ARGS__); \
__printed = 1; \
} \
} while (0)
#define pr_warn(fmt, ...) \
compel_print_on_level(COMPEL_LOG_WARN, "Warn (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_warn_once(fmt, ...) \
do { \
static bool __printed; \
if (!__printed) { \
pr_warn(fmt, ##__VA_ARGS__); \
__printed = 1; \
} \
} while (0)
#define pr_debug(fmt, ...) compel_print_on_level(COMPEL_LOG_DEBUG, LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_perror(fmt, ...) pr_err(fmt ": %s\n", ##__VA_ARGS__, strerror(errno))
#endif /* COMPEL_LOG_H__ */
criu-master/compel/include/uapi/cpu.h
#ifndef UAPI_COMPEL_CPU_H__
#define UAPI_COMPEL_CPU_H__
#include <stdbool.h>
#include <stdint.h>
#include <compel/asm/cpu.h>
extern int /* TODO: __must_check */ compel_cpuid(compel_cpuinfo_t *info);
extern bool compel_cpu_has_feature(unsigned int feature);
extern bool compel_fpu_has_feature(unsigned int feature);
extern uint32_t compel_fpu_feature_size(unsigned int feature);
extern uint32_t compel_fpu_feature_offset(unsigned int feature);
extern void compel_cpu_clear_feature(unsigned int feature);
extern void compel_cpu_copy_cpuinfo(compel_cpuinfo_t *c);
#endif /* UAPI_COMPEL_CPU_H__ */
criu-master/compel/include/uapi/handle-elf.h
#ifndef __COMPEL_UAPI_HANDLE_ELF__
#define __COMPEL_UAPI_HANDLE_ELF__
#define COMPEL_TYPE_INT (1u << 0)
#define COMPEL_TYPE_LONG (1u << 1)
#define COMPEL_TYPE_GOTPCREL (1u << 2)
#ifdef CONFIG_MIPS
#define COMPEL_TYPE_MIPS_26 (1u << 3)
#define COMPEL_TYPE_MIPS_HI16 (1u << 4)
#define COMPEL_TYPE_MIPS_LO16 (1u << 5)
#define COMPEL_TYPE_MIPS_HIGHER (1u << 6)
#define COMPEL_TYPE_MIPS_HIGHEST (1u << 7)
#define COMPEL_TYPE_MIPS_64 (1u << 8)
#endif
typedef struct {
unsigned int offset;
unsigned int type;
long addend;
long value;
} compel_reloc_t;
#endif
criu-master/compel/include/uapi/infect-rpc.h
#ifndef __COMPEL_INFECT_RPC_H__
#define __COMPEL_INFECT_RPC_H__
#include <sys/socket.h>
#include <sys/un.h>
#include <stdint.h>
#include <common/compiler.h>
struct parasite_ctl;
extern int __must_check compel_rpc_sync(unsigned int cmd, struct parasite_ctl *ctl);
extern int __must_check compel_rpc_call(unsigned int cmd, struct parasite_ctl *ctl);
extern int __must_check compel_rpc_call_sync(unsigned int cmd, struct parasite_ctl *ctl);
extern int compel_rpc_sock(struct parasite_ctl *ctl);
#define PARASITE_USER_CMDS 64
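/*
 * Usage sketch, not part of the original header: user commands are
 * allocated above the engine's own range and then driven with
 * compel_rpc_call_sync(), e.g.:
 */
#if 0
#define PARASITE_CMD_EXAMPLE (PARASITE_USER_CMDS + 0)
/* ... compel_rpc_call_sync(PARASITE_CMD_EXAMPLE, ctl); */
#endif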
#endif
| 534 | 27.157895 | 89 |
h
|
criu
|
criu-master/compel/include/uapi/infect.h
|
#ifndef __COMPEL_INFECT_H__
#define __COMPEL_INFECT_H__
#include <stdbool.h>
#include <compel/asm/sigframe.h>
#include <compel/asm/infect-types.h>
#include <compel/ksigset.h>
#include <compel/handle-elf.h>
#include <compel/task-state.h>
#include "common/compiler.h"
#define PARASITE_START_AREA_MIN (4096)
extern int __must_check compel_interrupt_task(int pid);
struct seize_task_status {
unsigned long long sigpnd;
unsigned long long shdpnd;
unsigned long long sigblk;
char state;
int vpid;
int ppid;
int seccomp_mode;
};
extern int __must_check compel_wait_task(int pid, int ppid,
int (*get_status)(int pid, struct seize_task_status *, void *data),
void (*free_status)(int pid, struct seize_task_status *, void *data),
struct seize_task_status *st, void *data);
extern int __must_check compel_stop_task(int pid);
extern int __must_check compel_parse_stop_signo(int pid);
extern int compel_resume_task(pid_t pid, int orig_state, int state);
extern int compel_resume_task_sig(pid_t pid, int orig_state, int state, int stop_signo);
struct parasite_ctl;
struct parasite_thread_ctl;
extern struct parasite_ctl __must_check *compel_prepare(int pid);
extern struct parasite_ctl __must_check *compel_prepare_noctx(int pid);
extern int __must_check compel_infect(struct parasite_ctl *ctl, unsigned long nr_threads, unsigned long args_size);
extern int __must_check compel_infect_no_daemon(struct parasite_ctl *ctl, unsigned long nr_threads,
unsigned long args_size);
extern struct parasite_thread_ctl __must_check *compel_prepare_thread(struct parasite_ctl *ctl, int pid);
extern void compel_release_thread(struct parasite_thread_ctl *);
extern int __must_check compel_start_daemon(struct parasite_ctl *ctl);
extern int __must_check compel_stop_daemon(struct parasite_ctl *ctl);
extern int __must_check compel_cure_remote(struct parasite_ctl *ctl);
extern int __must_check compel_cure_local(struct parasite_ctl *ctl);
extern int __must_check compel_cure(struct parasite_ctl *ctl);
#define PARASITE_ARG_SIZE_MIN (1 << 12)
#define compel_parasite_args(ctl, type) \
({ \
void *___ret; \
BUILD_BUG_ON(sizeof(type) > PARASITE_ARG_SIZE_MIN); \
___ret = compel_parasite_args_p(ctl); \
___ret; \
})
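/*
 * Usage sketch with a hypothetical argument structure, not part of
 * the API: the macro verifies at build time that the arguments fit
 * into the minimal argument area before handing out the pointer.
 */
#if 0
struct example_args {
	int fd;
	long flags;
};
static void example_fill_args(struct parasite_ctl *ctl)
{
	struct example_args *a = compel_parasite_args(ctl, struct example_args);
	a->fd = 0;
	a->flags = 0;
	/* a subsequent compel_rpc_call_sync() would ship these over */
}
#endif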
extern void *compel_parasite_args_p(struct parasite_ctl *ctl);
extern void *compel_parasite_args_s(struct parasite_ctl *ctl, unsigned long args_size);
extern int __must_check compel_syscall(struct parasite_ctl *ctl, int nr, long *ret, unsigned long arg1,
unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5,
unsigned long arg6);
extern int __must_check compel_run_in_thread(struct parasite_thread_ctl *tctl, unsigned int cmd);
extern int __must_check compel_run_at(struct parasite_ctl *ctl, unsigned long ip, user_regs_struct_t *ret_regs);
/*
 * PTRACE_SYSCALL traps the task twice -- on entry
 * to and on exit from a syscall. If we trace a
 * single task, we may skip half of all getregs
 * calls -- we don't need them on exit.
*/
enum trace_flags {
TRACE_ALL,
TRACE_ENTER,
TRACE_EXIT,
};
extern int __must_check compel_stop_on_syscall(int tasks, int sys_nr, int sys_nr_compat);
extern int __must_check compel_stop_pie(pid_t pid, void *addr, bool no_bp);
extern int __must_check compel_unmap(struct parasite_ctl *ctl, unsigned long addr);
extern int compel_mode_native(struct parasite_ctl *ctl);
extern k_rtsigset_t *compel_task_sigmask(struct parasite_ctl *ctl);
extern k_rtsigset_t *compel_thread_sigmask(struct parasite_thread_ctl *tctl);
struct rt_sigframe;
typedef int (*open_proc_fn)(int pid, int mode, const char *fmt, ...) __attribute__((__format__(__printf__, 3, 4)));
typedef int (*save_regs_t)(void *, user_regs_struct_t *, user_fpregs_struct_t *);
typedef int (*make_sigframe_t)(void *, struct rt_sigframe *, struct rt_sigframe *, k_rtsigset_t *);
struct infect_ctx {
int sock;
/*
* Regs manipulation context.
*/
save_regs_t save_regs;
make_sigframe_t make_sigframe;
void *regs_arg;
unsigned long task_size;
unsigned long syscall_ip; /* entry point of infection */
unsigned long flags; /* fine-tune (e.g. faults) */
void (*child_handler)(int, siginfo_t *, void *); /* handler for SIGCHLD deaths */
struct sigaction orig_handler;
open_proc_fn open_proc;
int log_fd; /* fd for parasite code to send messages to */
};
extern struct infect_ctx *compel_infect_ctx(struct parasite_ctl *);
/* Don't use memfd() */
#define INFECT_NO_MEMFD (1UL << 0)
/* Make parasite connect() fail */
#define INFECT_FAIL_CONNECT (1UL << 1)
/* No breakpoints in pie tracking */
#define INFECT_NO_BREAKPOINTS (1UL << 2)
/* Can run parasite inside compat tasks */
#define INFECT_COMPATIBLE (1UL << 3)
/* Workaround for ptrace bug on Skylake CPUs with kernels older than v4.14 */
#define INFECT_X86_PTRACE_MXCSR_BUG (1UL << 4)
/* After infecting - corrupt extended registers (fault-injection) */
#define INFECT_CORRUPT_EXTREGS (1UL << 5)
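/*
* Illustrative: these flags are OR-ed into the infection context
* before calling compel_infect(), e.g.
*
* compel_infect_ctx(ctl)->flags |= INFECT_NO_MEMFD;
*/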
/*
* There are several ways to describe a blob to the compel
* library. The simplest one, derived from criu, is to
* provide it via generated .h files.
*/
#define COMPEL_BLOB_CHEADER 0x1
struct parasite_blob_desc {
unsigned parasite_type;
union {
struct {
const void *mem;
size_t bsize;
unsigned long parasite_ip_off;
unsigned long cmd_off;
unsigned long args_ptr_off;
unsigned long got_off;
unsigned long args_off;
unsigned long data_off;
compel_reloc_t *relocs;
unsigned int nr_relocs;
} hdr;
};
};
extern struct parasite_blob_desc *compel_parasite_blob_desc(struct parasite_ctl *);
extern int __must_check compel_get_thread_regs(struct parasite_thread_ctl *, save_regs_t, void *);
extern void compel_relocs_apply(void *mem, void *vbase, struct parasite_blob_desc *pbd);
extern void compel_relocs_apply_mips(void *mem, void *vbase, struct parasite_blob_desc *pbd);
extern unsigned long compel_task_size(void);
extern uint64_t compel_get_leader_sp(struct parasite_ctl *ctl);
extern uint64_t compel_get_thread_sp(struct parasite_thread_ctl *tctl);
extern uint64_t compel_get_leader_ip(struct parasite_ctl *ctl);
extern uint64_t compel_get_thread_ip(struct parasite_thread_ctl *tctl);
void compel_set_leader_ip(struct parasite_ctl *ctl, uint64_t v);
void compel_set_thread_ip(struct parasite_thread_ctl *tctl, uint64_t v);
extern void compel_get_stack(struct parasite_ctl *ctl, void **rstack, void **r_thread_stack);
#endif
| 6,595 | 34.462366 | 115 |
h
|
criu
|
criu-master/compel/include/uapi/loglevels.h
|
#ifndef UAPI_COMPEL_LOGLEVELS_H__
#define UAPI_COMPEL_LOGLEVELS_H__
/*
* Log levels used by compel itself (see compel_log_init()),
* also by log functions in the std plugin.
*/
enum __compel_log_levels {
COMPEL_LOG_MSG, /* Print message regardless of log level */
COMPEL_LOG_ERROR, /* Errors only, when we're in trouble */
COMPEL_LOG_WARN, /* Warnings */
COMPEL_LOG_INFO, /* Informative, everything is fine */
COMPEL_LOG_DEBUG, /* Debug only */
COMPEL_DEFAULT_LOGLEVEL = COMPEL_LOG_WARN
};
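/*
* Illustrative only: a host program passes one of these levels to
* compel_log_init() together with its print callback, e.g.
*
* compel_log_init(my_print_fn, COMPEL_LOG_DEBUG);
*
* where my_print_fn is a hypothetical vprintf-style callback of the caller.
*/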
#endif /* UAPI_COMPEL_LOGLEVELS_H__ */
| 546 | 26.35 | 62 |
h
|
criu
|
criu-master/compel/include/uapi/plugins.h
|
#ifndef UAPI_COMPEL_PLUGIN_H__
#define UAPI_COMPEL_PLUGIN_H__
#define __init __attribute__((__used__)) __attribute__((__section__(".compel.init")))
#define __exit __attribute__((__used__)) __attribute__((__section__(".compel.exit")))
#ifndef __ASSEMBLY__
typedef struct {
const char *name;
int (*init)(void);
void (*exit)(void);
} plugin_init_t;
#define plugin_register(___desc) static const plugin_init_t *const ___ptr__##___desc __init = &___desc;
#define PLUGIN_REGISTER(___id, ___name, ___init, ___exit) \
static const plugin_init_t __plugin_desc_##___id = { \
.name = ___name, \
.init = ___init, \
.exit = ___exit, \
}; \
plugin_register(__plugin_desc_##___id);
#define PLUGIN_REGISTER_DUMMY(___id) \
static const plugin_init_t __plugin_desc_##___id = { \
.name = #___id, \
}; \
plugin_register(__plugin_desc_##___id);
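/*
* A minimal registration sketch (my_init/my_exit are hypothetical
* caller-supplied hooks):
*
* static int my_init(void) { return 0; }
* static void my_exit(void) { }
* PLUGIN_REGISTER(my_plugin, "my_plugin", my_init, my_exit)
*/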
#endif /* __ASSEMBLY__ */
#endif /* UAPI_COMPEL_PLUGIN_H__ */
| 1,151 | 32.882353 | 103 |
h
|
criu
|
criu-master/compel/include/uapi/ptrace.h
|
#ifndef UAPI_COMPEL_PTRACE_H__
#define UAPI_COMPEL_PTRACE_H__
#include "common/compiler.h"
/*
* We'd want to include both sys/ptrace.h and linux/ptrace.h,
* hoping that most definitions come from either one or another.
* Alas, on Alpine/musl both files declare struct ptrace_peeksiginfo_args,
* so there is no way they can be used together. Let's rely on the libc one.
*/
#include <sys/ptrace.h>
#include <stdint.h>
#include <compel/asm/breakpoints.h>
/*
* Some constants for ptrace that might be missing from the
* standard library includes due to being (relatively) new.
*/
#ifndef PTRACE_SEIZE
#define PTRACE_SEIZE 0x4206
#endif
#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
#endif
#ifndef PTRACE_INTERRUPT
#define PTRACE_INTERRUPT 0x4207
#endif
#ifndef PTRACE_PEEKSIGINFO
#define PTRACE_PEEKSIGINFO 0x4209
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
#endif
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205
#endif
#ifndef PTRACE_GETSIGMASK
#define PTRACE_GETSIGMASK 0x420a
#define PTRACE_SETSIGMASK 0x420b
#endif
#ifndef PTRACE_SECCOMP_GET_FILTER
#define PTRACE_SECCOMP_GET_FILTER 0x420c
#endif
#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d
#endif /* PTRACE_SECCOMP_GET_METADATA */
/*
* struct seccomp_metadata has not yet
* settled down in system headers, so use
* our own identical definition for a while.
*/
typedef struct {
uint64_t filter_off; /* Input: which filter */
uint64_t flags; /* Output: filter's flags */
} seccomp_metadata_t;
#ifndef PTRACE_GET_RSEQ_CONFIGURATION
#define PTRACE_GET_RSEQ_CONFIGURATION 0x420f
struct __ptrace_rseq_configuration {
uint64_t rseq_abi_pointer;
uint32_t rseq_abi_size;
uint32_t signature;
uint32_t flags;
uint32_t pad;
};
#endif
#ifdef PTRACE_EVENT_STOP
#if PTRACE_EVENT_STOP == 7 /* Bad value from Linux 3.1-3.3, fixed in 3.4 */
#undef PTRACE_EVENT_STOP
#endif
#endif
#ifndef PTRACE_EVENT_STOP
#define PTRACE_EVENT_STOP 128
#endif
extern int ptrace_suspend_seccomp(pid_t pid);
extern int __must_check ptrace_peek_area(pid_t pid, void *dst, void *addr, long bytes);
extern int __must_check ptrace_poke_area(pid_t pid, void *src, void *addr, long bytes);
extern int __must_check ptrace_swap_area(pid_t pid, void *dst, void *src, long bytes);
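/*
* A minimal sketch (assumes the victim is already seized and stopped;
* remote_addr is a hypothetical address inside the victim):
*
* long word;
* if (ptrace_peek_area(pid, &word, remote_addr, sizeof(word)))
* goto fail;
*/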
#endif /* UAPI_COMPEL_PTRACE_H__ */
| 2,411 | 24.125 | 87 |
h
|
criu
|
criu-master/compel/include/uapi/sigframe-common.h
|
/*
* Don't include it directly but use "arch-sigframe.h" instead.
*/
#ifndef UAPI_COMPEL_SIGFRAME_COMMON_H__
#define UAPI_COMPEL_SIGFRAME_COMMON_H__
#ifndef UAPI_COMPEL_ASM_SIGFRAME_H__
#error "Direct inclusion is forbidden, use <compel/asm/sigframe.h> instead"
#endif
#include "common/compiler.h"
#include <signal.h>
#include <compel/plugins/std/asm/syscall-types.h>
struct rt_sigframe;
#ifndef SIGFRAME_MAX_OFFSET
#define SIGFRAME_MAX_OFFSET RT_SIGFRAME_OFFSET(0)
#endif
#define RESTORE_STACK_ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))
/* The sigframe should be aligned on a 64-byte boundary for x86 and an 8-byte boundary for arm */
#define RESTORE_STACK_SIGFRAME RESTORE_STACK_ALIGN(sizeof(struct rt_sigframe) + SIGFRAME_MAX_OFFSET, 64)
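/*
* Worked example (illustrative): RESTORE_STACK_ALIGN(100, 64) rounds 100
* up to the next multiple of 64, i.e. 128; already-aligned values are
* returned unchanged.
*/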
#ifndef __ARCH_SI_PREAMBLE_SIZE
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
#endif
#define SI_MAX_SIZE 128
#ifndef SI_PAD_SIZE
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
typedef struct rt_siginfo {
int si_signo;
int si_errno;
int si_code;
int _pad[SI_PAD_SIZE];
} rt_siginfo_t;
typedef struct rt_sigaltstack {
void *ss_sp;
int ss_flags;
size_t ss_size;
} rt_stack_t;
struct rt_ucontext {
unsigned long uc_flags;
struct rt_ucontext *uc_link;
rt_stack_t uc_stack;
struct rt_sigcontext uc_mcontext;
k_rtsigset_t uc_sigmask; /* mask last for extensibility */
int _unused[32 - (sizeof(k_rtsigset_t) / sizeof(int))];
unsigned long uc_regspace[128] __attribute__((aligned(8)));
};
extern int __must_check sigreturn_prep_fpu_frame(struct rt_sigframe *frame, struct rt_sigframe *rframe);
#endif /* UAPI_COMPEL_SIGFRAME_COMMON_H__ */
| 1,611 | 25 | 104 |
h
|
criu
|
criu-master/compel/plugins/include/uapi/std/infect.h
|
#ifndef COMPEL_PLUGIN_STD_INFECT_H__
#define COMPEL_PLUGIN_STD_INFECT_H__
#include "common/compiler.h"
extern int parasite_get_rpc_sock(void);
extern unsigned int __export_parasite_service_cmd;
extern void *__export_parasite_service_args_ptr;
extern int __must_check parasite_service(void);
/*
* Must be supplied by user plugins.
*/
extern int __must_check parasite_daemon_cmd(int cmd, void *args);
extern int __must_check parasite_trap_cmd(int cmd, void *args);
extern void parasite_cleanup(void);
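/*
* A skeleton of a user-side implementation (illustrative; command
* numbering at or above PARASITE_USER_CMDS is up to the caller):
*
* int parasite_daemon_cmd(int cmd, void *args)
* {
* switch (cmd) {
* case PARASITE_USER_CMDS:
* return 0;
* default:
* return -1;
* }
* }
*/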
/*
* FIXME: Should be supplied by log module.
*/
extern void log_set_fd(int fd);
extern void log_set_loglevel(unsigned int level);
#endif /* COMPEL_PLUGIN_STD_INFECT_H__ */
| 682 | 25.269231 | 65 |
h
|
criu
|
criu-master/compel/plugins/include/uapi/std/log.h
|
#ifndef COMPEL_PLUGIN_STD_LOG_H__
#define COMPEL_PLUGIN_STD_LOG_H__
#include "compel/loglevels.h"
#include "common/compiler.h"
#define STD_LOG_SIMPLE_CHUNK 256
extern void std_log_set_fd(int fd);
extern void std_log_set_loglevel(enum __compel_log_levels level);
extern void std_log_set_start(struct timeval *tv);
/*
* Provides a function to get time *in the infected task* for log timings.
* Expected use-case: address on the vdso page to get time.
* If not set, or if called with NULL, compel will use a raw syscall,
* which requires entering the kernel and as a result affects performance.
*/
typedef int (*gettimeofday_t)(struct timeval *tv, struct timezone *tz);
extern void std_log_set_gettimeofday(gettimeofday_t gtod);
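/*
* Sketch (vdso_gtod is a hypothetical pointer to a gettimeofday
* implementation on the victim's vdso page):
*
* std_log_set_gettimeofday((gettimeofday_t)vdso_gtod);
*/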
/* std plugin helper to get time (hopefully, efficiently) */
extern int std_gettimeofday(struct timeval *tv, struct timezone *tz);
extern int std_vprint_num(char *buf, int blen, int num, char **ps);
extern void std_sprintf(char output[STD_LOG_SIMPLE_CHUNK], const char *format, ...)
__attribute__((__format__(__printf__, 2, 3)));
extern void print_on_level(unsigned int loglevel, const char *format, ...)
__attribute__((__format__(__printf__, 2, 3)));
#endif /* COMPEL_PLUGIN_STD_LOG_H__ */
| 1,225 | 38.548387 | 83 |
h
|
criu
|
criu-master/compel/plugins/include/uapi/std/string.h
|
#ifndef COMPEL_PLUGIN_STD_STRING_H__
#define COMPEL_PLUGIN_STD_STRING_H__
#include <sys/types.h>
#include <stdbool.h>
#include <stdarg.h>
/* Standard file descriptors. */
#define STDIN_FILENO 0 /* Standard input. */
#define STDOUT_FILENO 1 /* Standard output. */
#define STDERR_FILENO 2 /* Standard error output. */
extern void std_dputc(int fd, char c);
extern void std_dputs(int fd, const char *s);
extern void std_vdprintf(int fd, const char *format, va_list args);
extern void std_dprintf(int fd, const char *format, ...) __attribute__((__format__(__printf__, 2, 3)));
#define std_printf(fmt, ...) std_dprintf(STDOUT_FILENO, fmt, ##__VA_ARGS__)
#define std_puts(s) std_dputs(STDOUT_FILENO, s)
#define std_putchar(c) std_dputc(STDOUT_FILENO, c)
extern unsigned long std_strtoul(const char *nptr, char **endptr, int base);
extern int std_strcmp(const char *cs, const char *ct);
extern int std_strncmp(const char *cs, const char *ct, size_t n);
extern void *memcpy(void *dest, const void *src, size_t n);
extern int memcmp(const void *s1, const void *s2, size_t n);
extern void *memset(void *s, int c, size_t n);
#endif /* COMPEL_PLUGIN_STD_STRING_H__ */
| 1,178 | 37.032258 | 103 |
h
|
criu
|
criu-master/compel/src/main.c
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <stdint.h>
#include <getopt.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "version.h"
#include "piegen.h"
#include "log.h"
#define CFLAGS_DEFAULT_SET \
"-Wstrict-prototypes " \
"-ffreestanding " \
"-fno-stack-protector -nostdlib -fomit-frame-pointer "
#define COMPEL_CFLAGS_PIE CFLAGS_DEFAULT_SET "-fpie"
#define COMPEL_CFLAGS_NOPIC CFLAGS_DEFAULT_SET "-fno-pic"
#ifdef NO_RELOCS
#define COMPEL_LDFLAGS_COMMON "-z noexecstack -T "
#else
#define COMPEL_LDFLAGS_COMMON "-r -z noexecstack -T "
#endif
typedef struct {
const char *arch; // dir name under arch/
const char *cflags;
const char *cflags_compat;
} flags_t;
static const flags_t flags = {
#if defined CONFIG_X86_64
.arch = "x86",
.cflags = COMPEL_CFLAGS_PIE,
.cflags_compat = COMPEL_CFLAGS_NOPIC,
#elif defined CONFIG_AARCH64
.arch = "aarch64",
.cflags = COMPEL_CFLAGS_PIE,
#elif defined(CONFIG_ARMV6) || defined(CONFIG_ARMV7)
.arch = "arm",
.cflags = COMPEL_CFLAGS_PIE,
#elif defined CONFIG_PPC64
.arch = "ppc64",
.cflags = COMPEL_CFLAGS_PIE,
#elif defined CONFIG_S390
.arch = "s390",
.cflags = COMPEL_CFLAGS_PIE,
#elif defined CONFIG_MIPS
.arch = "mips",
.cflags = COMPEL_CFLAGS_PIE,
#else
#error "CONFIG_<ARCH> not defined, or unsupported ARCH"
#endif
};
piegen_opt_t opts = {};
const char *uninst_root;
static int piegen(void)
{
struct stat st;
void *mem;
int fd, ret = -1;
fd = open(opts.input_filename, O_RDONLY);
if (fd < 0) {
pr_perror("Can't open file %s", opts.input_filename);
return -1;
}
if (fstat(fd, &st)) {
pr_perror("Can't stat file %s", opts.input_filename);
goto err;
}
opts.fout = fopen(opts.output_filename, "w");
if (opts.fout == NULL) {
pr_perror("Can't open %s", opts.output_filename);
goto err;
}
mem = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FILE, fd, 0);
if (mem == MAP_FAILED) {
pr_perror("Can't mmap file %s", opts.input_filename);
goto err;
}
if (handle_binary(mem, st.st_size)) {
unlink(opts.output_filename);
goto err;
}
ret = 0;
err:
close(fd);
if (opts.fout)
fclose(opts.fout);
if (!ret)
pr_info("%s generated successfully.\n", opts.output_filename);
return ret;
}
static void cli_log(unsigned int lvl, const char *fmt, va_list parms)
{
FILE *f = stdout;
if (pr_quelled(lvl))
return;
if ((lvl == COMPEL_LOG_ERROR) || (lvl == COMPEL_LOG_WARN))
f = stderr;
vfprintf(f, fmt, parms);
}
static int usage(int rc)
{
FILE *out = (rc == 0) ? stdout : stderr;
fprintf(out,
"Usage:\n"
" compel [--compat] includes | cflags | ldflags\n"
" compel plugins [PLUGIN_NAME ...]\n"
" compel [--compat] [--static] libs\n"
" compel -f FILE -o FILE [-p NAME] [-l N] hgen\n"
" -f, --file FILE input (parasite object) file name\n"
" -o, --output FILE output (header) file name\n"
" -p, --prefix NAME prefix for var names\n"
" -l, --log-level NUM log level (default: %d)\n"
" compel -h|--help\n"
" compel -V|--version\n",
COMPEL_DEFAULT_LOGLEVEL);
return rc;
}
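/*
* Typical "hgen" invocation (illustrative):
*
* compel -f parasite.po -o parasite.h hgen
*
* which converts a parasite object file into a C header blob.
*/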
static void print_includes(void)
{
int i;
/* list of standard include dirs (built into C preprocessor) */
const char *standard_includes[] = {
"/usr/include",
"/usr/local/include",
};
/* I am not installed, called via a wrapper */
if (uninst_root) {
printf("-I %s/include/uapi\n", uninst_root);
return;
}
/* I am installed.
* Make sure not to print banalities (standard include paths) */
for (i = 0; i < ARRAY_SIZE(standard_includes); i++)
if (strcmp(INCLUDEDIR, standard_includes[i]) == 0)
return;
/* Finally, print our non-standard include path */
printf("%s\n", "-I " INCLUDEDIR);
}
static void print_cflags(bool compat)
{
printf("%s\n", compat ? flags.cflags_compat : flags.cflags);
print_includes();
}
static void print_ldflags(bool compat)
{
const char *compat_str = (compat) ? "-compat" : "";
printf("%s", COMPEL_LDFLAGS_COMMON);
if (uninst_root) {
printf("%s/arch/%s/scripts/compel-pack%s.lds.S\n", uninst_root, flags.arch, compat_str);
} else {
printf("%s/compel/scripts/compel-pack%s.lds.S\n", LIBEXECDIR, compat_str);
}
}
static void print_plugin(const char *name)
{
const char suffix[] = ".lib.a";
if (uninst_root)
printf("%s/plugins/%s%s\n", uninst_root, name, suffix);
else
printf("%s/compel/%s%s\n", LIBEXECDIR, name, suffix);
}
static void print_plugins(char *const list[])
{
char *builtin_list[] = { "std", NULL };
char **p = builtin_list;
while (*p != NULL)
print_plugin(*p++);
while (*list != NULL)
print_plugin(*list++);
}
static int print_libs(bool is_static)
{
if (uninst_root) {
if (!is_static) {
fprintf(stderr, "Compel is not installed, can "
"only link with static libraries "
"(use --static)\n");
return 1;
}
printf("%s/%s\n", uninst_root, STATIC_LIB);
} else {
printf("%s/%s\n", LIBDIR, (is_static) ? STATIC_LIB : DYN_LIB);
}
return 0;
}
/* Extracts the file name (removing directory path and suffix)
* and checks the result for being a valid C identifier
* (replacing - with _ along the way).
*
* If everything went fine, return the resulting string,
* otherwise NULL.
*
* Example: gen_prefix("./some/path/to/file.c") ==> "file"
*/
static char *gen_prefix(const char *path)
{
const char *p1 = NULL, *p2 = NULL;
size_t len;
int i;
char *p, *ret;
len = strlen(path);
if (len == 0)
return NULL;
// Find the last slash (p1)
// and the first dot after it (p2)
for (i = len - 1; i >= 0; i--) {
if (!p1 && path[i] == '.') {
p2 = path + i - 1;
} else if (!p1 && path[i] == '/') {
p1 = path + i + 1;
break;
}
}
if (!p1) // no slash in path
p1 = path;
if (!p2) // no dot (after slash)
p2 = path + len;
len = p2 - p1 + 1;
if (len < 1)
return NULL;
ret = strndup(p1, len);
// Now, check if we got a valid C identifier. We don't need to care
// about C reserved keywords, as this is only used as a prefix.
for (p = ret; *p != '\0'; p++) {
if (isalpha(*p))
continue;
// digit is fine, except the first character
if (isdigit(*p) && p > ret)
continue;
// only allowed special character is _
if (*p == '_')
continue;
// as a courtesy, replace - with _
if (*p == '-') {
*p = '_';
continue;
}
// invalid character!
free(ret);
return NULL;
}
return ret;
}
int main(int argc, char *argv[])
{
int log_level = COMPEL_DEFAULT_LOGLEVEL;
bool compat = false;
bool is_static = false;
int opt, idx;
char *action;
static const char short_opts[] = "csf:o:p:hVl:";
static struct option long_opts[] = {
{ "compat", no_argument, 0, 'c' },
{ "static", no_argument, 0, 's' },
{ "file", required_argument, 0, 'f' },
{ "output", required_argument, 0, 'o' },
{ "prefix", required_argument, 0, 'p' },
{ "help", no_argument, 0, 'h' },
{ "version", no_argument, 0, 'V' },
{ "log-level", required_argument, 0, 'l' },
{},
};
uninst_root = getenv("COMPEL_UNINSTALLED_ROOTDIR");
while (1) {
idx = -1;
opt = getopt_long(argc, argv, short_opts, long_opts, &idx);
if (opt == -1)
break;
switch (opt) {
case 'c':
compat = true;
break;
case 's':
is_static = true;
break;
case 'f':
opts.input_filename = optarg;
break;
case 'o':
opts.output_filename = optarg;
break;
case 'p':
opts.prefix = optarg;
break;
case 'l':
log_level = atoi(optarg);
break;
case 'h':
return usage(0);
case 'V':
printf("Version: %d.%d.%d\n", COMPEL_SO_VERSION_MAJOR, COMPEL_SO_VERSION_MINOR,
COMPEL_SO_VERSION_SUBLEVEL);
exit(0);
default: // '?'
// error message already printed by getopt_long()
return usage(1);
}
}
if (optind >= argc) {
fprintf(stderr, "Error: action argument required\n");
return usage(1);
}
action = argv[optind++];
if (!strcmp(action, "includes")) {
print_includes();
return 0;
}
if (!strcmp(action, "cflags")) {
print_cflags(compat);
return 0;
}
if (!strcmp(action, "ldflags")) {
print_ldflags(compat);
return 0;
}
if (!strcmp(action, "plugins")) {
print_plugins(argv + optind);
return 0;
}
if (!strcmp(action, "libs")) {
return print_libs(is_static);
}
if (!strcmp(action, "hgen")) {
if (!opts.input_filename) {
fprintf(stderr, "Error: option --file required\n");
return usage(1);
}
if (!opts.output_filename) {
fprintf(stderr, "Error: option --output required\n");
return usage(1);
}
if (!opts.prefix) {
// prefix not provided, let's autogenerate
opts.prefix = gen_prefix(opts.input_filename);
if (!opts.prefix)
opts.prefix = gen_prefix(opts.output_filename);
if (!opts.prefix) {
fprintf(stderr, "Error: can't autogenerate "
"prefix (supply --prefix)");
return 2;
}
}
compel_log_init(&cli_log, log_level);
return piegen();
}
fprintf(stderr, "Error: unknown action '%s'\n", action);
return usage(1);
}
| 8,962 | 20.807786 | 90 |
c
|
criu
|
criu-master/compel/src/lib/handle-elf.c
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include "handle-elf.h"
#include "piegen.h"
#include "log.h"
#ifdef CONFIG_MIPS
#include "ldsodefs.h"
#endif
/* Check if pointer is out-of-bound */
static bool __ptr_oob(const uintptr_t ptr, const uintptr_t start, const size_t size)
{
uintptr_t end = start + size;
return ptr >= end || ptr < start;
}
/* Check if pointed structure's end is out-of-bound */
static bool __ptr_struct_end_oob(const uintptr_t ptr, const size_t struct_size, const uintptr_t start,
const size_t size)
{
/* the last byte of the structure should be inside [begin, end) */
return __ptr_oob(ptr + struct_size - 1, start, size);
}
/* Check if pointed structure is out-of-bound */
static bool __ptr_struct_oob(const uintptr_t ptr, const size_t struct_size, const uintptr_t start, const size_t size)
{
return __ptr_oob(ptr, start, size) || __ptr_struct_end_oob(ptr, struct_size, start, size);
}
static bool test_pointer(const void *ptr, const void *start, const size_t size, const char *name, const char *file,
const int line)
{
if (__ptr_oob((const uintptr_t)ptr, (const uintptr_t)start, size)) {
pr_err("Corrupted pointer %p (%s) at %s:%d\n", ptr, name, file, line);
return true;
}
return false;
}
#define ptr_func_exit(__ptr) \
do { \
if (test_pointer((__ptr), mem, size, #__ptr, __FILE__, __LINE__)) { \
free(sec_hdrs); \
return -1; \
} \
} while (0)
#ifdef ELF_PPC64
static int do_relative_toc(long value, uint16_t *location, unsigned long mask, int complain_signed)
{
if (complain_signed && (value + 0x8000 > 0xffff)) {
pr_err("TOC16 relocation overflows (%ld)\n", value);
return -1;
}
if ((~mask & 0xffff) & value) {
pr_err("bad TOC16 relocation (%ld) (0x%lx)\n", value, (~mask & 0xffff) & value);
return -1;
}
*location = (*location & ~mask) | (value & mask);
return 0;
}
#endif
static bool is_header_supported(Elf_Ehdr *hdr)
{
if (!arch_is_machine_supported(hdr->e_machine))
return false;
if ((hdr->e_type != ET_REL
#ifdef NO_RELOCS
&& hdr->e_type != ET_EXEC
#endif
) ||
hdr->e_version != EV_CURRENT)
return false;
return true;
}
static const char *get_strings_section(Elf_Ehdr *hdr, uintptr_t mem, size_t size)
{
size_t sec_table_size = ((size_t)hdr->e_shentsize) * hdr->e_shnum;
uintptr_t sec_table = mem + hdr->e_shoff;
Elf_Shdr *secstrings_hdr;
uintptr_t addr;
if (__ptr_struct_oob(sec_table, sec_table_size, mem, size)) {
pr_err("Section table [%#zx, %#zx) is out of [%#zx, %#zx)\n", sec_table, sec_table + sec_table_size,
mem, mem + size);
return NULL;
}
/*
* strings section header's offset in section headers table is
* (size of section header * index of string section header)
*/
addr = sec_table + ((size_t)hdr->e_shentsize) * hdr->e_shstrndx;
if (__ptr_struct_oob(addr, sizeof(Elf_Shdr), sec_table, sec_table_size)) {
pr_err("String section header @%#zx is out of [%#zx, %#zx)\n", addr, sec_table,
sec_table + sec_table_size);
return NULL;
}
secstrings_hdr = (void *)addr;
addr = mem + secstrings_hdr->sh_offset;
if (__ptr_struct_oob(addr, secstrings_hdr->sh_size, mem, size)) {
pr_err("String section @%#zx size %#lx is out of [%#zx, %#zx)\n", addr,
(unsigned long)secstrings_hdr->sh_size, mem, mem + size);
return NULL;
}
return (void *)addr;
}
/*
* This name @__handle_elf gets renamed into
* @handle_elf_ppc64 or say @handle_elf_x86_64
* depending on the architecture it's compiled
* under.
*/
int __handle_elf(void *mem, size_t size)
{
const char *symstrings = NULL;
Elf_Shdr *symtab_hdr = NULL;
Elf_Sym *symbols = NULL;
Elf_Ehdr *hdr = mem;
Elf_Shdr *strtab_hdr = NULL;
Elf_Shdr **sec_hdrs = NULL;
const char *secstrings;
size_t i, k, nr_gotpcrel = 0;
#ifdef ELF_PPC64
int64_t toc_offset = 0;
#endif
int ret = -EINVAL;
unsigned long data_off = 0;
pr_debug("Header\n");
pr_debug("------------\n");
pr_debug("\ttype 0x%x machine 0x%x version 0x%x\n", (unsigned)hdr->e_type, (unsigned)hdr->e_machine,
(unsigned)hdr->e_version);
if (!is_header_supported(hdr)) {
pr_err("Unsupported header detected\n");
goto err;
}
sec_hdrs = malloc(sizeof(*sec_hdrs) * hdr->e_shnum);
if (!sec_hdrs) {
pr_err("No memory for section headers\n");
ret = -ENOMEM;
goto err;
}
secstrings = get_strings_section(hdr, (uintptr_t)mem, size);
if (!secstrings)
goto err;
pr_debug("Sections\n");
pr_debug("------------\n");
for (i = 0; i < hdr->e_shnum; i++) {
Elf_Shdr *sh = mem + hdr->e_shoff + hdr->e_shentsize * i;
ptr_func_exit(sh);
if (sh->sh_type == SHT_SYMTAB)
symtab_hdr = sh;
ptr_func_exit(&secstrings[sh->sh_name]);
pr_debug("\t index %-2zd type 0x%-2x name %s\n", i, (unsigned)sh->sh_type, &secstrings[sh->sh_name]);
sec_hdrs[i] = sh;
#ifdef ELF_PPC64
if (!strcmp(&secstrings[sh->sh_name], ".toc")) {
toc_offset = sh->sh_addr + 0x8000;
pr_debug("\t\tTOC offset 0x%lx\n", toc_offset);
}
#endif
}
/* Calculate section addresses with proper alignment.
* Note: some but not all linkers precalculate this information.
*/
for (i = 0, k = 0; i < hdr->e_shnum; i++) {
Elf_Shdr *sh = sec_hdrs[i];
if (!(sh->sh_flags & SHF_ALLOC))
continue;
if (sh->sh_addralign > 0 && k % sh->sh_addralign != 0) {
k += sh->sh_addralign - k % sh->sh_addralign;
}
if (sh->sh_addr && sh->sh_addr != k)
pr_info("Overriding unexpected precalculated address of section (section %s addr 0x%lx expected 0x%lx)\n",
&secstrings[sh->sh_name], (unsigned long)sh->sh_addr, (unsigned long)k);
sh->sh_addr = k;
k += sh->sh_size;
}
if (!symtab_hdr) {
pr_err("No symbol table present\n");
goto err;
}
if (!symtab_hdr->sh_link || symtab_hdr->sh_link >= hdr->e_shnum) {
pr_err("Corrupted symtab header\n");
goto err;
}
pr_debug("Symbols\n");
pr_debug("------------\n");
strtab_hdr = sec_hdrs[symtab_hdr->sh_link];
ptr_func_exit(strtab_hdr);
symbols = mem + symtab_hdr->sh_offset;
ptr_func_exit(symbols);
symstrings = mem + strtab_hdr->sh_offset;
ptr_func_exit(symstrings);
if (sizeof(*symbols) != symtab_hdr->sh_entsize) {
pr_err("Symbol table align differ\n");
goto err;
}
pr_out("/* Autogenerated from %s */\n", opts.input_filename);
pr_out("#include <compel/infect.h>\n");
for (i = 0; i < symtab_hdr->sh_size / symtab_hdr->sh_entsize; i++) {
Elf_Sym *sym = &symbols[i];
const char *name;
Elf_Shdr *sh_src;
ptr_func_exit(sym);
name = &symstrings[sym->st_name];
ptr_func_exit(name);
if (!*name)
continue;
pr_debug("\ttype 0x%-2x bind 0x%-2x shndx 0x%-4x value 0x%-2lx name %s\n",
(unsigned)ELF_ST_TYPE(sym->st_info), (unsigned)ELF_ST_BIND(sym->st_info),
(unsigned)sym->st_shndx, (unsigned long)sym->st_value, name);
#ifdef ELF_PPC64
if (!sym->st_value && !strncmp(name, ".TOC.", 6)) {
if (!toc_offset) {
pr_err("No TOC pointer\n");
goto err;
}
sym->st_value = toc_offset;
continue;
}
#endif
if (strncmp(name, "__export", 8))
continue;
if ((sym->st_shndx && sym->st_shndx < hdr->e_shnum) || sym->st_shndx == SHN_ABS) {
if (sym->st_shndx == SHN_ABS) {
sh_src = NULL;
} else {
sh_src = sec_hdrs[sym->st_shndx];
ptr_func_exit(sh_src);
}
pr_out("#define %s_sym%s 0x%lx\n", opts.prefix, name,
(unsigned long)(sym->st_value + (sh_src ? sh_src->sh_addr : 0)));
}
}
pr_out("static __maybe_unused compel_reloc_t %s_relocs[] = {\n", opts.prefix);
#ifndef NO_RELOCS
pr_debug("Relocations\n");
pr_debug("------------\n");
for (i = 0; i < hdr->e_shnum; i++) {
Elf_Shdr *sh = sec_hdrs[i];
Elf_Shdr *sh_rel;
if (sh->sh_type != SHT_REL && sh->sh_type != SHT_RELA)
continue;
sh_rel = sec_hdrs[sh->sh_info];
ptr_func_exit(sh_rel);
pr_debug("\tsection %2zd type 0x%-2x link 0x%-2x info 0x%-2x name %s\n", i, (unsigned)sh->sh_type,
(unsigned)sh->sh_link, (unsigned)sh->sh_info, &secstrings[sh->sh_name]);
for (k = 0; k < sh->sh_size / sh->sh_entsize; k++) {
int64_t __maybe_unused addend64, __maybe_unused value64;
int32_t __maybe_unused addend32, __maybe_unused value32;
unsigned long place;
const char *name;
void *where;
Elf_Sym *sym;
union {
Elf_Rel rel;
Elf_Rela rela;
} *r = mem + sh->sh_offset + sh->sh_entsize * k;
ptr_func_exit(r);
sym = &symbols[ELF_R_SYM(r->rel.r_info)];
ptr_func_exit(sym);
name = &symstrings[sym->st_name];
ptr_func_exit(name);
where = mem + sh_rel->sh_offset + r->rel.r_offset;
ptr_func_exit(where);
pr_debug("\t\tr_offset 0x%-4lx r_info 0x%-4lx / sym 0x%-2lx type 0x%-2lx symsecoff 0x%-4lx\n",
(unsigned long)r->rel.r_offset, (unsigned long)r->rel.r_info,
(unsigned long)ELF_R_SYM(r->rel.r_info), (unsigned long)ELF_R_TYPE(r->rel.r_info),
(unsigned long)sh_rel->sh_addr);
if (sym->st_shndx == SHN_UNDEF) {
#ifdef ELF_PPC64
/* On PowerPC, TOC symbols appear to be
* undefined but should be processed as well.
* Their type is STT_NOTYPE, so report any
* other one.
*/
if (ELF32_ST_TYPE(sym->st_info) != STT_NOTYPE || strncmp(name, ".TOC.", 6)) {
pr_err("Unexpected undefined symbol:%s\n", name);
goto err;
}
#else
pr_err("Unexpected undefined symbol: `%s'. External symbol in PIE?\n", name);
goto err;
#endif
} else if (sym->st_shndx == SHN_COMMON) {
/*
* To support COMMON symbols, we could
* allocate these variables somewhere,
* perhaps somewhere near the GOT table.
* For now, we punt.
*/
pr_err("Unsupported COMMON symbol: `%s'. Try initializing the variable\n", name);
goto err;
}
if (sh->sh_type == SHT_REL) {
addend32 = *(int32_t *)where;
addend64 = *(int64_t *)where;
} else {
addend32 = (int32_t)r->rela.r_addend;
addend64 = (int64_t)r->rela.r_addend;
}
place = sh_rel->sh_addr + r->rel.r_offset;
pr_debug("\t\t\tvalue 0x%-8lx addend32 %-4d addend64 %-8ld place %-8lx symname %s\n",
(unsigned long)sym->st_value, addend32, (long)addend64, (long)place, name);
if (sym->st_shndx == SHN_ABS) {
value32 = (int32_t)sym->st_value;
value64 = (int64_t)sym->st_value;
} else {
Elf_Shdr *sh_src;
if ((unsigned)sym->st_shndx > (unsigned)hdr->e_shnum) {
pr_err("Unexpected symbol section index %u/%u\n", (unsigned)sym->st_shndx,
hdr->e_shnum);
goto err;
}
sh_src = sec_hdrs[sym->st_shndx];
ptr_func_exit(sh_src);
value32 = (int32_t)sh_src->sh_addr + (int32_t)sym->st_value;
value64 = (int64_t)sh_src->sh_addr + (int64_t)sym->st_value;
}
#ifdef ELF_PPC64
/*
* Snippet from the OpenPOWER ABI for Linux Supplement:
*
* The OpenPOWER ABI uses the three most-significant bits in the symbol
* st_other field to specify the number of instructions between a function's
* global entry point and local entry point. The global entry point is used
* when it is necessary to set up the TOC pointer (r2) for the function. The
* local entry point is used when r2 is known to already be valid for the
* function. A value of zero in these bits asserts that the function does
* not use r2.
*
* The st_other values have the following meanings:
* 0 and 1, the local and global entry points are the same.
* 2, the local entry point is at 1 instruction past the global entry point.
* 3, the local entry point is at 2 instructions past the global entry point.
* 4, the local entry point is at 4 instructions past the global entry point.
* 5, the local entry point is at 8 instructions past the global entry point.
* 6, the local entry point is at 16 instructions past the global entry point.
* 7, reserved.
*
* Here we only handle case '3', which is the most commonly seen.
*/
#define LOCAL_OFFSET(s) ((s->st_other >> 5) & 0x7)
if (LOCAL_OFFSET(sym)) {
if (LOCAL_OFFSET(sym) != 3) {
pr_err("Unexpected local offset value %d\n", LOCAL_OFFSET(sym));
goto err;
}
pr_debug("\t\t\tUsing local offset\n");
value64 += 8;
value32 += 8;
}
#endif
switch (ELF_R_TYPE(r->rel.r_info)) {
#ifdef CONFIG_MIPS
case R_MIPS_PC16:
/* s+a-p relative */
*((int32_t *)where) = *((int32_t *)where) | ((value32 + addend32 - place) >> 2);
break;
case R_MIPS_26:
/* local : (((A << 2) | (P & 0xf0000000)) + S) >> 2
* external : (sign_extend(A << 2) + S) >> 2
*/
if (((unsigned)ELF_ST_BIND(sym->st_info) == 0x1) ||
((unsigned)ELF_ST_BIND(sym->st_info) == 0x2)) {
/* bind type: local is 0x0, global is 0x1, weak is 0x2 */
addend32 = value32;
}
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_26, "
".addend = %-8d, .value = 0x%-16x, }, /* R_MIPS_26 */\n",
(unsigned int)place, addend32, value32);
break;
case R_MIPS_32:
/* S+A */
break;
case R_MIPS_64:
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_64, "
".addend = %-8ld, .value = 0x%-16lx, }, /* R_MIPS_64 */\n",
(unsigned int)place, (long)addend64, (long)value64);
break;
case R_MIPS_HIGHEST:
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_HIGHEST, "
".addend = %-8d, .value = 0x%-16x, }, /* R_MIPS_HIGHEST */\n",
(unsigned int)place, addend32, value32);
break;
case R_MIPS_HIGHER:
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_HIGHER, "
".addend = %-8d, .value = 0x%-16x, }, /* R_MIPS_HIGHER */\n",
(unsigned int)place, addend32, value32);
break;
case R_MIPS_HI16:
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_HI16, "
".addend = %-8d, .value = 0x%-16x, }, /* R_MIPS_HI16 */\n",
(unsigned int)place, addend32, value32);
break;
case R_MIPS_LO16:
if ((unsigned)ELF_ST_BIND(sym->st_info) == 0x1) {
/* bind type: local is 0x0, global is 0x1 */
addend32 = value32;
}
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_MIPS_LO16, "
".addend = %-8d, .value = 0x%-16x, }, /* R_MIPS_LO16 */\n",
(unsigned int)place, addend32, value32);
break;
#endif
#ifdef ELF_PPC64
case R_PPC64_REL24:
/* Update PC relative offset, linker has not done this yet */
pr_debug("\t\t\tR_PPC64_REL24 at 0x%-4lx val 0x%lx\n", place, value64);
/* Convert value to relative */
value64 -= place;
if (value64 + 0x2000000 > 0x3ffffff || (value64 & 3) != 0) {
pr_err("REL24 %li out of range!\n", (long int)value64);
goto err;
}
/* Only replace bits 2 through 26 */
*(uint32_t *)where = (*(uint32_t *)where & ~0x03fffffc) | (value64 & 0x03fffffc);
break;
case R_PPC64_ADDR32:
case R_PPC64_REL32:
pr_debug("\t\t\tR_PPC64_ADDR32 at 0x%-4lx val 0x%x\n", place,
(unsigned int)(value32 + addend32));
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_INT, "
" .addend = %-8d, .value = 0x%-16x, "
"}, /* R_PPC64_ADDR32 */\n",
(unsigned int)place, addend32, value32);
break;
case R_PPC64_ADDR64:
case R_PPC64_REL64:
pr_debug("\t\t\tR_PPC64_ADDR64 at 0x%-4lx val 0x%lx\n", place, value64 + addend64);
pr_out("\t{ .offset = 0x%-8x, .type = COMPEL_TYPE_LONG,"
" .addend = %-8ld, .value = 0x%-16lx, "
"}, /* R_PPC64_ADDR64 */\n",
(unsigned int)place, (long)addend64, (long)value64);
break;
case R_PPC64_TOC16_HA:
pr_debug("\t\t\tR_PPC64_TOC16_HA at 0x%-4lx val 0x%lx\n", place,
value64 + addend64 - toc_offset + 0x8000);
if (do_relative_toc((value64 + addend64 - toc_offset + 0x8000) >> 16, where, 0xffff, 1))
goto err;
break;
case R_PPC64_TOC16_LO:
pr_debug("\t\t\tR_PPC64_TOC16_LO at 0x%-4lx val 0x%lx\n", place,
value64 + addend64 - toc_offset);
if (do_relative_toc(value64 + addend64 - toc_offset, where, 0xffff, 1))
goto err;
break;
case R_PPC64_TOC16_LO_DS:
pr_debug("\t\t\tR_PPC64_TOC16_LO_DS at 0x%-4lx val 0x%lx\n", place,
value64 + addend64 - toc_offset);
if (do_relative_toc(value64 + addend64 - toc_offset, where, 0xfffc, 0))
goto err;
break;
case R_PPC64_REL16_HA:
value64 += addend64 - place;
pr_debug("\t\t\tR_PPC64_REL16_HA at 0x%-4lx val 0x%lx\n", place, value64);
/* check that we are dealing with the addis 2,12 instruction */
if (((*(uint32_t *)where) & 0xffff0000) != 0x3c4c0000) {
pr_err("Unexpected instruction for R_PPC64_REL16_HA\n");
goto err;
}
*(uint16_t *)where = ((value64 + 0x8000) >> 16) & 0xffff;
break;
case R_PPC64_REL16_LO:
value64 += addend64 - place;
pr_debug("\t\t\tR_PPC64_REL16_LO at 0x%-4lx val 0x%lx\n", place, value64);
/* check that we are dealing with the addi 2,2 instruction */
if (((*(uint32_t *)where) & 0xffff0000) != 0x38420000) {
pr_err("Unexpected instruction for R_PPC64_REL16_LO\n");
goto err;
}
*(uint16_t *)where = value64 & 0xffff;
break;
#endif /* ELF_PPC64 */
#ifdef ELF_X86_64
case R_X86_64_32: /* Symbol + Addend (4 bytes) */
case R_X86_64_32S: /* Symbol + Addend (4 bytes) */
pr_debug("\t\t\t\tR_X86_64_32 at 0x%-4lx val 0x%x\n", place, value32);
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_INT, "
".addend = %-8d, .value = 0x%-16x, }, /* R_X86_64_32 */\n",
(unsigned int)place, addend32, value32);
break;
case R_X86_64_64: /* Symbol + Addend (8 bytes) */
pr_debug("\t\t\t\tR_X86_64_64 at 0x%-4lx val 0x%lx\n", place, (long)value64);
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_LONG, "
".addend = %-8ld, .value = 0x%-16lx, }, /* R_X86_64_64 */\n",
(unsigned int)place, (long)addend64, (long)value64);
break;
case R_X86_64_PC32: /* Symbol + Addend - Place (4 bytes) */
pr_debug("\t\t\t\tR_X86_64_PC32 at 0x%-4lx val 0x%x\n", place,
value32 + addend32 - (int32_t)place);
/*
* R_X86_64_PC32 are relative, patch them inplace.
*/
*((int32_t *)where) = value32 + addend32 - place;
break;
case R_X86_64_PLT32: /* ProcLinkage + Addend - Place (4 bytes) */
pr_debug("\t\t\t\tR_X86_64_PLT32 at 0x%-4lx val 0x%x\n", place,
value32 + addend32 - (int32_t)place);
/*
* R_X86_64_PLT32 are relative, patch them inplace.
*/
*((int32_t *)where) = value32 + addend32 - place;
break;
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
case R_X86_64_GOTPCREL: /* SymbolOffsetInGot + GOT + Addend - Place (4 bytes) */
pr_debug("\t\t\t\tR_X86_64_GOTPCREL at 0x%-4lx val 0x%x\n", place, value32);
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_LONG | COMPEL_TYPE_GOTPCREL, "
".addend = %-8d, .value = 0x%-16x, }, /* R_X86_64_GOTPCREL */\n",
(unsigned int)place, addend32, value32);
nr_gotpcrel++;
break;
#endif
#ifdef ELF_X86_32
case R_386_32: /* Symbol + Addend */
pr_debug("\t\t\t\tR_386_32 at 0x%-4lx val 0x%x\n", place, value32 + addend32);
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_INT, "
".addend = %-4d, .value = 0x%x, },\n",
(unsigned int)place, addend32, value32);
break;
case R_386_PC32: /* Symbol + Addend - Place */
pr_debug("\t\t\t\tR_386_PC32 at 0x%-4lx val 0x%x\n", place,
value32 + addend32 - (int32_t)place);
/*
* R_386_PC32 are relative, patch them inplace.
*/
*((int32_t *)where) = value32 + addend32 - place;
break;
#endif
#ifdef ELF_S390
/*
* See also arch/s390/kernel/module.c/apply_rela():
* A PLT reads the GOT (global offset table). We can handle it like
* R_390_PC32DBL because we have linked statically.
*/
case R_390_PLT32DBL: /* PC relative on a PLT (procedure linkage table) */
pr_debug("\t\t\t\tR_390_PLT32DBL at 0x%-4lx val 0x%x\n", place, value32 + addend32);
*((int32_t *)where) = (value64 + addend64 - place) >> 1;
break;
case R_390_PC32DBL: /* PC relative on a symbol */
pr_debug("\t\t\t\tR_390_PC32DBL at 0x%-4lx val 0x%x\n", place, value32 + addend32);
*((int32_t *)where) = (value64 + addend64 - place) >> 1;
break;
case R_390_64: /* 64 bit absolute address */
pr_debug("\t\t\t\tR_390_64 at 0x%-4lx val 0x%lx\n", place, (long)value64);
pr_out(" { .offset = 0x%-8x, .type = COMPEL_TYPE_LONG, "
".addend = %-8ld, .value = 0x%-16lx, }, /* R_390_64 */\n",
(unsigned int)place, (long)addend64, (long)value64);
break;
case R_390_PC64: /* 64 bit relative address */
*((int64_t *)where) = value64 + addend64 - place;
pr_debug("\t\t\t\tR_390_PC64 at 0x%-4lx val 0x%lx\n", place, (long)value64);
break;
#endif
default:
pr_err("Unsupported relocation of type %lu\n",
(unsigned long)ELF_R_TYPE(r->rel.r_info));
goto err;
}
}
}
#endif /* !NO_RELOCS */
pr_out("};\n");
pr_out("static __maybe_unused const char %s_blob[] = {\n\t", opts.prefix);
for (i = 0, k = 0; i < hdr->e_shnum; i++) {
Elf_Shdr *sh = sec_hdrs[i];
unsigned char *shdata;
size_t j;
if (!(sh->sh_flags & SHF_ALLOC) || !sh->sh_size)
continue;
shdata = mem + sh->sh_offset;
pr_debug("Copying section '%s'\n"
"\tstart:0x%lx (gap:0x%lx) size:0x%lx\n",
&secstrings[sh->sh_name], (unsigned long)sh->sh_addr, (unsigned long)(sh->sh_addr - k),
(unsigned long)sh->sh_size);
/* write 0 in the gap between the 2 sections */
for (; k < sh->sh_addr; k++) {
if (k && (k % 8) == 0)
pr_out("\n\t");
pr_out("0x00,");
}
for (j = 0; j < sh->sh_size; j++, k++) {
if (k && (k % 8) == 0)
pr_out("\n\t");
pr_out("0x%02x,", shdata[j]);
}
if (!strcmp(&secstrings[sh->sh_name], ".data"))
data_off = sh->sh_addr;
}
pr_out("};\n");
pr_out("\n");
pr_out("static void __maybe_unused %s_setup_c_header_desc(struct parasite_blob_desc *pbd, bool native)\n",
opts.prefix);
pr_out("{\n"
" pbd->parasite_type = COMPEL_BLOB_CHEADER;\n");
pr_out("\tpbd->hdr.mem = %s_blob;\n", opts.prefix);
pr_out("\tpbd->hdr.bsize = sizeof(%s_blob);\n", opts.prefix);
pr_out("\tif (native)\n");
pr_out("\t\tpbd->hdr.parasite_ip_off = "
"%s_sym__export_parasite_head_start;\n",
opts.prefix);
pr_out("#ifdef CONFIG_COMPAT\n");
pr_out("\telse\n");
pr_out("\t\tpbd->hdr.parasite_ip_off = "
"%s_sym__export_parasite_head_start_compat;\n",
opts.prefix);
pr_out("#endif /* CONFIG_COMPAT */\n");
pr_out("\tpbd->hdr.cmd_off = %s_sym__export_parasite_service_cmd;\n", opts.prefix);
pr_out("\tpbd->hdr.args_ptr_off = %s_sym__export_parasite_service_args_ptr;\n", opts.prefix);
pr_out("\tpbd->hdr.got_off = round_up(pbd->hdr.bsize, sizeof(long));\n");
pr_out("\tpbd->hdr.args_off = pbd->hdr.got_off + %zd*sizeof(long);\n", nr_gotpcrel);
pr_out("\tpbd->hdr.data_off = %#lx;\n", data_off);
pr_out("\tpbd->hdr.relocs = %s_relocs;\n", opts.prefix);
pr_out("\tpbd->hdr.nr_relocs = "
"sizeof(%s_relocs) / sizeof(%s_relocs[0]);\n",
opts.prefix, opts.prefix);
pr_out("}\n");
pr_out("\n");
pr_out("static void __maybe_unused %s_setup_c_header(struct parasite_ctl *ctl)\n", opts.prefix);
pr_out("{\n");
pr_out("\t%s_setup_c_header_desc(compel_parasite_blob_desc(ctl), compel_mode_native(ctl));\n", opts.prefix);
pr_out("}\n");
ret = 0;
err:
free(sec_hdrs);
return ret;
}
| 23,719 | 31.944444 | 117 |
c
|
criu
|
criu-master/compel/test/stack/spy.c
|
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <common/page.h>
#include <compel/log.h>
#include <compel/infect-rpc.h>
#include <errno.h>
#include "parasite.h"
#define PARASITE_CMD_INC PARASITE_USER_CMDS
#define PARASITE_CMD_DEC (PARASITE_USER_CMDS + 1)
#define err_and_ret(msg) \
do { \
fprintf(stderr, msg); \
return -1; \
} while (0)
void *saved_data = NULL;
#define SAVED_DATA_MAX page_size()
void cleanup_saved_data(void)
{
free(saved_data);
}
static void print_vmsg(unsigned int lvl, const char *fmt, va_list parms)
{
printf("\tLC%u: ", lvl);
vprintf(fmt, parms);
}
static void *get_parasite_rstack_start(struct parasite_ctl *ctl)
{
void *rstack, *r_thread_stack, *rstack_start;
compel_get_stack(ctl, &rstack, &r_thread_stack);
rstack_start = rstack;
if (r_thread_stack != NULL && r_thread_stack < rstack_start)
rstack_start = r_thread_stack;
return rstack_start;
}
static int page_writable(struct parasite_ctl *ctl, int pid, void *page)
{
FILE *maps;
size_t maps_line_len = 0;
char *maps_line = NULL;
char victim_maps_path[6 + 11 + 5 + 1];
int written;
int ret = 0;
if (((uintptr_t)page & (page_size() - 1)) != 0) {
fprintf(stderr, "Page address not aligned\n");
ret = -1;
goto done;
}
written = snprintf(victim_maps_path, sizeof(victim_maps_path), "/proc/%d/maps", pid);
if (written < 0 || written >= sizeof(victim_maps_path)) {
fprintf(stderr, "Failed to create path string to victim's /proc/%d/maps file\n", pid);
ret = -1;
goto done;
}
maps = fopen(victim_maps_path, "r");
if (maps == NULL) {
perror("Can't open victim's /proc/$pid/maps");
ret = -1;
goto done;
}
errno = 0; /* so the post-loop errno check only sees getline() failures */
while (getline(&maps_line, &maps_line_len, maps) != -1) {
unsigned long vmstart, vmend;
char r, w;
if (sscanf(maps_line, "%lx-%lx %c%c", &vmstart, &vmend, &r, &w) < 4) {
fprintf(stderr, "Can't parse victim's /proc/%d/maps; line: %s\n", pid, maps_line);
ret = -1;
goto free_linebuf;
}
if (page >= (void *)vmstart && page < (void *)vmend) {
if (w == 'w') {
if (r != 'r') {
fprintf(stderr, "Expecting writable memory to also be readable");
ret = -1;
goto free_linebuf;
}
ret = 1;
}
break;
}
}
if (errno) {
perror("Can't read victim's /proc/$pid/maps");
ret = -1;
}
free_linebuf:
free(maps_line);
fclose(maps);
done:
return ret;
}
static void *read_proc_mem(int pid, void *offset, size_t len)
{
char victim_mem_path[6 + 11 + 4 + 1];
int written;
int fd;
void *data;
ssize_t mem_read;
written = snprintf(victim_mem_path, sizeof(victim_mem_path), "/proc/%d/mem", pid);
if (written < 0 || written >= sizeof(victim_mem_path)) {
fprintf(stderr, "Failed to create path string to victim's /proc/%d/mem file\n", pid);
return NULL;
}
fd = open(victim_mem_path, O_RDONLY);
if (fd < 0) {
perror("Failed to open victim's /proc/$pid/mem file");
return NULL;
}
data = malloc(len);
if (data == NULL) {
perror("Can't allocate memory to read victim's /proc/$pid/mem file");
close(fd);
return NULL;
}
mem_read = pread(fd, data, len, (off_t)offset);
if (mem_read == -1) {
perror("Failed to read victim's /proc/$pid/mem file");
goto freebuf;
}
if (mem_read != (ssize_t)len) {
fprintf(stderr, "Short read of victim's /proc/$pid/mem file\n");
goto freebuf;
}
/* don't leak the mem fd on the success path either */
close(fd);
return data;
freebuf:
close(fd);
free(data);
return NULL;
}
static int save_data_near_stack(struct parasite_ctl *ctl, int pid, void *stack, void **saved_data,
size_t *saved_data_size)
{
size_t page_mask = page_size() - 1;
size_t saved_size = 0;
size_t stack_size_last_page = (uintptr_t)stack & page_mask;
void *next_page = stack;
if (stack_size_last_page != 0) {
size_t empty_space_last_page = page_size() - stack_size_last_page;
saved_size = min(empty_space_last_page, (size_t)SAVED_DATA_MAX);
next_page += page_size() - stack_size_last_page;
}
while (saved_size < SAVED_DATA_MAX && next_page != NULL) {
switch (page_writable(ctl, pid, next_page)) {
case 1:
saved_size = min((size_t)(saved_size + page_size()), (size_t)SAVED_DATA_MAX);
next_page += page_size();
break;
case 0:
next_page = NULL;
break;
default:
return -1;
}
}
if (saved_size > 0) {
void *sd;
sd = read_proc_mem(pid, stack, saved_size);
if (sd == NULL)
return -1;
*saved_data = sd;
} else {
*saved_data = NULL;
}
*saved_data_size = saved_size;
return 0;
}
static int check_saved_data(struct parasite_ctl *ctl, int pid, void *stack, void *saved_data, size_t saved_data_size)
{
if (saved_data != NULL) {
void *current_data;
current_data = read_proc_mem(pid, stack, saved_data_size);
if (current_data == NULL)
return -1;
if (memcmp(saved_data, current_data, saved_data_size) != 0)
return 1;
}
return 0;
}
static int do_infection(int pid)
{
int state;
struct parasite_ctl *ctl;
struct infect_ctx *ictx;
int *arg;
void *stack;
size_t saved_data_size;
int saved_data_check;
compel_log_init(print_vmsg, COMPEL_LOG_DEBUG);
printf("Stopping task\n");
state = compel_stop_task(pid);
if (state < 0)
err_and_ret("Can't stop task\n");
printf("Preparing parasite ctl\n");
ctl = compel_prepare(pid);
if (!ctl)
err_and_ret("Can't prepare for infection\n");
printf("Configuring contexts\n");
/*
* First -- the infection context. Most of the stuff
* is already filled in by compel_prepare(); just set the
* log descriptor for the parasite side, as the library
* cannot live w/o it.
*/
ictx = compel_infect_ctx(ctl);
ictx->log_fd = STDERR_FILENO;
parasite_setup_c_header(ctl);
printf("Infecting\n");
if (compel_infect_no_daemon(ctl, 1, sizeof(int)))
err_and_ret("Can't infect victim\n");
if (atexit(cleanup_saved_data))
err_and_ret("Can't register cleanup function with atexit\n");
stack = get_parasite_rstack_start(ctl);
if (save_data_near_stack(ctl, pid, stack, &saved_data, &saved_data_size))
err_and_ret("Can't save data above stack\n");
if (compel_start_daemon(ctl))
err_and_ret("Can't start daemon in victim\n");
/*
* Now get the area with arguments and run two
* commands one by one.
*/
arg = compel_parasite_args(ctl, int);
printf("Running cmd 1\n");
*arg = 137;
if (compel_rpc_call_sync(PARASITE_CMD_INC, ctl))
err_and_ret("Can't run parasite command 1\n");
printf("Running cmd 2\n");
*arg = 404;
if (compel_rpc_call_sync(PARASITE_CMD_DEC, ctl))
err_and_ret("Can't run parasite command 2\n");
saved_data_check = check_saved_data(ctl, pid, stack, saved_data, saved_data_size);
if (saved_data_check == -1)
err_and_ret("Could not check saved data\n");
if (saved_data_check != 0)
err_and_ret("Saved data unexpectedly modified\n");
/*
* Done. Cure and resume the task.
*/
printf("Curing\n");
if (compel_cure(ctl))
err_and_ret("Can't cure victim\n");
if (compel_resume_task(pid, state, state))
err_and_ret("Can't unseize task\n");
printf("Done\n");
return 0;
}
static inline int chk(int fd, int val)
{
int v = 0;
if (read(fd, &v, sizeof(v)) != sizeof(v))
return 1;
printf("%d, want %d\n", v, val);
return v != val;
}
int main(int argc, char **argv)
{
int p_in[2], p_out[2], p_err[2], pid, i, err = 0;
/*
* Prepare the I/O pipes and fork the victim binary
*/
if (pipe(p_in) || pipe(p_out) || pipe(p_err)) {
perror("Can't make pipe");
return -1;
}
pid = vfork();
if (pid == 0) {
close(p_in[1]);
dup2(p_in[0], 0);
close(p_in[0]);
close(p_out[0]);
dup2(p_out[1], 1);
close(p_out[1]);
close(p_err[0]);
dup2(p_err[1], 2);
close(p_err[1]);
execl("./victim", "victim", NULL);
exit(1);
}
close(p_in[0]);
close(p_out[1]);
close(p_err[1]);
/*
* Tell the little guy some numbers
*/
i = 1;
if (write(p_in[1], &i, sizeof(i)) != sizeof(i))
return 1;
i = 42;
if (write(p_in[1], &i, sizeof(i)) != sizeof(i))
return 1;
printf("Checking the victim alive\n");
err = chk(p_out[0], 1);
if (err)
return 1;
err = chk(p_out[0], 42);
if (err)
return 1;
/*
* Now do the infection with parasite.c
*/
printf("Infecting the victim\n");
if (do_infection(pid))
return 1;
/*
* Tell the victim some more stuff to check it's alive
*/
i = 1234;
if (write(p_in[1], &i, sizeof(i)) != sizeof(i))
return 1;
i = 4096;
if (write(p_in[1], &i, sizeof(i)) != sizeof(i))
return 1;
/*
* Stop the victim and check the infection went well
*/
printf("Closing victim stdin\n");
close(p_in[1]);
printf("Waiting for victim to die\n");
wait(NULL);
printf("Checking the result\n");
/* These two came from parasite */
err = chk(p_out[0], 138);
err |= chk(p_out[0], 403);
/* These two came from post-infect */
err |= chk(p_out[0], 1234);
err |= chk(p_out[0], 4096);
if (!err)
printf("All OK\n");
else
printf("Something went WRONG\n");
return 0;
}
| 8,729 | 20.502463 | 117 |
c
|
criu
|
criu-master/criu/aio.c
|
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include "vma.h"
#include "xmalloc.h"
#include "pstree.h"
#include "restorer.h"
#include "aio.h"
#include "rst_info.h"
#include "rst-malloc.h"
#include "parasite.h"
#include "parasite-syscall.h"
#include "images/mm.pb-c.h"
#include "compel/infect.h"
#define NR_IOEVENTS_IN_NPAGES(npages) ((PAGE_SIZE * (npages) - sizeof(struct aio_ring)) / sizeof(struct io_event))
int dump_aio_ring(MmEntry *mme, struct vma_area *vma)
{
int nr = mme->n_aios;
AioRingEntry *re;
mme->aios = xrealloc(mme->aios, (nr + 1) * sizeof(re));
if (!mme->aios)
return -1;
re = xmalloc(sizeof(*re));
if (!re)
return -1;
aio_ring_entry__init(re);
re->id = vma->e->start;
re->ring_len = vma->e->end - vma->e->start;
re->nr_req = aio_estimate_nr_reqs(re->ring_len);
if (!re->nr_req) {
xfree(re);
return -1;
}
mme->aios[nr] = re;
mme->n_aios = nr + 1;
pr_info("Dumping AIO ring @%" PRIx64 "-%" PRIx64 "\n", vma->e->start, vma->e->end);
return 0;
}
void free_aios(MmEntry *mme)
{
int i;
if (mme->aios) {
for (i = 0; i < mme->n_aios; i++)
xfree(mme->aios[i]);
xfree(mme->aios);
}
}
unsigned int aio_estimate_nr_reqs(unsigned int size)
{
unsigned int k_max_reqs = NR_IOEVENTS_IN_NPAGES(size / PAGE_SIZE);
if (size & ~PAGE_MASK) {
pr_err("Ring size is not aligned\n");
return 0;
}
/*
* Kernel does
*
* nr_reqs = max(nr_reqs, nr_cpus * 4)
* nr_reqs *= 2
* nr_reqs += 2
* ring = roundup(sizeof(head) + nr_reqs * sizeof(req))
* nr_reqs = (ring - sizeof(head)) / sizeof(req)
*
* And the k_max_reqs here is the resulting value.
*
* We need to get the initial nr_reqs that would grow
* up back to the k_max_reqs.
*/
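/*
* Worked example (illustrative; sizes as on a typical x86_64 kernel,
* where both struct aio_ring and struct io_event are 32 bytes): for a
* one-page (4096-byte) ring, k_max_reqs = (4096 - 32) / 32 = 127, so
* the estimated original request is (127 - 2) / 2 = 62.
*/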
return (k_max_reqs - 2) / 2;
}
unsigned long aio_rings_args_size(struct vm_area_list *vmas)
{
return sizeof(struct parasite_check_aios_args) + vmas->nr_aios * sizeof(struct parasite_aio);
}
int parasite_collect_aios(struct parasite_ctl *ctl, struct vm_area_list *vmas)
{
struct vma_area *vma;
struct parasite_check_aios_args *aa;
struct parasite_aio *pa;
if (!vmas->nr_aios)
return 0;
pr_info("Checking AIO rings\n");
/*
* Go to the parasite and
* a) check that no requests are currently pending
* b) get the maximum number of requests the kernel handles
* to estimate what the user requested on ring
* creation.
*/
aa = compel_parasite_args_s(ctl, aio_rings_args_size(vmas));
pa = &aa->ring[0];
list_for_each_entry(vma, &vmas->h, list) {
if (!vma_area_is(vma, VMA_AREA_AIORING))
continue;
pr_debug(" `- Ring #%ld @%" PRIx64 "\n", (long)(pa - &aa->ring[0]), vma->e->start);
pa->ctx = vma->e->start;
pa->size = vma->e->end - vma->e->start;
pa++;
}
aa->nr_rings = vmas->nr_aios;
if (compel_rpc_call_sync(PARASITE_CMD_CHECK_AIOS, ctl))
return -1;
return 0;
}
int prepare_aios(struct pstree_item *t, struct task_restore_args *ta)
{
int i;
MmEntry *mm = rsti(t)->mm;
/*
* Put info about AIO rings, they will get remapped
*/
ta->rings = (struct rst_aio_ring *)rst_mem_align_cpos(RM_PRIVATE);
ta->rings_n = mm->n_aios;
for (i = 0; i < mm->n_aios; i++) {
struct rst_aio_ring *raio;
raio = rst_mem_alloc(sizeof(*raio), RM_PRIVATE);
if (!raio)
return -1;
raio->addr = mm->aios[i]->id;
raio->nr_req = mm->aios[i]->nr_req;
raio->len = mm->aios[i]->ring_len;
}
return 0;
}
| 3,357 | 21.386667 | 114 |
c
|
criu
|
criu-master/criu/apparmor.c
|
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <unistd.h>
#include <ftw.h>
#include "common/config.h"
#include "imgset.h"
#include "pstree.h"
#include "util.h"
#include "string.h"
#include "lsm.h"
#include "cr_options.h"
#include "kerndat.h"
#include "protobuf.h"
#include "images/inventory.pb-c.h"
#include "images/apparmor.pb-c.h"
/*
* AppArmor stacked profile checkpoint/restore. Previously, we just saved the
* profile that was in use by the task, and we expected it to be present on the
* target host. Now with stacking, containers are able to load their own
* profiles, so we can't rely on this.
*
* The basic idea here is that there is some (collection) of (potentially
* nested) namespaces that a container uses. We don't collect everything on the
* host level, but we *do* collect everything inside the namespace; a container
* could have loaded a profile but not yet used it when we start to checkpoint.
*
* Thus, the old code that saves and restores AA profiles is still relevant, we
* just need to add the new code in this file to walk the namespace and dump
* any blobs in that AA namespace, and then restore these blobs on restore so
* that the profiles the old code tries to use are actually present.
*/
static AaNamespace **namespaces = NULL;
static int n_namespaces = 0;
static AaNamespace *new_namespace(char *name, AaNamespace *parent)
{
void *m;
AaNamespace *ret;
ret = xzalloc(sizeof(*ret));
if (!ret)
return NULL;
aa_namespace__init(ret);
ret->name = xstrdup(name);
if (!ret->name) {
xfree(ret);
return NULL;
}
if (parent) {
m = xrealloc(parent->namespaces, sizeof(*parent->namespaces) * (parent->n_namespaces + 1));
if (!m) {
xfree(ret->name);
xfree(ret);
return NULL;
}
parent->namespaces = m;
parent->namespaces[parent->n_namespaces++] = ret;
}
m = xrealloc(namespaces, sizeof(*namespaces) * (n_namespaces + 1));
if (!m) {
if (parent)
parent->n_namespaces--;
xfree(ret->name);
xfree(ret);
return NULL;
}
namespaces = m;
namespaces[n_namespaces++] = ret;
return ret;
}
static int collect_profile(char *path, int offset, char *dir, AaNamespace *ns)
{
AaPolicy *cur;
int fd, my_offset, ret;
struct stat sb;
ssize_t n;
void *m;
FILE *f;
my_offset = snprintf(path + offset, PATH_MAX - offset, "%s/", dir);
if (my_offset < 0 || my_offset >= PATH_MAX - offset) {
pr_err("snprintf failed\n");
return -1;
}
my_offset += offset;
pr_info("dumping profile %s\n", path);
cur = xmalloc(sizeof(*cur));
if (!cur)
return -1;
aa_policy__init(cur);
__strlcat(path + my_offset, "name", PATH_MAX - my_offset);
f = fopen(path, "r");
if (!f) {
xfree(cur);
pr_perror("failed to open %s", path);
return -1;
}
ret = fscanf(f, "%ms", &cur->name);
fclose(f);
if (ret != 1) {
xfree(cur);
pr_perror("couldn't scanf %s", path);
return -1;
}
__strlcpy(path + my_offset, "raw_data", PATH_MAX - my_offset);
fd = open(path, O_RDONLY);
if (fd < 0) {
pr_perror("failed to open aa policy %s", path);
goto err;
}
if (fstat(fd, &sb) < 0) {
pr_perror("failed to stat %s", path);
goto close;
}
cur->blob.len = sb.st_size;
cur->blob.data = xmalloc(sb.st_size);
if (!cur->blob.data)
goto close;
n = read(fd, cur->blob.data, sb.st_size);
if (n < 0) {
pr_perror("failed to read %s", path);
goto close;
}
if (n != sb.st_size) {
pr_err("didn't read all of %s\n", path);
goto close;
}
close(fd);
m = xrealloc(ns->policies, sizeof(*ns->policies) * (ns->n_policies + 1));
if (!m)
goto err;
ns->policies = m;
ns->policies[ns->n_policies++] = cur;
return 0;
close:
close(fd);
err:
xfree(cur->name);
xfree(cur);
return -1;
}
static char *ns_path;
static int sort_err;
static int no_dirdots(const struct dirent *de)
{
return !dir_dots(de);
}
static int by_time(const struct dirent **de1, const struct dirent **de2)
{
char path[PATH_MAX];
struct stat sb1, sb2;
snprintf(path, sizeof(path), "%s/%s", ns_path, (*de1)->d_name);
if (stat(path, &sb1) < 0) {
pr_perror("couldn't stat %s", path);
sort_err = errno;
return 0;
}
snprintf(path, sizeof(path), "%s/%s", ns_path, (*de2)->d_name);
if (stat(path, &sb2) < 0) {
pr_perror("couldn't stat %s", path);
sort_err = errno;
return 0;
}
if (sb1.st_mtim.tv_sec == sb2.st_mtim.tv_sec) {
if (sb1.st_mtim.tv_nsec < sb2.st_mtim.tv_nsec)
return -1;
if (sb1.st_mtim.tv_nsec == sb2.st_mtim.tv_nsec)
return 0;
return 1;
} else {
if (sb1.st_mtim.tv_sec < sb2.st_mtim.tv_sec)
return -1;
if (sb1.st_mtim.tv_sec == sb2.st_mtim.tv_sec)
return 0;
return 1;
}
}
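/*
* Walk one AA namespace. @path arrives as .../policy/namespaces/<name>
* with @offset == strlen(path); we first append "/namespaces/" and
* recurse into each child namespace, then rewind to @offset, append
* "/profiles/" and dump every profile found there, sorted by mtime
* (presumably so that profiles are replayed in their original load
* order on restore).
*/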
static int walk_namespace(char *path, size_t offset, AaNamespace *ns)
{
DIR *dir = NULL;
struct dirent *de, **namelist = NULL;
int ret = -1, n_names = 0, i;
size_t my_offset;
/* collect all the child namespaces */
strcat(path, "/namespaces/");
my_offset = offset + 12;
dir = opendir(path);
if (!dir)
goto out;
while ((de = readdir(dir))) {
AaNamespace *cur;
if (dir_dots(de))
continue;
path[my_offset] = '\0';
strcat(path, de->d_name);
cur = new_namespace(de->d_name, ns);
if (!cur)
goto out;
if (walk_namespace(path, my_offset + strlen(de->d_name), cur) < 0) {
aa_namespace__free_unpacked(cur, NULL);
ns->n_namespaces--;
goto out;
}
}
closedir(dir);
dir = NULL;
/* now collect the profiles for this namespace */
path[offset] = '\0';
strcat(path, "/profiles/");
my_offset = offset + 10;
sort_err = 0;
ns_path = path;
n_names = scandir(path, &namelist, no_dirdots, by_time);
if (n_names < 0 || sort_err != 0) {
pr_perror("scandir failed");
goto out;
}
for (i = 0; i < n_names; i++) {
de = namelist[i];
path[my_offset] = 0;
if (collect_profile(path, my_offset, de->d_name, ns) < 0)
goto out;
}
ret = 0;
out:
if (dir)
closedir(dir);
if (namelist) {
for (i = 0; i < n_names; i++)
xfree(namelist[i]);
xfree(namelist);
}
return ret;
}
int collect_aa_namespace(char *profile)
{
char path[PATH_MAX], *namespace, *end;
int ret, i;
AaNamespace *ns;
if (!profile)
return 0;
namespace = strchr(profile, ':');
if (!namespace)
return 0; /* no namespace to dump */
namespace++;
if (!kdat.apparmor_ns_dumping_enabled) {
pr_warn("Apparmor namespace present but dumping not enabled\n");
return 0;
}
/* XXX: this is not strictly correct; if something is using namespace
* views, extra //s can indicate a namespace separation. However, I
* think only the apparmor developers use this feature :)
*/
end = strchr(namespace, ':');
if (!end) {
pr_err("couldn't find AA namespace end in: %s\n", namespace);
return -1;
}
*end = '\0';
for (i = 0; i < n_namespaces; i++) {
/* did we already dump this namespace? */
if (!strcmp(namespaces[i]->name, namespace)) {
*end = ':';
return 0;
}
}
pr_info("dumping AA namespace %s\n", namespace);
ns = new_namespace(namespace, NULL);
*end = ':';
if (!ns)
return -1;
ret = snprintf(path, sizeof(path), AA_SECURITYFS_PATH "/policy/namespaces/%s", ns->name);
if (ret < 0 || ret >= sizeof(path)) {
pr_err("snprintf failed?\n");
goto err;
}
if (walk_namespace(path, ret, ns) < 0) {
pr_err("walking AA namespace %s failed\n", ns->name);
goto err;
}
return 0;
err:
aa_namespace__free_unpacked(ns, NULL);
n_namespaces--;
return -1;
}
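/*
* Note: get_suspend_policy() below substitutes the task's current
* profile name for %s, so loading the compiled result keeps the label
* intact while lifting restrictions for the parasite; unsuspend_aa()
* later writes the original profile blobs back.
*/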
/* An AA profile that allows everything that the parasite needs to do */
#define PARASITE_PROFILE \
("profile %s {\n" \
" /** rwmlkix,\n" \
" unix,\n" \
" capability,\n" \
" signal,\n" \
"}\n")
static char policydir[PATH_MAX] = ".criu.temp-aa-policy.XXXXXX";
static char cachedir[PATH_MAX];
struct apparmor_parser_args {
char *cache;
char *file;
};
static int apparmor_parser_exec(void *data)
{
struct apparmor_parser_args *args = data;
execlp("apparmor_parser", "apparmor_parser", "-QWL", args->cache, args->file, NULL);
return -1;
}
static int apparmor_cache_exec(void *data)
{
execlp("apparmor_parser", "apparmor_parser", "--cache-loc", "/", "--print-cache-dir", (char *)NULL);
return -1;
}
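/*
* Render PARASITE_PROFILE for @name, compile it with apparmor_parser
* and return an mmap()ed view of the compiled blob, ready to be fed to
* apparmorfs' .replace file. Roughly:
*
* $policydir/<clean_name> <- rendered profile text
* apparmor_parser -QWL $policydir/cache $policydir/<clean_name>
* mmap($policydir/cache/<cache-dir-name>/<clean_name>)
*/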
static void *get_suspend_policy(char *name, off_t *len)
{
char policy[1024], file[PATH_MAX], cache[PATH_MAX], clean_name[PATH_MAX];
void *ret = NULL;
int n, fd, policy_len, i;
struct stat sb;
struct apparmor_parser_args args = {
.cache = cache,
.file = file,
};
*len = 0;
policy_len = snprintf(policy, sizeof(policy), PARASITE_PROFILE, name);
if (policy_len < 0 || policy_len >= sizeof(policy)) {
pr_err("policy name %s too long\n", name);
return NULL;
}
/* policy names can have /s, but file paths can't */
for (i = 0; name[i]; i++) {
if (i == PATH_MAX) {
pr_err("name %s too long\n", name);
return NULL;
}
clean_name[i] = name[i] == '/' ? '.' : name[i];
}
clean_name[i] = 0;
n = snprintf(file, sizeof(file), "%s/%s", policydir, clean_name);
if (n < 0 || n >= sizeof(file)) {
pr_err("policy name %s too long\n", clean_name);
return NULL;
}
n = snprintf(cache, sizeof(cache), "%s/cache", policydir);
if (n < 0 || n >= sizeof(cache)) {
pr_err("policy dir too long\n");
return NULL;
}
fd = open(file, O_CREAT | O_WRONLY, 0600);
if (fd < 0) {
pr_perror("couldn't create %s", file);
return NULL;
}
n = write(fd, policy, policy_len);
close(fd);
if (n < 0 || n != policy_len) {
pr_perror("couldn't write policy for %s", file);
return NULL;
}
n = run_command(cachedir, sizeof(cachedir), apparmor_cache_exec, NULL);
if (n < 0) {
pr_err("apparmor cache dir lookup failed %d\n", n);
return NULL;
}
n = run_command(NULL, 0, apparmor_parser_exec, &args);
if (n < 0) {
pr_err("apparmor parsing failed %d\n", n);
return NULL;
}
n = snprintf(file, sizeof(file), "%s/cache/%s/%s", policydir, cachedir, clean_name);
if (n < 0 || n >= sizeof(file)) {
pr_err("policy name %s too long\n", clean_name);
return NULL;
}
fd = open(file, O_RDONLY);
if (fd < 0) {
pr_perror("couldn't open %s", file);
return NULL;
}
if (fstat(fd, &sb) < 0) {
pr_perror("couldn't stat fd");
goto out;
}
ret = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (ret == MAP_FAILED) {
pr_perror("mmap of %s failed", file);
goto out;
}
*len = sb.st_size;
out:
close(fd);
return ret;
}
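/*
* Advance @pos past the next separator in an apparmor label: "//&"
* separates stacked profiles and "://" ends a ":ns" namespace prefix.
* E.g. (illustrative labels): starting at "p1//&p2" one invocation
* leaves pos at "p2"; starting at ":ns://p1" it leaves pos at "p1".
*/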
#define NEXT_AA_TOKEN(pos) \
while (*pos) { \
if (*pos == '/' && *(pos + 1) && *(pos + 1) == '/' && *(pos + 2) && *(pos + 2) == '&') { \
pos += 3; \
break; \
} \
if (*pos == ':' && *(pos + 1) && *(pos + 1) == '/' && *(pos + 2) && *(pos + 2) == '/') { \
pos += 3; \
break; \
} \
pos++; \
}
static int write_aa_policy(AaNamespace *ns, char *path, int offset, char *rewrite, bool suspend)
{
int i, my_offset, ret;
char *rewrite_pos = rewrite, namespace[PATH_MAX];
if (rewrite && suspend) {
pr_err("requesting aa rewriting and suspension at the same time is not supported\n");
return -1;
}
if (!rewrite) {
__strlcpy(namespace, ns->name, sizeof(namespace));
} else {
NEXT_AA_TOKEN(rewrite_pos);
switch (*rewrite_pos) {
case ':': {
char tmp, *end;
end = strchr(rewrite_pos + 1, ':');
if (!end) {
pr_err("invalid namespace %s\n", rewrite_pos);
return -1;
}
tmp = *end;
*end = 0;
__strlcpy(namespace, rewrite_pos + 1, sizeof(namespace));
*end = tmp;
break;
}
default:
__strlcpy(namespace, ns->name, sizeof(namespace));
for (i = 0; i < ns->n_policies; i++) {
if (strcmp(ns->policies[i]->name, rewrite_pos))
pr_warn("binary rewriting of apparmor policies not supported right now, not renaming %s to %s\n",
ns->policies[i]->name, rewrite_pos);
}
}
}
my_offset = snprintf(path + offset, PATH_MAX - offset, "/namespaces/%s", ns->name);
if (my_offset < 0 || my_offset >= PATH_MAX - offset) {
pr_err("snprintf'd too many characters\n");
return -1;
}
if (!suspend && mkdir(path, 0755) < 0 && errno != EEXIST) {
pr_perror("failed to create namespace %s", path);
goto fail;
}
for (i = 0; i < ns->n_namespaces; i++) {
if (write_aa_policy(ns->namespaces[i], path, offset + my_offset, rewrite_pos, suspend) < 0)
goto fail;
}
ret = snprintf(path + offset + my_offset, PATH_MAX - offset - my_offset, "/.replace");
if (ret < 0 || ret >= PATH_MAX - offset - my_offset) {
pr_err("snprintf failed\n");
goto fail;
}
for (i = 0; i < ns->n_policies; i++) {
AaPolicy *p = ns->policies[i];
void *data = p->blob.data;
int fd, n;
off_t len = p->blob.len;
fd = open(path, O_WRONLY);
if (fd < 0) {
pr_perror("couldn't open apparmor load file %s", path);
goto fail;
}
if (suspend) {
pr_info("suspending policy %s\n", p->name);
data = get_suspend_policy(p->name, &len);
if (!data) {
close(fd);
goto fail;
}
}
n = write(fd, data, len);
close(fd);
if (suspend && munmap(data, len) < 0) {
pr_perror("failed to munmap");
goto fail;
}
if (n != len) {
pr_perror("write AA policy %s in %s failed", p->name, namespace);
goto fail;
}
if (!suspend)
pr_info("wrote aa policy %s: %s %d\n", path, p->name, n);
}
return 0;
fail:
if (!suspend) {
path[offset + my_offset] = 0;
rmdir(path);
}
pr_err("failed to write policy in AA namespace %s\n", namespace);
return -1;
}
static int do_suspend(bool suspend)
{
int i;
for (i = 0; i < n_namespaces; i++) {
AaNamespace *ns = namespaces[i];
char path[PATH_MAX] = AA_SECURITYFS_PATH "/policy";
if (write_aa_policy(ns, path, strlen(path), opts.lsm_profile, suspend) < 0)
return -1;
}
return 0;
}
int suspend_aa(void)
{
int ret;
if (!mkdtemp(policydir)) {
pr_perror("failed to make AA policy dir");
return -1;
}
ret = do_suspend(true);
if (rmrf(policydir) < 0)
pr_err("failed removing policy dir %s\n", policydir);
return ret;
}
int unsuspend_aa(void)
{
return do_suspend(false);
}
int dump_aa_namespaces(void)
{
ApparmorEntry *ae = NULL;
int ret;
if (n_namespaces == 0)
return 0;
ae = xmalloc(sizeof(*ae));
if (!ae)
return -1;
apparmor_entry__init(ae);
ae->n_namespaces = n_namespaces;
ae->namespaces = namespaces;
ret = pb_write_one(img_from_set(glob_imgset, CR_FD_APPARMOR), ae, PB_APPARMOR);
apparmor_entry__free_unpacked(ae, NULL);
n_namespaces = -1;
namespaces = NULL;
return ret;
}
bool check_aa_ns_dumping(void)
{
char contents[49];
int major, minor, ret;
FILE *f;
f = fopen(AA_SECURITYFS_PATH "/features/domain/stack", "r");
if (!f)
return false;
ret = fscanf(f, "%48s", contents);
fclose(f);
if (ret != 1) {
pr_err("scanning aa stack feature failed\n");
return false;
}
if (strcmp("yes", contents)) {
pr_warn("aa stack feature disabled: %s\n", contents);
return false;
}
f = fopen(AA_SECURITYFS_PATH "/features/domain/version", "r");
if (!f)
return false;
ret = fscanf(f, "%d.%d", &major, &minor);
fclose(f);
if (ret != 2) {
pr_err("scanning aa stack version failed\n");
return false;
}
return major >= 1 && minor >= 2;
}
int prepare_apparmor_namespaces(void)
{
struct cr_img *img;
int ret, i;
ApparmorEntry *ae;
img = open_image(CR_FD_APPARMOR, O_RSTR);
if (!img)
return -1;
ret = pb_read_one_eof(img, &ae, PB_APPARMOR);
close_image(img);
if (ret <= 0)
return 0; /* there was no AA namespace entry */
if (!ae) {
pr_err("missing aa namespace entry\n");
return -1;
}
/* no real reason we couldn't do this in parallel, but usually we
* expect one namespace, so there's probably not a lot to be gained.
*/
for (i = 0; i < ae->n_namespaces; i++) {
char path[PATH_MAX] = AA_SECURITYFS_PATH "/policy";
if (write_aa_policy(ae->namespaces[i], path, strlen(path), opts.lsm_profile, false) < 0) {
ret = -1;
goto out;
}
}
ret = 0;
out:
apparmor_entry__free_unpacked(ae, NULL);
return ret;
}
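/*
* Illustrative example (hypothetical labels): with --lsm-profile
* "q1//&q2" and a current label of "p1//&p2//&p3", the loops below
* consume as many tokens from the current label as the replacement has
* (two) and stack the remainder back on, yielding
* "changeprofile q1//&q2//&p3".
*/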
int render_aa_profile(char **out, const char *cur)
{
const char *pos;
int n_namespaces = 0, n_profiles = 0;
bool last_namespace = false;
/* no rewriting necessary */
if (!opts.lsm_supplied) {
*out = xsprintf("changeprofile %s", cur);
if (!*out)
return -1;
return 0;
}
/* user asked to re-write to an unconfined profile */
if (!opts.lsm_profile) {
*out = NULL;
return 0;
}
pos = opts.lsm_profile;
while (*pos) {
switch (*pos) {
case ':':
n_namespaces++;
break;
default:
n_profiles++;
}
NEXT_AA_TOKEN(pos);
}
/* special case: there is no namespacing or stacking; we can just
* changeprofile to the rewritten string
*/
if (n_profiles == 1 && n_namespaces == 0) {
*out = xsprintf("changeprofile %s", opts.lsm_profile);
if (!*out)
return -1;
pr_info("rewrote apparmor profile from %s to %s\n", cur, *out);
return 0;
}
pos = cur;
while (*pos) {
switch (*pos) {
case ':':
n_namespaces--;
last_namespace = true;
break;
default:
n_profiles--;
}
NEXT_AA_TOKEN(pos);
if (n_profiles == 0 && n_namespaces == 0)
break;
}
*out = xsprintf("changeprofile %s//%s%s", opts.lsm_profile, last_namespace ? "" : "&", pos);
if (!*out)
return -1;
pr_info("rewrote apparmor profile from %s to %s\n", cur, *out);
return 0;
}
| 17,767 | 20.854859 | 106 |
c
|
criu
|
criu-master/criu/bitmap.c
|
#include "common/bitsperlong.h"
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITMAP_FIRST_WORD_MASK(start) (~0ul << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) (((nbits) % BITS_PER_LONG) ? (1ul << ((nbits) % BITS_PER_LONG)) - 1 : ~0ul)
#define small_const_nbits(nbits) (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
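/*
* Example (illustrative, BITS_PER_LONG == 64): bitmap_set(map, 62, 4)
* sets bits 62..65, i.e. the two top bits of map[0] via the first-word
* mask and the two low bits of map[1] via the last-word mask:
*
* map[0] |= 0xc000000000000000ul;
* map[1] |= 0x3ul;
*/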
void bitmap_clear(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
| 1,326 | 25.54 | 112 |
c
|
criu
|
criu-master/criu/bpfmap.c
|
#include <stdio.h>
#include <bpf/bpf.h>
#include "common/compiler.h"
#include "imgset.h"
#include "bpfmap.h"
#include "fdinfo.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "protobuf.h"
#ifndef LIBBPF_OPTS
#define LIBBPF_OPTS DECLARE_LIBBPF_OPTS
#define LEGACY_LIBBPF /* Using libbpf < 0.7 */
#endif
int is_bpfmap_link(char *link)
{
return is_anon_link_type(link, "bpf-map");
}
static void pr_info_bpfmap(char *action, BpfmapFileEntry *bpf)
{
pr_info("%sbpfmap: id %#08x map_id %#08x map_type %d flags %" PRIx32 "\n", action, bpf->id, bpf->map_id,
bpf->map_type, bpf->map_flags);
}
struct bpfmap_data_rst *bpfmap_data_hash_table[BPFMAP_DATA_TABLE_SIZE];
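/*
* A CR_FD_BPFMAP_DATA image entry is a BpfmapDataEntry message followed
* by bde->keys_bytes of raw keys and bde->values_bytes of raw values;
* bpfmap_data_read() below slurps both raw regions into one buffer and
* restore_bpfmap_data() splits it again at bde->keys_bytes.
*/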
static int bpfmap_data_read(struct cr_img *img, struct bpfmap_data_rst *r)
{
unsigned long bytes = r->bde->keys_bytes + r->bde->values_bytes;
if (!bytes)
return 0;
r->data = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (r->data == MAP_FAILED) {
pr_perror("Can't map mem for bpfmap buffers");
return -1;
}
return read_img_buf(img, r->data, bytes);
}
int do_collect_bpfmap_data(struct bpfmap_data_rst *r, ProtobufCMessage *msg, struct cr_img *img,
struct bpfmap_data_rst **bpf_hash_table)
{
int ret;
int table_index;
r->bde = pb_msg(msg, BpfmapDataEntry);
ret = bpfmap_data_read(img, r);
if (ret < 0)
return ret;
table_index = r->bde->map_id & BPFMAP_DATA_HASH_MASK;
r->next = bpf_hash_table[table_index];
bpf_hash_table[table_index] = r;
pr_info("Collected bpfmap data for %#x\n", r->bde->map_id);
return 0;
}
int restore_bpfmap_data(int map_fd, uint32_t map_id, struct bpfmap_data_rst **bpf_hash_table)
{
struct bpfmap_data_rst *map_data;
BpfmapDataEntry *bde;
void *keys = NULL;
void *values = NULL;
unsigned int count;
LIBBPF_OPTS(bpf_map_batch_opts, opts);
for (map_data = bpf_hash_table[map_id & BPFMAP_DATA_HASH_MASK]; map_data != NULL; map_data = map_data->next) {
if (map_data->bde->map_id == map_id)
break;
}
if (!map_data || map_data->bde->count == 0) {
pr_info("No data for BPF map %#x\n", map_id);
return 0;
}
bde = map_data->bde;
count = bde->count;
keys = mmap(NULL, bde->keys_bytes, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (keys == MAP_FAILED) {
pr_perror("Can't map memory for BPF map keys");
goto err;
}
memcpy(keys, map_data->data, bde->keys_bytes);
values = mmap(NULL, bde->values_bytes, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (values == MAP_FAILED) {
pr_perror("Can't map memory for BPF map values");
goto err;
}
memcpy(values, map_data->data + bde->keys_bytes, bde->values_bytes);
if (bpf_map_update_batch(map_fd, keys, values, &count, &opts)) {
pr_perror("Can't load key-value pairs to BPF map");
goto err;
}
munmap(keys, bde->keys_bytes);
munmap(values, bde->values_bytes);
return 0;
err:
munmap(keys, bde->keys_bytes);
munmap(values, bde->values_bytes);
return -1;
}
static int collect_bpfmap_data(void *obj, ProtobufCMessage *msg, struct cr_img *img)
{
return do_collect_bpfmap_data(obj, msg, img, bpfmap_data_hash_table);
}
struct collect_image_info bpfmap_data_cinfo = {
.fd_type = CR_FD_BPFMAP_DATA,
.pb_type = PB_BPFMAP_DATA,
.priv_size = sizeof(struct bpfmap_data_rst),
.collect = collect_bpfmap_data,
};
int dump_one_bpfmap_data(BpfmapFileEntry *bpf, int lfd, const struct fd_parms *p)
{
/*
* Linux kernel patch notes for bpf_map_*_batch():
*
* in_batch/out_batch are opaque values use to communicate between
* user/kernel space, in_batch/out_batch must be of key_size length.
* To start iterating from the beginning in_batch must be null,
* count is the # of key/value elements to retrieve. Note that the 'keys'
* buffer must be a buffer of key_size * count size and the 'values' buffer
* must be value_size * count, where value_size must be aligned to 8 bytes
* by userspace if it's dealing with percpu maps. 'count' will contain the
* number of keys/values successfully retrieved. Note that 'count' is an
* input/output variable and it can contain a lower value after a call.
*
* If there's no more entries to retrieve, ENOENT will be returned. If error
* is ENOENT, count might be > 0 in case it copied some values but there were
* no more entries to retrieve.
*
* Note that if the return code is an error and not -EFAULT,
* count indicates the number of elements successfully processed.
*/
struct cr_img *img;
uint32_t key_size, value_size, max_entries, count;
void *keys = NULL, *values = NULL;
void *in_batch = NULL, *out_batch = NULL;
BpfmapDataEntry bde = BPFMAP_DATA_ENTRY__INIT;
LIBBPF_OPTS(bpf_map_batch_opts, opts);
int ret;
key_size = bpf->key_size;
value_size = bpf->value_size;
max_entries = bpf->max_entries;
count = max_entries;
keys = mmap(NULL, key_size * max_entries, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (keys == MAP_FAILED) {
pr_perror("Can't map memory for BPF map keys");
goto err;
}
values = mmap(NULL, value_size * max_entries, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (values == MAP_FAILED) {
pr_perror("Can't map memory for BPF map values");
goto err;
}
out_batch = mmap(NULL, key_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (out_batch == MAP_FAILED) {
pr_perror("Can't map memory for BPF map out_batch");
goto err;
}
ret = bpf_map_lookup_batch(lfd, in_batch, out_batch, keys, values, &count, &opts);
if (ret && errno != ENOENT) {
pr_perror("Can't perform a batch lookup on BPF map");
goto err;
}
img = img_from_set(glob_imgset, CR_FD_BPFMAP_DATA);
bde.map_id = bpf->map_id;
bde.keys_bytes = (key_size * count);
bde.values_bytes = (value_size * count);
bde.count = count;
if (pb_write_one(img, &bde, PB_BPFMAP_DATA))
goto err;
if (write(img_raw_fd(img), keys, key_size * count) != (key_size * count)) {
pr_perror("Can't write BPF map's keys");
goto err;
}
if (write(img_raw_fd(img), values, value_size * count) != (value_size * count)) {
pr_perror("Can't write BPF map's values");
goto err;
}
munmap(keys, key_size * max_entries);
munmap(values, value_size * max_entries);
munmap(out_batch, key_size);
return 0;
err:
munmap(keys, key_size * max_entries);
munmap(values, value_size * max_entries);
munmap(out_batch, key_size);
return -1;
}
static int dump_one_bpfmap(int lfd, u32 id, const struct fd_parms *p)
{
BpfmapFileEntry bpf = BPFMAP_FILE_ENTRY__INIT;
FileEntry fe = FILE_ENTRY__INIT;
int ret;
/* If we are using a bigger struct than the kernel knows of,
* ensure all the unknown bits are 0 - i.e. new user-space
* does not rely on any unknown kernel feature extensions.
* https://github.com/torvalds/linux/blob/a1994480/kernel/bpf/syscall.c#L70
*/
struct bpf_map_info map_info = {};
uint32_t info_len = sizeof(struct bpf_map_info);
if (parse_fdinfo(lfd, FD_TYPES__BPFMAP, &bpf))
return -1;
ret = bpf_obj_get_info_by_fd(lfd, &map_info, &info_len);
if (ret) {
pr_perror("Could not get BPF map info");
return -1;
}
switch (bpf.map_type) {
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_ARRAY:
bpf.id = id;
bpf.flags = p->flags;
bpf.fown = (FownEntry *)&p->fown;
bpf.map_name = xstrdup(map_info.name);
bpf.ifindex = map_info.ifindex;
fe.type = FD_TYPES__BPFMAP;
fe.id = bpf.id;
fe.bpf = &bpf;
pr_info_bpfmap("Dumping ", &bpf);
if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
return -1;
pr_info_bpfmap("Dumping data for ", &bpf);
ret = dump_one_bpfmap_data(&bpf, lfd, p);
break;
default:
pr_err("CRIU does not currently support dumping BPF map type %u!\n", bpf.map_type);
ret = -1;
}
return ret;
}
const struct fdtype_ops bpfmap_dump_ops = {
.type = FD_TYPES__BPFMAP,
.dump = dump_one_bpfmap,
};
static int bpfmap_open(struct file_desc *d, int *new_fd)
{
struct bpfmap_file_info *info;
BpfmapFileEntry *bpfe;
int bpfmap_fd;
#ifdef LEGACY_LIBBPF
struct bpf_create_map_attr xattr = {};
#else
LIBBPF_OPTS(bpf_map_create_opts, bpfmap_opts);
#endif
info = container_of(d, struct bpfmap_file_info, d);
bpfe = info->bpfe;
pr_info_bpfmap("Creating and opening ", bpfe);
#ifdef LEGACY_LIBBPF
xattr.name = xstrdup(bpfe->map_name);
xattr.map_type = bpfe->map_type;
xattr.map_flags = bpfe->map_flags;
xattr.key_size = bpfe->key_size;
xattr.value_size = bpfe->value_size;
xattr.max_entries = bpfe->max_entries;
xattr.numa_node = 0;
xattr.btf_fd = 0;
xattr.btf_key_type_id = 0;
xattr.btf_value_type_id = 0;
xattr.map_ifindex = bpfe->ifindex;
xattr.inner_map_fd = 0;
bpfmap_fd = bpf_create_map_xattr(&xattr);
#else
bpfmap_opts.map_flags = bpfe->map_flags;
bpfmap_opts.map_ifindex = bpfe->ifindex;
if (bpfe->has_map_extra)
bpfmap_opts.map_extra = bpfe->map_extra;
bpfmap_fd = bpf_map_create(bpfe->map_type, bpfe->map_name, bpfe->key_size, bpfe->value_size, bpfe->max_entries,
&bpfmap_opts);
#endif
if (bpfmap_fd < 0) {
pr_perror("Can't create bpfmap %#08x", bpfe->id);
return -1;
}
if (bpfe->has_map_extra && bpfe->map_extra)
pr_warn("bpfmap map_extra has non-zero value. This will not be restored.\n");
if (restore_bpfmap_data(bpfmap_fd, bpfe->map_id, bpfmap_data_hash_table))
return -1;
if (bpfe->frozen) {
if (bpf_map_freeze(bpfmap_fd)) {
pr_perror("Can't freeze bpfmap %#08x", bpfe->id);
goto err_close;
}
}
if (rst_file_params(bpfmap_fd, bpfe->fown, bpfe->flags)) {
pr_perror("Can't restore params on bpfmap %#08x", bpfe->id);
goto err_close;
}
*new_fd = bpfmap_fd;
return 0;
err_close:
close(bpfmap_fd);
return -1;
}
static struct file_desc_ops bpfmap_desc_ops = {
.type = FD_TYPES__BPFMAP,
.open = bpfmap_open,
};
static int collect_one_bpfmap(void *obj, ProtobufCMessage *msg, struct cr_img *i)
{
struct bpfmap_file_info *info = obj;
info->bpfe = pb_msg(msg, BpfmapFileEntry);
pr_info_bpfmap("Collected ", info->bpfe);
return file_desc_add(&info->d, info->bpfe->id, &bpfmap_desc_ops);
}
struct collect_image_info bpfmap_cinfo = {
.fd_type = CR_FD_BPFMAP_FILE,
.pb_type = PB_BPFMAP_FILE,
.priv_size = sizeof(struct bpfmap_file_info),
.collect = collect_one_bpfmap,
};
| 10,158 | 26.756831 | 112 |
c
|
criu
|
criu-master/criu/cgroup-props.c
|
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "int.h"
#include "common/config.h"
#include "common/compiler.h"
#include "cgroup-props.h"
#include "cr_options.h"
#include "xmalloc.h"
#include "string.h"
#include "util.h"
#include "common/list.h"
#include "log.h"
#include "common/bug.h"
#undef LOG_PREFIX
#define LOG_PREFIX "cg-prop: "
enum {
CGP_MERGE,
CGP_REPLACE,
};
static const char *____criu_global_props____[] = {
"cgroup.clone_children",
"notify_on_release",
"cgroup.procs",
"tasks",
};
/* cgroup2 global properties */
// clang-format off
static const char *____criu_global_props_v2____[] = {
"cgroup.subtree_control",
"cgroup.max.descendants",
"cgroup.max.depth",
"cgroup.freeze",
"cgroup.type",
};
// clang-format on
cgp_t cgp_global = {
.name = "____criu_global_props____",
.nr_props = ARRAY_SIZE(____criu_global_props____),
.props = ____criu_global_props____,
};
cgp_t cgp_global_v2 = {
.name = "____criu_global_props_v2____",
.nr_props = ARRAY_SIZE(____criu_global_props_v2____),
.props = ____criu_global_props_v2____,
};
typedef struct {
struct list_head list;
cgp_t cgp;
} cgp_list_entry_t;
static LIST_HEAD(cgp_list);
static void cgp_free(cgp_list_entry_t *p)
{
size_t i;
if (p) {
for (i = 0; i < p->cgp.nr_props; i++)
xfree((void *)p->cgp.props[i]);
xfree((void *)p->cgp.name);
xfree((void *)p->cgp.props);
xfree(p);
}
}
static int cgp_merge_props(cgp_list_entry_t *d, cgp_list_entry_t *s)
{
size_t nr_props, i, j;
nr_props = d->cgp.nr_props + s->cgp.nr_props;
if (xrealloc_safe(&d->cgp.props, nr_props * sizeof(char *)))
return -ENOMEM;
/*
* FIXME: Check for duplicates in properties?
*/
for (i = d->cgp.nr_props, j = 0; i < nr_props; i++, j++) {
d->cgp.props[i] = xstrdup(s->cgp.props[j]);
if (!d->cgp.props[i])
return -ENOMEM;
d->cgp.nr_props++;
}
return 0;
}
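/*
* Example: cgp_init() loads the built-in entries first, so a
* user-supplied stream declaring "cpu" with "strategy": "merge" appends
* its properties to the built-in "cpu" list, while "replace" drops the
* built-in entry and keeps only the new one.
*/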
static int cgp_handle_props(cgp_list_entry_t **p, int strategy)
{
cgp_list_entry_t *s = *p;
cgp_list_entry_t *t;
list_for_each_entry(t, &cgp_list, list) {
if (strcmp(t->cgp.name, s->cgp.name))
continue;
pr_debug("%s \"%s\" controller properties\n", strategy == CGP_MERGE ? "Merging" : "Replacing",
s->cgp.name);
if (strategy == CGP_MERGE) {
int ret;
ret = cgp_merge_props(t, s);
cgp_free(s);
*p = NULL;
return ret;
} else if (strategy == CGP_REPLACE) {
/*
* Simply drop out previous instance.
*/
list_del(&t->list);
cgp_free(t);
break;
} else
BUG();
}
/*
* New controller, simply add it.
*/
list_add(&s->list, &cgp_list);
*p = NULL;
return 0;
}
static char *skip_spaces(char **stream, size_t *len)
{
if (stream && *len) {
char *p = *stream;
while (p && *len && *p == ' ')
p++, (*len)--;
if (p != *stream)
*stream = p;
return p;
}
return NULL;
}
static bool eat_symbol(char **stream, size_t *len, char sym, bool skip_ws)
{
char *p = skip_ws ? skip_spaces(stream, len) : (stream ? *stream : NULL);
if (!p || *p != sym || !*len)
return false;
(*stream) = p + 1;
(*len)--;
return true;
}
static bool eat_symbols(char **stream, size_t *len, char *syms, size_t n_syms, bool skip_ws)
{
char *p = skip_ws ? skip_spaces(stream, len) : (stream ? *stream : NULL);
size_t i;
if (p && *len) {
char *stream_orig = *stream;
size_t len_orig = *len;
for (i = 0; i < n_syms; i++) {
if (!eat_symbol(stream, len, syms[i], false)) {
*stream = stream_orig;
*len = len_orig;
goto nomatch;
}
}
return true;
}
nomatch:
return false;
}
static bool eat_word(char **stream, size_t *len, char *word, size_t word_len, bool skip_ws)
{
char *p = skip_ws ? skip_spaces(stream, len) : (stream ? *stream : NULL);
if (p && *len >= word_len) {
if (!strncmp(p, word, word_len)) {
(*stream) += word_len;
(*len) -= word_len;
return true;
}
}
return false;
}
static char *get_quoted(char **stream, size_t *len, bool skip_ws)
{
char *p = skip_ws ? skip_spaces(stream, len) : (stream ? *stream : NULL);
char *from;
char *dst;
if (!p || *p != '\"')
return NULL;
from = p + 1;
for (p = from, (*len)--; (*len); p++, (*len)--) {
if (*p == '\"') {
if (p == from)
break;
dst = xmalloc(p - from + 1);
if (!dst)
break;
memcpy(dst, from, p - from);
dst[p - from] = '\0';
(*stream) = p + 1;
(*len)--;
return dst;
}
}
return NULL;
}
static int cgp_parse_stream(char *stream, size_t len)
{
cgp_list_entry_t *cgp_entry = NULL;
int strategy;
int ret = 0;
char *p;
/*
* We expect the following format here
* (very simplified YAML!)
*
* "cpu":
* - "strategy": "replace"
* - "properties": ["cpu.shares", "cpu.cfs_period_us"]
* "memory":
* - "strategy": "merge"
* - "properties": ["memory.limit_in_bytes", "memory.memsw.limit_in_bytes"]
*
* and etc.
*/
while (len) {
/*
* Controller name.
*/
p = get_quoted(&stream, &len, false);
if (!p) {
pr_err("Expecting controller name\n");
goto err_parse;
}
pr_info("Parsing controller \"%s\"\n", p);
cgp_entry = xzalloc(sizeof(*cgp_entry));
if (cgp_entry) {
INIT_LIST_HEAD(&cgp_entry->list);
cgp_entry->cgp.name = p;
} else {
pr_err("Can't allocate memory for controller %s\n", p);
xfree(p);
return -ENOMEM;
}
if (!eat_symbols(&stream, &len, ":\n - ", 5, true)) {
pr_err("Expected \':\\n - \' sequence in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
if (!eat_word(&stream, &len, "\"strategy\":", 11, true)) {
pr_err("Expected \'strategy:\' keyword in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
p = get_quoted(&stream, &len, true);
if (!p) {
pr_err("Expected strategy in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
};
if (!strcmp(p, "merge")) {
strategy = CGP_MERGE;
} else if (!strcmp(p, "replace")) {
strategy = CGP_REPLACE;
} else {
pr_err("Unknown strategy \"%s\" in controller's %s stream\n", p, cgp_entry->cgp.name);
xfree(p);
goto err_parse;
}
pr_info("\tStrategy \"%s\"\n", p);
xfree(p);
if (!eat_symbols(&stream, &len, "\n - ", 4, true)) {
pr_err("Expected \'\\n - \' sequence in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
if (!eat_word(&stream, &len, "\"properties\":", 13, true)) {
pr_err("Expected \"properties:\" keyword in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
if (!eat_symbol(&stream, &len, '[', true)) {
pr_err("Expected \'[\' in controller's %s properties stream\n", cgp_entry->cgp.name);
goto err_parse;
}
while ((p = get_quoted(&stream, &len, true))) {
if (xrealloc_safe(&cgp_entry->cgp.props, (cgp_entry->cgp.nr_props + 1) * sizeof(char *))) {
pr_err("Can't allocate property for controller %s\n", cgp_entry->cgp.name);
xfree(p);
goto err_parse;
}
cgp_entry->cgp.props[cgp_entry->cgp.nr_props++] = p;
pr_info("\tProperty \"%s\"\n", p);
if (!eat_symbol(&stream, &len, ',', true)) {
if (stream[0] == ']') {
stream++, len--;
break;
}
pr_err("Expected ']' in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
}
if (cgp_entry->cgp.nr_props == 0 && !eat_symbol(&stream, &len, ']', true)) {
pr_err("Expected ']' in empty property list for %s\n", cgp_entry->cgp.name);
goto err_parse;
}
if (!eat_symbol(&stream, &len, '\n', true) && len) {
pr_err("Expected \'\\n\' symbol in controller's %s stream\n", cgp_entry->cgp.name);
goto err_parse;
}
if (cgp_handle_props(&cgp_entry, strategy))
goto err_parse;
cgp_entry = NULL;
}
ret = 0;
out:
return ret;
err_parse:
cgp_free(cgp_entry);
ret = -EINVAL;
goto out;
}
static int cgp_parse_file(char *path)
{
void *mem = MAP_FAILED;
int fd = -1, ret = -1;
struct stat st;
fd = open(path, O_RDONLY);
if (fd < 0) {
pr_perror("Can't open file %s", path);
goto err;
}
if (fstat(fd, &st)) {
pr_perror("Can't stat file %s", path);
goto err;
}
mem = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FILE, fd, 0);
if (mem == MAP_FAILED) {
pr_perror("Can't mmap file %s", path);
goto err;
}
if (cgp_parse_stream(mem, st.st_size)) {
pr_err("Failed to parse file `%s'\n", path);
goto err;
}
ret = 0;
err:
if (mem != MAP_FAILED)
munmap(mem, st.st_size);
close_safe(&fd);
return ret;
}
static int cgp_parse_builtins(void)
{
static const char predefined_stream[] = "\"cpu\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"cpu.shares\", "
"\"cpu.cfs_period_us\", "
"\"cpu.cfs_quota_us\", "
"\"cpu.rt_period_us\", "
"\"cpu.rt_runtime_us\" "
"]\n"
/* limit_in_bytes and memsw.limit_in_bytes must be set in this order */
"\"memory\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"memory.limit_in_bytes\", "
"\"memory.memsw.limit_in_bytes\", "
"\"memory.swappiness\", "
"\"memory.soft_limit_in_bytes\", "
"\"memory.move_charge_at_immigrate\", "
"\"memory.oom_control\", "
"\"memory.use_hierarchy\", "
"\"memory.kmem.limit_in_bytes\", "
"\"memory.kmem.tcp.limit_in_bytes\" "
"]\n"
/*
* cpuset.cpus and cpuset.mems must be set before the process moves
* into its cgroup; they are "initialized" below to whatever the root
* values are in copy_special_cg_props so as not to cause ENOSPC when
* values are restored via this code.
*/
"\"cpuset\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"cpuset.cpus\", "
"\"cpuset.mems\", "
"\"cpuset.memory_migrate\", "
"\"cpuset.cpu_exclusive\", "
"\"cpuset.mem_exclusive\", "
"\"cpuset.mem_hardwall\", "
"\"cpuset.memory_spread_page\", "
"\"cpuset.memory_spread_slab\", "
"\"cpuset.sched_load_balance\", "
"\"cpuset.sched_relax_domain_level\" "
"]\n"
"\"blkio\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"blkio.weight\" "
"]\n"
"\"freezer\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"]\n"
"\"perf_event\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"]\n"
"\"net_cls\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"net_cls.classid\" "
"]\n"
"\"net_prio\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"net_prio.ifpriomap\" "
"]\n"
"\"pids\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"pids.max\" "
"]\n"
"\"devices\":\n"
" - \"strategy\": \"replace\"\n"
" - \"properties\": "
"[ "
"\"devices.list\" "
"]\n";
return cgp_parse_stream((void *)predefined_stream, strlen(predefined_stream));
}
int cgp_init(char *stream, size_t len, char *path)
{
int ret;
ret = cgp_parse_builtins();
if (ret)
goto err;
if (stream && len) {
ret = cgp_parse_stream(stream, len);
if (ret)
goto err;
}
if (path)
ret = cgp_parse_file(path);
err:
return ret;
}
static char **dump_controllers;
static size_t nr_dump_controllers;
bool cgp_add_dump_controller(const char *name)
{
if (xrealloc_safe(&dump_controllers, (nr_dump_controllers + 1) * sizeof(char *))) {
pr_err("Can't add controller \"%s\" to mark\n", name);
return false;
}
dump_controllers[nr_dump_controllers] = xstrdup(name);
if (!dump_controllers[nr_dump_controllers])
return false;
pr_debug("Mark controller \"%s\" to dump\n", name);
nr_dump_controllers++;
return true;
}
bool cgp_should_skip_controller(const char *name)
{
size_t i;
/*
* Dump all by default.
*/
if (!nr_dump_controllers)
return false;
for (i = 0; i < nr_dump_controllers; i++) {
if (!strcmp(name, dump_controllers[i]))
return false;
}
return true;
}
const cgp_t *cgp_get_props(const char *name)
{
cgp_list_entry_t *p;
list_for_each_entry(p, &cgp_list, list) {
if (!strcmp(p->cgp.name, name))
return &p->cgp;
}
return NULL;
}
void cgp_fini(void)
{
cgp_list_entry_t *p, *t;
size_t i;
list_for_each_entry_safe(p, t, &cgp_list, list)
cgp_free(p);
INIT_LIST_HEAD(&cgp_list);
for (i = 0; i < nr_dump_controllers; i++)
xfree(dump_controllers[i]);
xfree(dump_controllers);
nr_dump_controllers = 0;
}
| 12,694 | 20.850258 | 96 |
c
|
criu
|
criu-master/criu/clone-noasan.c
|
#include <stdlib.h>
#include <sched.h>
#include <unistd.h>
#include <compel/plugins/std/syscall-codes.h>
#include "sched.h"
#include "common/compiler.h"
#include "log.h"
#include "common/bug.h"
/*
* ASan doesn't play nicely with clone if we use the current stack for
* the child task. ASan puts local variables on a fake stack
* to catch use-after-return bugs:
* https://github.com/google/sanitizers/wiki/AddressSanitizerUseAfterReturn#algorithm
*
* So it becomes easy to overflow this fake stack frame in the cloned child.
* We need a real stack for clone().
*
* To work around this we add clone_noasan(), a non-instrumented wrapper
* for clone(). Unfortunately we can't use __attribute__((no_sanitize_address))
* for this because of bug in GCC > 6:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69863
*
* So the only way is to put this wrapper in separate non-instrumented file
*
* WARNING: When calling clone_noasan make sure you're not sitting in a
* later __restore__ phase where other tasks might be creating threads,
* otherwise all calls to clone_noasan should be guarded with
*
* lock_last_pid
* clone_noasan
* ... wait for process to finish ...
* unlock_last_pid
*/
int clone_noasan(int (*fn)(void *), int flags, void *arg)
{
void *stack_ptr = (void *)round_down((unsigned long)&stack_ptr - 1024, 16);
BUG_ON((flags & CLONE_VM) && !(flags & CLONE_VFORK));
/*
* Reserve some bytes for clone() internal needs
* and use as stack the address above this area.
*/
return clone(fn, stack_ptr, flags, arg);
}
int clone3_with_pid_noasan(int (*fn)(void *), void *arg, int flags, int exit_signal, pid_t pid)
{
struct _clone_args c_args = {};
BUG_ON(flags & CLONE_VM);
/*
* Make sure no child signals are requested. clone3() uses
* exit_signal for that.
*/
BUG_ON(flags & 0xff);
pr_debug("Creating process using clone3()\n");
/*
* clone3() explicitly blocks setting an exit_signal
* if CLONE_PARENT is specified. With clone() it also
* did not work, but there was no error message. The
* exit signal from the thread group leader is taken.
*/
if (!(flags & CLONE_PARENT)) {
if (exit_signal != SIGCHLD) {
pr_err("Exit signal not SIGCHLD\n");
errno = EINVAL;
return -1;
}
c_args.exit_signal = exit_signal;
}
c_args.flags = flags;
c_args.set_tid = ptr_to_u64(&pid);
c_args.set_tid_size = 1;
pid = syscall(__NR_clone3, &c_args, sizeof(c_args));
if (pid == 0)
exit(fn(arg));
return pid;
}
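/*
* A minimal usage sketch (hypothetical caller), serialized against
* concurrent forks as the header comment above advises for
* clone_noasan():
*
* lock_last_pid();
* pid = clone3_with_pid_noasan(fn, arg, 0, SIGCHLD, pid);
* ... wait for the child to finish ...
* unlock_last_pid();
*/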
| 2,473 | 28.105882 | 95 |
c
|
criu
|
criu-master/criu/crtools.c
|
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <sched.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <dlfcn.h>
#include <sys/utsname.h>
#include "int.h"
#include "page.h"
#include "common/compiler.h"
#include "crtools.h"
#include "cr_options.h"
#include "external.h"
#include "files.h"
#include "sk-inet.h"
#include "net.h"
#include "page-xfer.h"
#include "tty.h"
#include "file-lock.h"
#include "cr-service.h"
#include "plugin.h"
#include "criu-log.h"
#include "util.h"
#include "protobuf-desc.h"
#include "namespaces.h"
#include "cgroup.h"
#include "cpu.h"
#include "fault-injection.h"
#include "proc_parse.h"
#include "kerndat.h"
#include "setproctitle.h"
#include "sysctl.h"
void flush_early_log_to_stderr(void) __attribute__((destructor));
void flush_early_log_to_stderr(void)
{
flush_early_log_buffer(STDERR_FILENO);
}
static int image_dir_mode(char *argv[], int optind)
{
switch (opts.mode) {
case CR_DUMP:
/* fallthrough */
case CR_PRE_DUMP:
return O_DUMP;
case CR_RESTORE:
return O_RSTR;
case CR_CPUINFO:
if (!strcmp(argv[optind + 1], "dump"))
return O_DUMP;
/* fallthrough */
default:
return -1;
}
/* never reached */
BUG();
return -1;
}
static int parse_criu_mode(char *mode)
{
if (!strcmp(mode, "dump"))
opts.mode = CR_DUMP;
else if (!strcmp(mode, "pre-dump"))
opts.mode = CR_PRE_DUMP;
else if (!strcmp(mode, "restore"))
opts.mode = CR_RESTORE;
else if (!strcmp(mode, "lazy-pages"))
opts.mode = CR_LAZY_PAGES;
else if (!strcmp(mode, "check"))
opts.mode = CR_CHECK;
else if (!strcmp(mode, "page-server"))
opts.mode = CR_PAGE_SERVER;
else if (!strcmp(mode, "service"))
opts.mode = CR_SERVICE;
else if (!strcmp(mode, "swrk"))
opts.mode = CR_SWRK;
else if (!strcmp(mode, "dedup"))
opts.mode = CR_DEDUP;
else if (!strcmp(mode, "cpuinfo"))
opts.mode = CR_CPUINFO;
else if (!strcmp(mode, "exec"))
opts.mode = CR_EXEC_DEPRECATED;
else if (!strcmp(mode, "show"))
opts.mode = CR_SHOW_DEPRECATED;
else
return -1;
return 0;
}
int main(int argc, char *argv[], char *envp[])
{
int ret = -1;
bool usage_error = true;
bool has_exec_cmd = false;
bool has_sub_command;
int state = PARSING_GLOBAL_CONF;
BUILD_BUG_ON(CTL_32 != SYSCTL_TYPE__CTL_32);
BUILD_BUG_ON(__CTL_STR != SYSCTL_TYPE__CTL_STR);
/* We use it for fd overlap handling in clone_service_fd() */
BUG_ON(get_service_fd(SERVICE_FD_MIN + 1) < get_service_fd(SERVICE_FD_MAX - 1));
if (fault_injection_init()) {
pr_err("Failed to initialize fault injection when initializing crtools.\n");
return 1;
}
cr_pb_init();
__setproctitle_init(argc, argv, envp);
if (argc < 2)
goto usage;
init_opts();
ret = parse_options(argc, argv, &usage_error, &has_exec_cmd, state);
if (ret == 1)
return 1;
if (ret == 2)
goto usage;
if (optind >= argc) {
pr_err("command is required\n");
goto usage;
}
log_set_loglevel(opts.log_level);
/*
* The kernel might send us lethal signals in the following cases:
* 1) Writing a pipe which reader has disappeared.
* 2) Writing to a socket of type SOCK_STREAM which is no longer connected.
* We deal with write()/send() failures on our own, and prefer not to get killed.
* So we ignore SIGPIPEs.
*
* Pipes are used in various places:
* 1) Receiving application page data
* 2) Transmitting data to the image streamer
* 3) Emitting logs (potentially to a pipe).
* Sockets are mainly used in transmitting memory data.
*/
if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
pr_perror("Failed to ignore SIGPIPE");
return 1;
}
if (parse_criu_mode(argv[optind])) {
pr_err("unknown command: %s\n", argv[optind]);
goto usage;
}
if (opts.mode == CR_SWRK) {
if (argc != optind + 2) {
fprintf(stderr, "Usage: criu swrk <fd>\n");
return 1;
}
/*
* This is to start criu service worker from libcriu calls.
* The usage is "criu swrk <fd>" and is not for CLI/scripts.
* The arguments semantics can change at any time with the
* corresponding lib call change.
*/
opts.swrk_restore = true;
return cr_service_work(atoi(argv[optind + 1]));
}
if (check_caps())
return 1;
if (opts.imgs_dir == NULL)
SET_CHAR_OPTS(imgs_dir, ".");
if (opts.work_dir == NULL)
SET_CHAR_OPTS(work_dir, opts.imgs_dir);
has_sub_command = (argc - optind) > 1;
if (has_exec_cmd) {
if (!has_sub_command) {
pr_err("--exec-cmd requires a command\n");
goto usage;
}
if (opts.mode != CR_RESTORE) {
pr_err("--exec-cmd is available for the restore command only\n");
goto usage;
}
if (opts.restore_detach) {
pr_err("--restore-detached and --exec-cmd cannot be used together\n");
goto usage;
}
opts.exec_cmd = xmalloc((argc - optind) * sizeof(char *));
if (!opts.exec_cmd)
return 1;
memcpy(opts.exec_cmd, &argv[optind + 1], (argc - optind - 1) * sizeof(char *));
opts.exec_cmd[argc - optind - 1] = NULL;
} else {
/* No subcommands except for cpuinfo and restore --exec-cmd */
if (opts.mode != CR_CPUINFO && has_sub_command) {
pr_err("excessive parameter%s for command %s\n", (argc - optind) > 2 ? "s" : "", argv[optind]);
goto usage;
} else if (opts.mode == CR_CPUINFO && !has_sub_command) {
pr_err("cpuinfo requires an action: dump or check\n");
goto usage;
}
}
if (opts.stream && image_dir_mode(argv, optind) == -1) {
pr_err("--stream cannot be used with the %s command\n", argv[optind]);
goto usage;
}
/* We must not open imgs dir, if service is called */
if (opts.mode != CR_SERVICE) {
ret = open_image_dir(opts.imgs_dir, image_dir_mode(argv, optind));
if (ret < 0) {
pr_err("Couldn't open image dir %s\n", opts.imgs_dir);
return 1;
}
}
/*
* When a process group becomes an orphan,
* its processes are sent a SIGHUP signal
*/
if (opts.mode == CR_RESTORE && opts.restore_detach && opts.final_state == TASK_STOPPED && opts.shell_job)
pr_warn("Stopped and detached shell job will get SIGHUP from OS.\n");
if (chdir(opts.work_dir)) {
pr_perror("Can't change directory to %s", opts.work_dir);
return 1;
}
util_init();
if (log_init(opts.output))
return 1;
if (kerndat_init()) {
pr_err("Could not initialize kernel features detection.\n");
return 1;
}
if (check_options())
return 1;
if (fault_injected(FI_CANNOT_MAP_VDSO))
kdat.can_map_vdso = 0;
if (!list_empty(&opts.inherit_fds)) {
if (opts.mode != CR_RESTORE) {
pr_err("--inherit-fd is restore-only option\n");
return 1;
}
/* now that log file is set up, print inherit fd list */
inherit_fd_log();
}
if (opts.img_parent)
pr_info("Will do snapshot from %s\n", opts.img_parent);
if (opts.mode == CR_DUMP) {
if (!opts.tree_id)
goto opt_pid_missing;
return cr_dump_tasks(opts.tree_id);
}
if (opts.mode == CR_PRE_DUMP) {
if (!opts.tree_id)
goto opt_pid_missing;
if (opts.lazy_pages) {
pr_err("Cannot pre-dump with --lazy-pages\n");
return 1;
}
return cr_pre_dump_tasks(opts.tree_id) != 0;
}
if (opts.mode == CR_RESTORE) {
if (opts.tree_id)
pr_warn("Using -t with criu restore is obsolete\n");
ret = cr_restore_tasks();
if (ret == 0 && opts.exec_cmd) {
close_pid_proc();
execvp(opts.exec_cmd[0], opts.exec_cmd);
pr_perror("Failed to exec command %s", opts.exec_cmd[0]);
ret = 1;
}
return ret != 0;
}
if (opts.mode == CR_LAZY_PAGES)
return cr_lazy_pages(opts.daemon_mode) != 0;
if (opts.mode == CR_CHECK)
return cr_check() != 0;
if (opts.mode == CR_PAGE_SERVER)
return cr_page_server(opts.daemon_mode, false, -1) != 0;
if (opts.mode == CR_SERVICE)
return cr_service(opts.daemon_mode);
if (opts.mode == CR_DEDUP)
return cr_dedup() != 0;
if (opts.mode == CR_CPUINFO) {
if (!argv[optind + 1]) {
pr_err("cpuinfo requires an action: dump or check\n");
goto usage;
}
if (!strcmp(argv[optind + 1], "dump"))
return cpuinfo_dump();
else if (!strcmp(argv[optind + 1], "check"))
return cpuinfo_check();
}
if (opts.mode == CR_EXEC_DEPRECATED) {
pr_err("The \"exec\" action is deprecated by the Compel library.\n");
return -1;
}
if (opts.mode == CR_SHOW_DEPRECATED) {
pr_err("The \"show\" action is deprecated by the CRIT utility.\n");
pr_err("To view an image use the \"crit decode -i $name --pretty\" command.\n");
return -1;
}
pr_err("unknown command: %s\n", argv[optind]);
usage:
pr_msg("\n"
"Usage:\n"
" criu dump|pre-dump -t PID [<options>]\n"
" criu restore [<options>]\n"
" criu check [--feature FEAT]\n"
" criu page-server\n"
" criu service [<options>]\n"
" criu dedup\n"
" criu lazy-pages -D DIR [<options>]\n"
"\n"
"Commands:\n"
" dump checkpoint a process/tree identified by pid\n"
" pre-dump pre-dump task(s) minimizing their frozen time\n"
" restore restore a process/tree\n"
" check checks whether the kernel support is up-to-date\n"
" page-server launch page server\n"
" service launch service\n"
" dedup remove duplicates in memory dump\n"
" cpuinfo dump writes cpu information into image file\n"
" cpuinfo check validates cpu information read from image file\n");
if (usage_error) {
pr_msg("\nTry -h|--help for more info\n");
return 1;
}
pr_msg("\n"
"Most of the true / false long options (the ones without arguments) can be\n"
"prefixed with --no- to negate the option (example: --display-stats and\n"
"--no-display-stats).\n"
"\n"
"Dump/Restore options:\n"
"\n"
"* Generic:\n"
" -t|--tree PID checkpoint a process tree identified by PID\n"
" -d|--restore-detached detach after restore\n"
" -S|--restore-sibling restore root task as sibling\n"
" -s|--leave-stopped leave tasks in stopped state after checkpoint\n"
" -R|--leave-running leave tasks in running state after checkpoint\n"
" -D|--images-dir DIR directory for image files\n"
" --pidfile FILE write root task, service or page-server pid to FILE\n"
" -W|--work-dir DIR directory to cd and write logs/pidfiles/stats to\n"
" (if not specified, value of --images-dir is used)\n"
" --cpu-cap [CAP] CPU capabilities to write/check. CAP is comma-separated\n"
" list of: cpu, fpu, all, ins, none. To disable\n"
" a capability, use ^CAP. Empty argument implies all\n"
" --exec-cmd execute the command specified after '--' on successful\n"
" restore making it the parent of the restored process\n"
" --freeze-cgroup use cgroup freezer to collect processes\n"
" --weak-sysctls skip restoring sysctls that are not available\n"
" --lazy-pages restore pages on demand\n"
" this requires running a second instance of criu\n"
" in lazy-pages mode: 'criu lazy-pages -D DIR'\n"
" --lazy-pages and lazy-pages mode require userfaultfd\n"
" --stream dump/restore images using criu-image-streamer\n"
" --mntns-compat-mode Use mount engine in compatibility mode. By default criu\n"
" tries to use mount-v2 mode with more reliable algorithm\n"
" based on MOVE_MOUNT_SET_GROUP kernel feature\n"
" --network-lock METHOD network locking/unlocking method; argument\n"
" can be 'nftables' or 'iptables' (default).\n"
" --unprivileged accept limitations when running as non-root\n"
" consult documentation for further details\n"
"\n"
"* External resources support:\n"
" --external RES dump objects from this list as external resources:\n"
" Formats of RES on dump:\n"
" tty[rdev:dev]\n"
" file[mnt_id:inode]\n"
" dev[major/minor]:NAME\n"
" unix[ino]\n"
" mnt[MOUNTPOINT]:COOKIE\n"
" mnt[]{:AUTO_OPTIONS}\n"
" Formats of RES on restore:\n"
" dev[NAME]:DEVPATH\n"
" veth[IFNAME]:OUTNAME{@BRIDGE}\n"
" macvlan[IFNAME]:OUTNAME\n"
" mnt[COOKIE]:ROOT\n"
" netdev[IFNAME]:ORIGNAME\n"
"\n"
"* Special resources support:\n"
" --" SK_EST_PARAM " checkpoint/restore established TCP connections\n"
" --" SK_INFLIGHT_PARAM " skip (ignore) in-flight TCP connections\n"
" --" SK_CLOSE_PARAM " don't dump the state of, or block, established tcp\n"
" connections, and restore them in closed state.\n"
" -r|--root PATH change the root filesystem (when run in mount namespace)\n"
" --evasive-devices use any path to a device file if the original one\n"
" is inaccessible\n"
" --link-remap allow one to link unlinked files back when possible\n"
" --ghost-limit size limit max size of deleted file contents inside image\n"
" --ghost-fiemap enable dumping of deleted files using fiemap\n"
" --action-script FILE add an external action script\n"
" -j|--" OPT_SHELL_JOB " allow one to dump and restore shell jobs\n"
" -l|--" OPT_FILE_LOCKS " handle file locks, for safety, only used for container\n"
" -L|--libdir path to a plugin directory (by default " CR_PLUGIN_DEFAULT ")\n"
" --timeout NUM a timeout (in seconds) on collecting tasks during dump\n"
" (default 10 seconds)\n"
" --force-irmap force resolving names for inotify/fsnotify watches\n"
" --irmap-scan-path FILE\n"
" add a path the irmap hints to scan\n"
" --manage-cgroups [m] dump/restore process' cgroups; argument can be one of\n"
" 'none', 'props', 'soft' (default), 'full', 'strict'\n"
" or 'ignore'\n"
" --cgroup-root [controller:]/newroot\n"
" on dump: change the root for the controller that will\n"
" be dumped. By default, only the paths with tasks in\n"
" them and below will be dumped.\n"
" on restore: change the root cgroup the controller will\n"
" be installed into. No controller means that root is the\n"
" default for all controllers not specified\n"
" --cgroup-props STRING\n"
" define cgroup controllers and properties\n"
" to be checkpointed, which are described\n"
" via STRING using simplified YAML format\n"
" --cgroup-props-file FILE\n"
" same as --cgroup-props, but taking description\n"
" from the path specified\n"
" --cgroup-dump-controller NAME\n"
" define cgroup controller to be dumped\n"
" and skip anything else present in system\n"
" --cgroup-yard PATH\n"
" instead of trying to mount cgroups in CRIU, provide\n"
" a path to a directory with already created cgroup yard.\n"
" Useful if you don't want to grant CAP_SYS_ADMIN to CRIU\n"
" --lsm-profile TYPE:NAME\n"
" Specify an LSM profile to be used during restore.\n"
" The type can be either 'apparmor' or 'selinux'.\n"
" --lsm-mount-context CTX\n"
" Specify a mount context to be used during restore.\n"
" Only mounts with an existing context will have their\n"
" mount context replaced with CTX.\n"
" --skip-mnt PATH ignore this mountpoint when dumping the mount namespace\n"
" --enable-fs FSNAMES a comma separated list of filesystem names or \"all\"\n"
" force criu to (try to) dump/restore these filesystem's\n"
" mountpoints even if fs is not supported\n"
" --inherit-fd fd[NUM]:RES\n"
" Inherit file descriptors, treating fd NUM as being\n"
" already opened via an existing RES, which can be:\n"
" tty[rdev:dev]\n"
" pipe[inode]\n"
" socket[inode]\n"
" file[mnt_id:inode]\n"
" /memfd:name\n"
" path/to/file\n"
" --empty-ns net Create a namespace, but don't restore its properties\n"
" (assuming it will be restored by action scripts)\n"
" -J|--join-ns NS:{PID|NS_FILE}[,OPTIONS]\n"
" Join existing namespace and restore process in it.\n"
" Namespace can be specified as either pid or file path.\n"
" OPTIONS can be used to specify parameters for userns:\n"
" user:PID,UID,GID\n"
" --file-validation METHOD\n"
" pass the validation method to be used; argument\n"
" can be 'filesize' or 'buildid' (default).\n"
" --skip-file-rwx-check\n"
" Skip checking file permissions\n"
" (r/w/x for u/g/o) on restore.\n"
"\n"
"Check options:\n"
" Without options, \"criu check\" checks availability of absolutely required\n"
" kernel features, critical for performing dump and restore.\n"
" --extra add check for extra kernel features\n"
" --experimental add check for experimental kernel features\n"
" --all same as --extra --experimental\n"
" --feature FEAT only check a particular feature, one of:");
pr_check_features(" ", ", ", 80);
pr_msg("\n"
"* Logging:\n"
" -o|--log-file FILE log file name\n"
" --log-pid enable per-process logging to separate FILE.pid files\n"
" -v[v...]|--verbosity increase verbosity (can use multiple v)\n"
" -vNUM|--verbosity=NUM set verbosity to NUM (higher level means more output):\n"
" -v1 - only errors and messages\n"
" -v2 - also warnings (default level)\n"
" -v3 - also information messages and timestamps\n"
" -v4 - lots of debug\n"
" --display-stats print out dump/restore stats\n"
"\n"
"* Memory dumping options:\n"
" --track-mem turn on memory changes tracker in kernel\n"
" --prev-images-dir DIR path to images from previous dump (relative to -D)\n"
" --page-server send pages to page server (see options below as well)\n"
" --auto-dedup when used on dump it will deduplicate \"old\" data in\n"
" pages images of previous dump\n"
" when used on restore, as soon as page is restored, it\n"
" will be punched from the image\n"
" --pre-dump-mode splice - parasite based pre-dumping (default)\n"
" read - process_vm_readv syscall based pre-dumping\n"
"\n"
"Page/Service server options:\n"
" --address ADDR address of server or service\n"
" --port PORT port of page server\n"
" --ps-socket FD use specified FD as page server socket\n"
" -d|--daemon run in the background after creating socket\n"
" --status-fd FD write \\0 to the FD and close it once process is ready\n"
" to handle requests\n"
#ifdef CONFIG_GNUTLS
" --tls-cacert FILE trust certificates signed only by this CA\n"
" --tls-cacrl FILE path to CA certificate revocation list file\n"
" --tls-cert FILE path to TLS certificate file\n"
" --tls-key FILE path to TLS private key file\n"
" --tls use TLS to secure remote connection\n"
" --tls-no-cn-verify do not verify common name in server certificate\n"
#endif
"\n"
"Configuration file options:\n"
" --config FILEPATH pass a specific configuration file\n"
" --no-default-config forbid usage of default configuration files\n"
"\n"
"Other options:\n"
" -h|--help show this text\n"
" -V|--version show version\n");
return 0;
opt_pid_missing:
pr_err("pid not specified\n");
return 1;
}
| 21,860 | 36.887348 | 106 |
c
|
criu
|
criu-master/criu/eventfd.c
|
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include "common/compiler.h"
#include "imgset.h"
#include "eventfd.h"
#include "fdinfo.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "protobuf.h"
#include "images/eventfd.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "eventfd: "
struct eventfd_file_info {
EventfdFileEntry *efe;
struct file_desc d;
};
/* Checks if file descriptor @lfd is eventfd */
int is_eventfd_link(char *link)
{
return is_anon_link_type(link, "[eventfd]");
}
static void pr_info_eventfd(char *action, EventfdFileEntry *efe)
{
pr_info("%s: id %#08x flags %#04x counter %#016" PRIx64 "\n", action, efe->id, efe->flags, efe->counter);
}
static int dump_one_eventfd(int lfd, u32 id, const struct fd_parms *p)
{
EventfdFileEntry efd = EVENTFD_FILE_ENTRY__INIT;
FileEntry fe = FILE_ENTRY__INIT;
if (parse_fdinfo(lfd, FD_TYPES__EVENTFD, &efd))
return -1;
efd.id = id;
efd.flags = p->flags;
efd.fown = (FownEntry *)&p->fown;
fe.type = FD_TYPES__EVENTFD;
fe.id = efd.id;
fe.efd = &efd;
pr_info_eventfd("Dumping ", &efd);
return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
const struct fdtype_ops eventfd_dump_ops = {
.type = FD_TYPES__EVENTFD,
.dump = dump_one_eventfd,
};
static int eventfd_open(struct file_desc *d, int *new_fd)
{
struct eventfd_file_info *info;
int tmp;
info = container_of(d, struct eventfd_file_info, d);
tmp = eventfd(info->efe->counter, 0);
if (tmp < 0) {
pr_perror("Can't create eventfd %#08x", info->efe->id);
return -1;
}
if (rst_file_params(tmp, info->efe->fown, info->efe->flags)) {
pr_perror("Can't restore params on eventfd %#08x", info->efe->id);
goto err_close;
}
*new_fd = tmp;
return 0;
err_close:
close(tmp);
return -1;
}
static struct file_desc_ops eventfd_desc_ops = {
.type = FD_TYPES__EVENTFD,
.open = eventfd_open,
};
static int collect_one_efd(void *obj, ProtobufCMessage *msg, struct cr_img *i)
{
struct eventfd_file_info *info = obj;
info->efe = pb_msg(msg, EventfdFileEntry);
pr_info_eventfd("Collected ", info->efe);
return file_desc_add(&info->d, info->efe->id, &eventfd_desc_ops);
}
struct collect_image_info eventfd_cinfo = {
.fd_type = CR_FD_EVENTFD_FILE,
.pb_type = PB_EVENTFD_FILE,
.priv_size = sizeof(struct eventfd_file_info),
.collect = collect_one_efd,
};
| 2,545 | 21.13913 | 106 |
c
|
criu
|
criu-master/criu/eventpoll.c
|
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include "types.h"
#include "crtools.h"
#include "common/compiler.h"
#include "imgset.h"
#include "rst_info.h"
#include "eventpoll.h"
#include "fdinfo.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "pstree.h"
#include "parasite.h"
#include "kerndat.h"
#include "file-ids.h"
#include "kcmp-ids.h"
#include "protobuf.h"
#include "images/eventpoll.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "epoll: "
static LIST_HEAD(dinfo_list);
typedef struct {
uint32_t tfd;
uint32_t off;
uint32_t idx;
} toff_t;
struct eventpoll_dinfo {
struct list_head list;
FileEntry *fe;
EventpollFileEntry *e;
toff_t *toff;
FownEntry fown;
pid_t pid;
int efd;
};
struct eventpoll_file_info {
EventpollFileEntry *efe;
struct file_desc d;
};
/* Checks if the link text @link refers to an eventpoll file */
int is_eventpoll_link(char *link)
{
return is_anon_link_type(link, "[eventpoll]");
}
static void pr_info_eventpoll_tfd(char *action, uint32_t id, EventpollTfdEntry *e)
{
pr_info("%seventpoll-tfd: id %#08x tfd %8d events %#08x data %#016" PRIx64 "\n", action, id, e->tfd, e->events,
e->data);
}
static void pr_info_eventpoll(char *action, EventpollFileEntry *e)
{
pr_info("%seventpoll: id %#08x flags %#04x\n", action, e->id, e->flags);
}
static int queue_dinfo(FileEntry **fe, EventpollFileEntry **e, toff_t **toff, const struct fd_parms *p)
{
struct eventpoll_dinfo *dinfo;
pr_info_eventpoll("Queueing ", *e);
dinfo = xmalloc(sizeof(*dinfo));
if (!dinfo)
return -ENOMEM;
memcpy(&dinfo->fown, &p->fown, sizeof(dinfo->fown));
INIT_LIST_HEAD(&dinfo->list);
dinfo->fe = *fe;
dinfo->e = *e;
dinfo->toff = *toff;
dinfo->e->fown = &dinfo->fown;
dinfo->pid = p->pid;
dinfo->efd = p->fd;
*fe = NULL;
*e = NULL;
*toff = NULL;
list_add_tail(&dinfo->list, &dinfo_list);
return 0;
}
static void dequeue_dinfo(struct eventpoll_dinfo *dinfo)
{
ssize_t i;
for (i = 0; i < dinfo->e->n_tfd; i++)
eventpoll_tfd_entry__free_unpacked(dinfo->e->tfd[i], NULL);
xfree(dinfo->fe);
xfree(dinfo->e->tfd);
xfree(dinfo->e);
xfree(dinfo->toff);
list_del(&dinfo->list);
xfree(dinfo);
}
int flush_eventpoll_dinfo_queue(void)
{
struct eventpoll_dinfo *dinfo, *t;
ssize_t i;
list_for_each_entry_safe(dinfo, t, &dinfo_list, list) {
EventpollFileEntry *e = dinfo->e;
for (i = 0; i < e->n_tfd; i++) {
EventpollTfdEntry *tfde = e->tfd[i];
struct kid_elem ke = {
.pid = dinfo->pid,
.genid = make_gen_id(tfde->dev, tfde->inode, tfde->pos),
.idx = tfde->tfd,
};
kcmp_epoll_slot_t slot = {
.efd = dinfo->efd,
.tfd = tfde->tfd,
.toff = dinfo->toff[i].off,
};
struct kid_elem *t = kid_lookup_epoll_tfd(&fd_tree, &ke, &slot);
if (!t) {
pr_debug("kid_lookup_epoll: no match pid %d efd %d tfd %d toff %u\n", dinfo->pid,
dinfo->efd, tfde->tfd, dinfo->toff[i].off);
goto err;
}
pr_debug("kid_lookup_epoll: rbsearch match pid %d efd %d tfd %d toff %u -> %d\n", dinfo->pid,
dinfo->efd, tfde->tfd, dinfo->toff[i].off, t->idx);
/* Make sure the pid matches */
if (t->pid != dinfo->pid) {
pr_debug("kid_lookup_epoll: pid mismatch %d %d efd %d tfd %d toff %u\n", dinfo->pid,
t->pid, dinfo->efd, tfde->tfd, dinfo->toff[i].off);
goto err;
}
tfde->tfd = t->idx;
}
pr_info_eventpoll("Dumping ", e);
if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), dinfo->fe, PB_FILE))
goto err;
for (i = 0; i < e->n_tfd; i++)
pr_info_eventpoll_tfd("Dumping: ", e->id, e->tfd[i]);
dequeue_dinfo(dinfo);
}
return 0;
err:
list_for_each_entry_safe(dinfo, t, &dinfo_list, list)
dequeue_dinfo(dinfo);
return -1;
}
static int tfd_cmp(const void *a, const void *b)
{
if (((int *)a)[0] > ((int *)b)[0])
return 1;
if (((int *)a)[0] < ((int *)b)[0])
return -1;
return 0;
}
static int toff_cmp(const void *a, const void *b)
{
if (((toff_t *)a)[0].tfd > ((toff_t *)b)[0].tfd)
return 1;
if (((toff_t *)a)[0].tfd < ((toff_t *)b)[0].tfd)
return -1;
if (((toff_t *)a)[0].idx > ((toff_t *)b)[0].idx)
return 1;
if (((toff_t *)a)[0].idx < ((toff_t *)b)[0].idx)
return -1;
return 0;
}
static int toff_cmp_idx(const void *a, const void *b)
{
if (((toff_t *)a)[0].idx > ((toff_t *)b)[0].idx)
return 1;
if (((toff_t *)a)[0].idx < ((toff_t *)b)[0].idx)
return -1;
return 0;
}
/*
* fds in fd_parms are sorted so we can use binary search
* for better performance.
*/
static int find_tfd_bsearch(pid_t pid, int efd, int fds[], size_t nr_fds, int tfd, unsigned int toff)
{
kcmp_epoll_slot_t slot = {
.efd = efd,
.tfd = tfd,
.toff = toff,
};
int *tfd_found;
pr_debug("find_tfd_bsearch: pid %d efd %d tfd %d toff %u\n", pid, efd, tfd, toff);
/*
* Optimistic case: the target fd belongs to us
* and wasn't dup'ed.
*/
tfd_found = bsearch(&tfd, fds, nr_fds, sizeof(int), tfd_cmp);
if (tfd_found) {
if (kdat.has_kcmp_epoll_tfd) {
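/*
 * Added note: KCMP_EPOLL_TFD compares the file behind the
 * local fd @tfd with the target file stored in the epoll
 * slot {efd, tfd, toff}; a return of 0 means they share the
 * same struct file, i.e. the watch really points at us.
 */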
if (syscall(SYS_kcmp, pid, pid, KCMP_EPOLL_TFD, tfd, &slot) == 0) {
pr_debug("find_tfd_bsearch (kcmp-yes): bsearch match pid %d efd %d tfd %d toff %u\n",
pid, efd, tfd, toff);
return tfd;
}
} else {
pr_debug("find_tfd_bsearch (kcmp-no): bsearch match pid %d efd %d tfd %d toff %u\n", pid, efd,
tfd, toff);
return tfd;
}
}
pr_debug("find_tfd_bsearch: no match pid %d efd %d tfd %d toff %u\n", pid, efd, tfd, toff);
return -1;
}
static int dump_one_eventpoll(int lfd, u32 id, const struct fd_parms *p)
{
toff_t *toff = NULL;
EventpollFileEntry *e = NULL;
FileEntry *fe = NULL;
int ret = -1;
ssize_t i;
e = xmalloc(sizeof(*e));
if (!e)
goto out;
eventpoll_file_entry__init(e);
fe = xmalloc(sizeof(*fe));
if (!fe)
goto out;
file_entry__init(fe);
e->id = id;
e->flags = p->flags;
e->fown = (FownEntry *)&p->fown;
if (parse_fdinfo(lfd, FD_TYPES__EVENTPOLL, e))
goto out;
fe->type = FD_TYPES__EVENTPOLL;
fe->id = e->id;
fe->epfd = e;
/*
 * In the regular case there are not that many dup'ed
 * descriptors, so instead of building complex mappings
 * we simply walk over the members in O(n^2).
 */
if (p->dfds) {
toff = xmalloc(sizeof(*toff) * e->n_tfd);
if (!toff)
goto out;
for (i = 0; i < e->n_tfd; i++) {
toff[i].idx = i;
toff[i].tfd = e->tfd[i]->tfd;
toff[i].off = 0;
}
qsort(toff, e->n_tfd, sizeof(*toff), toff_cmp);
for (i = 1; i < e->n_tfd; i++)
if (toff[i].tfd == toff[i - 1].tfd)
toff[i].off = toff[i - 1].off + 1;
qsort(toff, e->n_tfd, sizeof(*toff), toff_cmp_idx);
}
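/*
 * Worked example (illustrative): with targets tfd = {4, 7, 4},
 * the first qsort groups duplicates as {4, 4, 7} and the loop
 * above assigns off = {0, 1, 0}, so two epoll items watching
 * the same fd 4 get distinct {tfd, toff} slots for kcmp(); the
 * second qsort restores the original item order by idx.
 */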
/*
 * Handling dup'ed or transferred target files is
 * tricky: we need kcmp() to find out where a file
 * came from. Where kcmp is unavailable we fall back
 * to a simpler approach and just check that the
 * targets belong to the pid's file set.
 */
if (p->dfds) {
for (i = 0; i < e->n_tfd; i++) {
int tfd = find_tfd_bsearch(p->pid, p->fd, p->dfds->fds, p->dfds->nr_fds, e->tfd[i]->tfd,
toff[i].off);
if (tfd == -1) {
if (kdat.has_kcmp_epoll_tfd) {
ret = queue_dinfo(&fe, &e, &toff, p);
} else {
pr_err("Escaped/closed fd descriptor %d on pid %d\n", e->tfd[i]->tfd, p->pid);
}
goto out;
}
}
} else
pr_warn_once("Unix SCM files are not verified\n");
pr_info_eventpoll("Dumping ", e);
ret = pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), fe, PB_FILE);
if (!ret) {
for (i = 0; i < e->n_tfd; i++)
pr_info_eventpoll_tfd("Dumping: ", e->id, e->tfd[i]);
}
out:
for (i = 0; e && i < e->n_tfd; i++)
eventpoll_tfd_entry__free_unpacked(e->tfd[i], NULL);
xfree(fe);
if (e)
xfree(e->tfd);
xfree(e);
xfree(toff);
return ret;
}
const struct fdtype_ops eventpoll_dump_ops = {
.type = FD_TYPES__EVENTPOLL,
.dump = dump_one_eventpoll,
};
static int eventpoll_post_open(struct file_desc *d, int fd);
static int eventpoll_open(struct file_desc *d, int *new_fd)
{
struct fdinfo_list_entry *fle = file_master(d);
struct eventpoll_file_info *info;
int tmp;
info = container_of(d, struct eventpoll_file_info, d);
if (fle->stage >= FLE_OPEN)
return eventpoll_post_open(d, fle->fe->fd);
pr_info_eventpoll("Restore ", info->efe);
tmp = epoll_create(1);
if (tmp < 0) {
pr_perror("Can't create epoll %#08x", info->efe->id);
return -1;
}
if (rst_file_params(tmp, info->efe->fown, info->efe->flags)) {
pr_perror("Can't restore file params on epoll %#08x", info->efe->id);
goto err_close;
}
*new_fd = tmp;
return 1;
err_close:
close(tmp);
return -1;
}
static int epoll_not_ready_tfd(EventpollTfdEntry *tdefe)
{
struct fdinfo_list_entry *fle;
list_for_each_entry(fle, &rsti(current)->fds, ps_list) {
if (tdefe->tfd != fle->fe->fd)
continue;
if (fle->desc->ops->type == FD_TYPES__EVENTPOLL)
return (fle->stage < FLE_OPEN);
else
return (fle->stage != FLE_RESTORED);
}
/*
* If tgt fle is not on the fds list, it's already
* restored (see open_fdinfos), so we're ready.
*/
return 0;
}
static int eventpoll_restore_tfd(int fd, int id, EventpollTfdEntry *tdefe)
{
struct epoll_event event;
pr_info_eventpoll_tfd("Restore ", id, tdefe);
event.events = tdefe->events;
event.data.u64 = tdefe->data;
if (epoll_ctl(fd, EPOLL_CTL_ADD, tdefe->tfd, &event)) {
pr_perror("Can't add event on %#08x", id);
return -1;
}
return 0;
}
static int eventpoll_post_open(struct file_desc *d, int fd)
{
struct eventpoll_file_info *info;
int i;
info = container_of(d, struct eventpoll_file_info, d);
for (i = 0; i < info->efe->n_tfd; i++) {
if (epoll_not_ready_tfd(info->efe->tfd[i]))
return 1;
}
for (i = 0; i < info->efe->n_tfd; i++) {
if (eventpoll_restore_tfd(fd, info->efe->id, info->efe->tfd[i]))
return -1;
}
return 0;
}
static struct file_desc_ops desc_ops = {
.type = FD_TYPES__EVENTPOLL,
.open = eventpoll_open,
};
static int collect_one_epoll_tfd(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
EventpollTfdEntry *tfde;
struct file_desc *d;
struct eventpoll_file_info *ef;
EventpollFileEntry *efe;
int n_tfd;
if (!deprecated_ok("Epoll TFD image"))
return -1;
tfde = pb_msg(msg, EventpollTfdEntry);
d = find_file_desc_raw(FD_TYPES__EVENTPOLL, tfde->id);
if (!d) {
pr_err("No epoll FD for %u\n", tfde->id);
return -1;
}
ef = container_of(d, struct eventpoll_file_info, d);
efe = ef->efe;
n_tfd = efe->n_tfd + 1;
if (xrealloc_safe(&efe->tfd, n_tfd * sizeof(EventpollTfdEntry *)))
return -1;
efe->tfd[efe->n_tfd] = tfde;
efe->n_tfd = n_tfd;
return 0;
}
struct collect_image_info epoll_tfd_cinfo = {
.fd_type = CR_FD_EVENTPOLL_TFD,
.pb_type = PB_EVENTPOLL_TFD,
.collect = collect_one_epoll_tfd,
.flags = COLLECT_NOFREE,
};
static int collect_one_epoll(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct eventpoll_file_info *info = o;
info->efe = pb_msg(msg, EventpollFileEntry);
pr_info_eventpoll("Collected ", info->efe);
return file_desc_add(&info->d, info->efe->id, &desc_ops);
}
struct collect_image_info epoll_cinfo = {
.fd_type = CR_FD_EVENTPOLL_FILE,
.pb_type = PB_EVENTPOLL_FILE,
.priv_size = sizeof(struct eventpoll_file_info),
.collect = collect_one_epoll,
};
| 11,255 | 21.467066 | 112 |
c
|
criu
|
criu-master/criu/fifo.c
|
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include "imgset.h"
#include "image.h"
#include "files.h"
#include "files-reg.h"
#include "file-ids.h"
#include "pipes.h"
#include "fifo.h"
#include "protobuf.h"
#include "images/regfile.pb-c.h"
#include "images/fifo.pb-c.h"
/*
 * FIFO checkpoint and restore is done in a somewhat unusual manner.
 * We use the files-reg.c engine to save the fifo path and flags,
 * so the regular-files image will contain fifo descriptors which
 * are useless for the reg-files engine itself but are needed by
 * our fifo engine.
 *
 * In particular, we dump the fifo entry automatically and the
 * corresponding reg-file entry manually, so on restore we need to
 * ask the reg-file engine to restore the fifo path and flags via
 * a direct call.
 */
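/*
 * Illustrative image layout (matching the code below): dumping one
 * fifo produces a reg-file entry with id rf_id carrying the path
 * and flags, a FifoEntry { id, pipe_id, regf_id }, and the queued
 * bytes go into the fifo-data image.
 */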
struct fifo_info {
struct list_head list;
struct file_desc d;
FifoEntry *fe;
bool restore_data;
};
static LIST_HEAD(fifo_head);
static struct pipe_data_dump pd_fifo = {
.img_type = CR_FD_FIFO_DATA,
};
static int dump_one_fifo(int lfd, u32 id, const struct fd_parms *p)
{
struct cr_img *img = img_from_set(glob_imgset, CR_FD_FILES);
FileEntry fe = FILE_ENTRY__INIT;
FifoEntry e = FIFO_ENTRY__INIT;
u32 rf_id;
fd_id_generate_special(NULL, &rf_id);
/*
 * A trick here: we use the regular-files dumping code
 * to save the path to a fifo, then we reuse it on
 * restore.
 */
if (dump_one_reg_file(lfd, rf_id, p))
return -1;
pr_info("Dumping fifo %d with id %#x pipe_id %#x\n", lfd, id, pipe_id(p));
e.id = id;
e.pipe_id = pipe_id(p);
e.has_regf_id = true;
e.regf_id = rf_id;
fe.type = FD_TYPES__FIFO;
fe.id = e.id;
fe.fifo = &e;
if (pb_write_one(img, &fe, PB_FILE))
return -1;
return dump_one_pipe_data(&pd_fifo, lfd, p);
}
const struct fdtype_ops fifo_dump_ops = {
.type = FD_TYPES__FIFO,
.dump = dump_one_fifo,
};
static struct pipe_data_rst *pd_hash_fifo[PIPE_DATA_HASH_SIZE];
static int do_open_fifo(int ns_root_fd, struct reg_file_info *rfi, void *arg)
{
struct fifo_info *info = arg;
int new_fifo, fake_fifo = -1;
/*
 * Fifos (except read-write ones) block in open() until
 * the other pipe end gets connected, so to be able to
 * proceed with the restoration procedure we open a fake
 * O_RDWR fifo here first (on Linux such an open never
 * blocks).
 */
fake_fifo = openat(ns_root_fd, rfi->path, O_RDWR);
if (fake_fifo < 0) {
pr_perror("Can't open fake fifo %#x [%s]", info->fe->id, rfi->path);
return -1;
}
new_fifo = openat(ns_root_fd, rfi->path, rfi->rfe->flags);
if (new_fifo < 0) {
pr_perror("Can't open fifo %#x [%s]", info->fe->id, rfi->path);
goto out;
}
if (info->restore_data)
if (restore_pipe_data(CR_FD_FIFO_DATA, fake_fifo, info->fe->pipe_id, pd_hash_fifo)) {
close(new_fifo);
new_fifo = -1;
}
out:
close(fake_fifo);
return new_fifo;
}
static int open_fifo_fd(struct file_desc *d, int *new_fd)
{
struct fifo_info *info = container_of(d, struct fifo_info, d);
struct file_desc *reg_d;
int fd;
reg_d = collect_special_file(info->fe->has_regf_id ? info->fe->regf_id : info->fe->id);
if (!reg_d)
return -1;
fd = open_path(reg_d, do_open_fifo, info);
if (fd < 0)
return -1;
*new_fd = fd;
return 0;
}
static struct file_desc_ops fifo_desc_ops = {
.type = FD_TYPES__FIFO,
.open = open_fifo_fd,
};
static int collect_one_fifo(void *o, ProtobufCMessage *base, struct cr_img *i)
{
struct fifo_info *info = o, *f;
info->fe = pb_msg(base, FifoEntry);
pr_info("Collected fifo entry ID %#x PIPE ID %#x\n", info->fe->id, info->fe->pipe_id);
/* check who will restore the fifo data */
list_for_each_entry(f, &fifo_head, list)
if (f->fe->pipe_id == info->fe->pipe_id)
break;
if (&f->list == &fifo_head) {
list_add(&info->list, &fifo_head);
info->restore_data = true;
} else {
INIT_LIST_HEAD(&info->list);
info->restore_data = false;
}
return file_desc_add(&info->d, info->fe->id, &fifo_desc_ops);
}
struct collect_image_info fifo_cinfo = {
.fd_type = CR_FD_FIFO,
.pb_type = PB_FIFO,
.priv_size = sizeof(struct fifo_info),
.collect = collect_one_fifo,
};
static int collect_fifo_data(void *obj, ProtobufCMessage *msg, struct cr_img *img)
{
return do_collect_pipe_data(obj, msg, img, pd_hash_fifo);
}
struct collect_image_info fifo_data_cinfo = {
.fd_type = CR_FD_FIFO_DATA,
.pb_type = PB_PIPE_DATA,
.priv_size = sizeof(struct pipe_data_rst),
.collect = collect_fifo_data,
};
| 4,348 | 22.895604 | 88 |
c
|
criu
|
criu-master/criu/file-lock.c
|
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/file.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fsuid.h>
#include <sys/sysmacros.h>
#include "cr_options.h"
#include "imgset.h"
#include "files.h"
#include "fs-magic.h"
#include "kerndat.h"
#include "image.h"
#include "util.h"
#include "mount.h"
#include "proc_parse.h"
#include "servicefd.h"
#include "file-lock.h"
#include "pstree.h"
#include "files-reg.h"
struct file_lock_rst {
FileLockEntry *fle;
struct list_head l;
};
struct list_head file_lock_list = LIST_HEAD_INIT(file_lock_list);
static int collect_one_file_lock(void *o, ProtobufCMessage *m, struct cr_img *i)
{
struct file_lock_rst *lr = o;
lr->fle = pb_msg(m, FileLockEntry);
list_add_tail(&lr->l, &file_lock_list);
return 0;
}
struct collect_image_info file_locks_cinfo = {
.fd_type = CR_FD_FILE_LOCKS,
.pb_type = PB_FILE_LOCK,
.priv_size = sizeof(struct file_lock_rst),
.collect = collect_one_file_lock,
};
struct file_lock *alloc_file_lock(void)
{
struct file_lock *flock;
flock = xzalloc(sizeof(*flock));
if (!flock)
return NULL;
INIT_LIST_HEAD(&flock->list);
flock->real_owner = -1;
flock->owners_fd = -1;
flock->fl_holder = -1;
return flock;
}
void free_file_locks(void)
{
struct file_lock *flock, *tmp;
list_for_each_entry_safe(flock, tmp, &file_lock_list, list) {
xfree(flock);
}
INIT_LIST_HEAD(&file_lock_list);
}
static int dump_one_file_lock(FileLockEntry *fle)
{
pr_info("LOCK flag: %d,type: %d,pid: %d,fd: %d,start: %8" PRIx64 ",len: %8" PRIx64 "\n", fle->flag, fle->type,
fle->pid, fle->fd, fle->start, fle->len);
return pb_write_one(img_from_set(glob_imgset, CR_FD_FILE_LOCKS), fle, PB_FILE_LOCK);
}
static void fill_flock_entry(FileLockEntry *fle, int fl_kind, int fl_ltype)
{
fle->flag |= fl_kind;
fle->type = fl_ltype;
}
int dump_file_locks(void)
{
FileLockEntry fle;
struct file_lock *fl;
int ret = 0;
pr_info("Dumping file-locks\n");
list_for_each_entry(fl, &file_lock_list, list) {
if (fl->real_owner == -1) {
if (fl->fl_kind == FL_POSIX) {
pr_err("Unresolved lock found pid %d ino %ld\n", fl->fl_owner, fl->i_no);
return -1;
}
continue;
}
if (!opts.handle_file_locks) {
pr_err("Some file locks are hold by dumping tasks! "
"You can try --" OPT_FILE_LOCKS " to dump them.\n");
return -1;
}
file_lock_entry__init(&fle);
fle.pid = fl->real_owner;
fle.fd = fl->owners_fd;
fill_flock_entry(&fle, fl->fl_kind, fl->fl_ltype);
fle.start = fl->start;
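/*
 * Added note: /proc/locks reports the range as "<start> <end>",
 * where <end> is inclusive (hence the +1 below) and the literal
 * string "EOF" marks an open-ended lock; a length of 0 in the
 * image means "up to the end of file", matching fcntl(2) l_len.
 */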
if (!strncmp(fl->end, "EOF", 3))
fle.len = 0;
else
fle.len = (atoll(fl->end) + 1) - fl->start;
ret = dump_one_file_lock(&fle);
if (ret) {
pr_err("Dump file lock failed!\n");
goto err;
}
}
err:
return ret;
}
static int lock_btrfs_file_match(pid_t pid, int fd, struct file_lock *fl, struct fd_parms *p)
{
int phys_dev = MKKDEV(fl->maj, fl->min);
char link[PATH_MAX], t[32];
struct ns_id *ns;
int ret;
snprintf(t, sizeof(t), "/proc/%d/fd/%d", pid, fd);
ret = readlink(t, link, sizeof(link));
if (ret < 0) {
pr_perror("Can't read link of fd %d", fd);
return -1;
} else if ((size_t)ret == sizeof(link)) {
pr_err("Buffer for read link of fd %d is too small\n", fd);
return -1;
}
link[ret] = 0;
ns = lookup_nsid_by_mnt_id(p->mnt_id);
return phys_stat_dev_match(p->stat.st_dev, phys_dev, ns, link);
}
static inline int lock_file_match(pid_t pid, int fd, struct file_lock *fl, struct fd_parms *p)
{
dev_t dev = p->stat.st_dev;
if (fl->i_no != p->stat.st_ino)
return 0;
/*
* Get the right devices for BTRFS. Look at phys_stat_resolve_dev()
* for more details.
*/
if (p->fs_type == BTRFS_SUPER_MAGIC) {
if (p->mnt_id != -1) {
struct mount_info *m;
m = lookup_mnt_id(p->mnt_id);
BUG_ON(m == NULL);
dev = kdev_to_odev(m->s_dev);
} else /* old kernel */
return lock_btrfs_file_match(pid, fd, fl, p);
}
return makedev(fl->maj, fl->min) == dev;
}
static int lock_check_fd(int lfd, struct file_lock *fl)
{
int ret;
if (fl->fl_ltype & LOCK_MAND)
ret = flock(lfd, LOCK_MAND | LOCK_RW);
else
ret = flock(lfd, LOCK_EX | LOCK_NB);
pr_debug(" `- %d/%d\n", ret, errno);
if (ret != 0) {
if (errno != EAGAIN) {
pr_err("Bogus lock test result %d\n", ret);
return -1;
}
return 0;
} else {
/*
 * ret == 0 means that the new lock doesn't conflict
 * with any other lock on the file. But since we know
 * there should be one (the file was found in
 * /proc/locks), the lock must already be held on the
 * file pointed to by fd.
 */
pr_debug(" `- downgrading lock back\n");
if (fl->fl_ltype & LOCK_MAND)
ret = flock(lfd, fl->fl_ltype);
else if (fl->fl_ltype == F_RDLCK)
ret = flock(lfd, LOCK_SH);
if (ret) {
pr_err("Can't downgrade lock back %d\n", ret);
return -1;
}
}
return 1;
}
static int lock_ofd_check_fd(int lfd, struct file_lock *fl)
{
int ret;
struct flock lck = { .l_whence = SEEK_SET, .l_type = F_WRLCK, .l_start = fl->start };
if (strcmp(fl->end, "EOF")) {
unsigned long end;
ret = sscanf(fl->end, "%lu", &end);
if (ret <= 0) {
pr_err("Invalid lock entry\n");
return -1;
}
lck.l_len = end - fl->start + 1;
} else {
lck.l_len = 0;
}
ret = fcntl(lfd, F_OFD_SETLK, &lck);
pr_debug(" `- %d/%d\n", ret, errno);
if (ret != 0) {
if (errno != EAGAIN) {
pr_err("Bogus lock test result %d\n", ret);
return -1;
}
return 0;
} else {
/*
 * ret == 0 means that the new lock doesn't conflict
 * with any other lock on the file. But since we know
 * there should be one (the file was found in
 * /proc/locks), the lock must already be held on the
 * file pointed to by fd.
 */
pr_debug(" `- downgrading lock back\n");
if (fl->fl_ltype & LOCK_WRITE)
lck.l_type = F_WRLCK;
else
lck.l_type = F_RDLCK;
ret = fcntl(lfd, F_OFD_SETLK, &lck);
if (ret) {
pr_err("Can't downgrade lock back %d\n", ret);
return -1;
}
}
return 1;
}
static int lease_check_fd(int fd, int file_flags, struct file_lock *fl)
{
int file_lease_type, err;
int lease_type = fl->fl_ltype & (~LEASE_BREAKING);
if ((file_flags & O_ACCMODE) != O_RDONLY) {
/*
 * A write OFD conflicts with any lease not associated
 * with it, therefore there can't be another lease or
 * OFD for this file.
 */
return 1;
}
file_lease_type = fcntl(fd, F_GETLEASE);
if (file_lease_type < 0) {
pr_err("Can't get lease type\n");
return -1;
}
/*
* Only read OFDs can be present for the file. If
* read and write OFDs with at least one lease had
* presented, it would have conflicted.
*/
if (fl->fl_ltype & LEASE_BREAKING) {
/*
* Only read leases are possible for read OFDs
* and they all should be in breaking state,
* because the current one is.
*/
int compatible_type = file_lease_type;
if (compatible_type != F_UNLCK) {
pr_err("Lease doesn't conflicts but breaks\n");
return -1;
}
/*
* Due to activated breaking sequence we can't
* get actual lease type with F_GETLEASE.
* The err == 0 after lease upgrade means, that
* there is already read lease on OFD. Otherwise
* it would fail, because current read lease is
* still set and breaking.
*/
err = fcntl(fd, F_SETLEASE, F_RDLCK);
if (err < 0) {
if (errno != EAGAIN) {
pr_perror("Can't set lease (fd %i)", fd);
return -1;
}
return 0;
}
return 1;
} else {
/*
* The file can have only non-breaking read
* leases, because otherwise the current one
* also would have broke.
*/
if (lease_type != F_RDLCK) {
pr_err("Incorrect lease type\n");
return -1;
}
if (file_lease_type == F_UNLCK)
return 0;
if (file_lease_type == F_RDLCK)
return 1;
pr_err("Invalid file lease type\n");
return -1;
}
}
int note_file_lock(struct pid *pid, int fd, int lfd, struct fd_parms *p)
{
struct file_lock *fl;
int ret;
if (kdat.has_fdinfo_lock)
return 0;
list_for_each_entry(fl, &file_lock_list, list) {
ret = lock_file_match(pid->real, fd, fl, p);
if (ret < 0)
return -1;
if (ret == 0)
continue;
if (!opts.handle_file_locks) {
pr_err("Some file locks are hold by dumping tasks!"
"You can try --" OPT_FILE_LOCKS " to dump them.\n");
return -1;
}
if (fl->fl_kind == FL_POSIX) {
/*
 * POSIX locks cannot belong to anyone
 * but the creator.
 */
if (fl->fl_owner != pid->real)
continue;
} else if (fl->fl_kind == FL_LEASE) {
if (fl->owners_fd >= 0)
continue;
if (fl->fl_owner != pid->real && fl->real_owner != -1)
continue;
ret = lease_check_fd(lfd, p->flags, fl);
if (ret < 0)
return ret;
if (ret == 0)
continue;
} else /* fl->fl_kind == FL_FLOCK || fl->fl_kind == FL_OFD */ {
int ret;
/*
* OFD locks & FLOCKs can be inherited across fork,
* thus we can have any task as lock
* owner. But the creator is preferred
* anyway.
*/
if (fl->fl_owner != pid->real && fl->real_owner != -1)
continue;
pr_debug("Checking lock holder %d:%d\n", pid->real, fd);
if (fl->fl_kind == FL_FLOCK)
ret = lock_check_fd(lfd, fl);
else
ret = lock_ofd_check_fd(lfd, fl);
if (ret < 0)
return ret;
if (ret == 0)
continue;
}
fl->fl_holder = pid->real;
fl->real_owner = pid->ns[0].virt;
fl->owners_fd = fd;
pr_info("Found lock entry %d.%d %d vs %d\n", pid->real, pid->ns[0].virt, fd, fl->fl_owner);
}
return 0;
}
void discard_dup_locks_tail(pid_t pid, int fd)
{
struct file_lock *fl, *p;
list_for_each_entry_safe_reverse(fl, p, &file_lock_list, list) {
if (fl->owners_fd != fd || pid != fl->fl_holder)
break;
list_del(&fl->list);
xfree(fl);
}
}
int correct_file_leases_type(struct pid *pid, int fd, int lfd)
{
struct file_lock *fl;
int target_type;
list_for_each_entry(fl, &file_lock_list, list) {
/* owners_fd should be set before usage */
if (fl->fl_holder != pid->real || fl->owners_fd != fd)
continue;
if (fl->fl_kind == FL_LEASE && (fl->fl_ltype & LEASE_BREAKING)) {
/*
* Set lease type to actual 'target lease type'
* instead of 'READ' returned by procfs.
*/
target_type = fcntl(lfd, F_GETLEASE);
if (target_type < 0) {
perror("Can't get lease type\n");
return -1;
}
fl->fl_ltype &= ~O_ACCMODE;
fl->fl_ltype |= target_type;
break;
}
}
return 0;
}
static int open_break_cb(int ns_root_fd, struct reg_file_info *rfi, void *arg)
{
int fd, flags = *(int *)arg | O_NONBLOCK;
fd = openat(ns_root_fd, rfi->path, flags);
if (fd >= 0) {
pr_err("Conflicting lease wasn't found\n");
close(fd);
return -1;
} else if (errno != EWOULDBLOCK) {
pr_perror("Can't break lease");
return -1;
}
return 0;
}
static int break_lease(int lease_type, struct file_desc *desc)
{
int target_type = lease_type & (~LEASE_BREAKING);
int break_flags;
/*
 * Flags for the open call are chosen so that the
 * 'target lease type' returned by fcntl(F_GETLEASE)
 * matches the lease type recorded in the image.
 */
if (target_type == F_UNLCK) {
break_flags = O_WRONLY;
} else if (target_type == F_RDLCK) {
break_flags = O_RDONLY;
} else {
pr_err("Incorrect target lease type\n");
return -1;
}
return open_path(desc, open_break_cb, (void *)&break_flags);
}
static int set_file_lease(int fd, int type)
{
int old_fsuid, ret;
struct stat st;
if (fstat(fd, &st)) {
pr_perror("Can't get file stat (%i)", fd);
return -1;
}
/*
* An unprivileged process may take out a lease only if
* uid of the file matches the fsuid of the process.
*/
old_fsuid = setfsuid(st.st_uid);
ret = fcntl(fd, F_SETLEASE, type);
if (ret < 0)
pr_perror("Can't set lease");
setfsuid(old_fsuid);
return ret;
}
static int restore_lease_prebreaking_state(int fd, int fd_type)
{
int access_flags = fd_type & O_ACCMODE;
int lease_type = (access_flags == O_RDONLY) ? F_RDLCK : F_WRLCK;
return set_file_lease(fd, lease_type);
}
static struct fdinfo_list_entry *find_fd_unordered(struct pstree_item *task, int fd)
{
struct list_head *head = &rsti(task)->fds;
struct fdinfo_list_entry *fle;
list_for_each_entry_reverse(fle, head, ps_list) {
if (fle->fe->fd == fd)
return fle;
}
return NULL;
}
static int restore_breaking_file_lease(FileLockEntry *fle)
{
struct fdinfo_list_entry *fdle;
int ret;
fdle = find_fd_unordered(current, fle->fd);
if (fdle == NULL) {
pr_err("Can't get file description\n");
return -1;
}
ret = restore_lease_prebreaking_state(fle->fd, fdle->desc->ops->type);
if (ret)
return ret;
/*
 * The lease could have been broken by two types of open call:
 * 1. non-blocking: it failed because of the lease;
 * 2. blocking: it was still blocked at the moment of
 *    dumping (otherwise the lease wouldn't be breaking)
 *    and thus was canceled by CRIU.
 *
 * No files or leases in the image conflict with each other,
 * therefore we should break the leases explicitly; restoring
 * can be done in any order.
 */
return break_lease(fle->type, fdle->desc);
}
static int restore_file_lease(FileLockEntry *fle)
{
sigset_t blockmask, oldmask;
int signum_fcntl, signum, ret;
if (fle->type & LEASE_BREAKING) {
signum_fcntl = fcntl(fle->fd, F_GETSIG);
signum = signum_fcntl ? signum_fcntl : SIGIO;
if (signum_fcntl < 0) {
pr_perror("Can't get file i/o signum");
return -1;
}
if (sigemptyset(&blockmask) || sigaddset(&blockmask, signum) ||
sigprocmask(SIG_BLOCK, &blockmask, &oldmask)) {
pr_perror("Can't block file i/o signal");
return -1;
}
ret = restore_breaking_file_lease(fle);
if (sigprocmask(SIG_SETMASK, &oldmask, NULL)) {
pr_perror("Can't restore sigmask");
ret = -1;
}
return ret;
} else {
ret = set_file_lease(fle->fd, fle->type);
if (ret < 0)
pr_perror("Can't restore non breaking lease");
return ret;
}
}
static int restore_file_lock(FileLockEntry *fle)
{
int ret = -1;
unsigned int cmd;
if (fle->flag & FL_FLOCK) {
if (fle->type & LOCK_MAND) {
cmd = fle->type;
} else if (fle->type == F_RDLCK) {
cmd = LOCK_SH;
} else if (fle->type == F_WRLCK) {
cmd = LOCK_EX;
} else if (fle->type == F_UNLCK) {
cmd = LOCK_UN;
} else {
pr_err("Unknown flock type!\n");
goto err;
}
pr_info("(flock)flag: %d, type: %d, cmd: %d, pid: %d, fd: %d\n", fle->flag, fle->type, cmd, fle->pid,
fle->fd);
ret = flock(fle->fd, cmd);
if (ret < 0) {
pr_err("Can not set flock!\n");
goto err;
}
} else if (fle->flag & FL_POSIX) {
struct flock flk;
memset(&flk, 0, sizeof(flk));
flk.l_whence = SEEK_SET;
flk.l_start = fle->start;
flk.l_len = fle->len;
flk.l_pid = fle->pid;
flk.l_type = fle->type;
pr_info("(posix)flag: %d, type: %d, pid: %d, fd: %d, "
"start: %8" PRIx64 ", len: %8" PRIx64 "\n",
fle->flag, fle->type, fle->pid, fle->fd, fle->start, fle->len);
ret = fcntl(fle->fd, F_SETLKW, &flk);
if (ret < 0) {
pr_err("Can not set posix lock!\n");
goto err;
}
} else if (fle->flag & FL_OFD) {
struct flock flk = {
.l_whence = SEEK_SET, .l_start = fle->start, .l_len = fle->len, .l_pid = 0, .l_type = fle->type
};
pr_info("(ofd)flag: %d, type: %d, pid: %d, fd: %d, "
"start: %8" PRIx64 ", len: %8" PRIx64 "\n",
fle->flag, fle->type, fle->pid, fle->fd, fle->start, fle->len);
ret = fcntl(fle->fd, F_OFD_SETLK, &flk);
if (ret < 0) {
pr_err("Can not set ofd lock!\n");
goto err;
}
} else if (fle->flag & FL_LEASE) {
pr_info("(lease)flag: %d, type: %d, pid: %d, fd: %d, "
"start: %8" PRIx64 ", len: %8" PRIx64 "\n",
fle->flag, fle->type, fle->pid, fle->fd, fle->start, fle->len);
ret = restore_file_lease(fle);
if (ret < 0)
goto err;
} else {
pr_err("Unknown file lock style!\n");
goto err;
}
return 0;
err:
return ret;
}
static int restore_file_locks(int pid)
{
int ret = 0;
struct file_lock_rst *lr;
list_for_each_entry(lr, &file_lock_list, l) {
if (lr->fle->pid == pid) {
ret = restore_file_lock(lr->fle);
if (ret)
break;
}
}
return ret;
}
int prepare_file_locks(int pid)
{
if (!opts.handle_file_locks)
return 0;
return restore_file_locks(pid);
}
| 16,118 | 21.896307 | 111 |
c
|
criu
|
criu-master/criu/files-ext.c
|
/* An external file is a file which is dumped with the help of a plugin */
#include <unistd.h>
#include "imgset.h"
#include "files.h"
#include "plugin.h"
#include "protobuf.h"
#include "images/ext-file.pb-c.h"
static int dump_one_ext_file(int lfd, u32 id, const struct fd_parms *p)
{
int ret;
struct cr_img *rimg;
FileEntry fe = FILE_ENTRY__INIT;
ExtFileEntry xfe = EXT_FILE_ENTRY__INIT;
ret = run_plugins(DUMP_EXT_FILE, lfd, id);
if (ret < 0)
return ret;
xfe.id = id;
xfe.fown = (FownEntry *)&p->fown;
fe.type = FD_TYPES__EXT;
fe.id = xfe.id;
fe.ext = &xfe;
rimg = img_from_set(glob_imgset, CR_FD_FILES);
return pb_write_one(rimg, &fe, PB_FILE);
}
const struct fdtype_ops ext_dump_ops = {
.type = FD_TYPES__EXT,
.dump = dump_one_ext_file,
};
struct ext_file_info {
struct file_desc d;
ExtFileEntry *xfe;
};
static int open_fd(struct file_desc *d, int *new_fd)
{
struct ext_file_info *xfi;
int fd;
xfi = container_of(d, struct ext_file_info, d);
fd = run_plugins(RESTORE_EXT_FILE, xfi->xfe->id);
if (fd < 0) {
pr_err("Unable to restore %#x\n", xfi->xfe->id);
return -1;
}
if (restore_fown(fd, xfi->xfe->fown))
return -1;
*new_fd = fd;
return 0;
}
static struct file_desc_ops ext_desc_ops = {
.type = FD_TYPES__EXT,
.open = open_fd,
};
static int collect_one_ext(void *o, ProtobufCMessage *base, struct cr_img *i)
{
struct ext_file_info *xfi = o;
xfi->xfe = pb_msg(base, ExtFileEntry);
pr_info("Collected external file with ID %#x\n", xfi->xfe->id);
return file_desc_add(&xfi->d, xfi->xfe->id, &ext_desc_ops);
}
struct collect_image_info ext_file_cinfo = {
.fd_type = CR_FD_EXT_FILES,
.pb_type = PB_EXT_FILE,
.priv_size = sizeof(struct ext_file_info),
.collect = collect_one_ext,
};
int dump_unsupp_fd(struct fd_parms *p, int lfd, char *more, char *info, FdinfoEntry *e)
{
int ret;
ret = do_dump_gen_file(p, lfd, &ext_dump_ops, e);
if (ret == 0)
return 0;
if (ret == -ENOTSUP)
pr_err("Can't dump file %d of that type [%o] (%s %s)\n", p->fd, p->stat.st_mode, more, info);
return -1;
}
| 2,052 | 20.164948 | 95 |
c
|
criu
|
criu-master/criu/fsnotify.c
|
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <utime.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/inotify.h>
#include <linux/magic.h>
#include <sys/wait.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <aio.h>
#include <sys/fanotify.h>
#include "common/compiler.h"
#include "imgset.h"
#include "fsnotify.h"
#include "fdinfo.h"
#include "mount.h"
#include "filesystems.h"
#include "image.h"
#include "util.h"
#include "crtools.h"
#include "files.h"
#include "files-reg.h"
#include "file-ids.h"
#include "criu-log.h"
#include "kerndat.h"
#include "common/list.h"
#include "common/lock.h"
#include "irmap.h"
#include "cr_options.h"
#include "namespaces.h"
#include "pstree.h"
#include "fault-injection.h"
#include <compel/plugins/std/syscall-codes.h>
#include "protobuf.h"
#include "images/fsnotify.pb-c.h"
#include "images/mnt.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "fsnotify: "
struct fsnotify_mark_info {
struct list_head list;
union {
InotifyWdEntry *iwe;
FanotifyMarkEntry *fme;
};
struct pprep_head prep; /* XXX union with remap */
struct file_remap *remap;
};
struct fsnotify_file_info {
union {
InotifyFileEntry *ife;
FanotifyFileEntry *ffe;
};
struct list_head marks;
struct file_desc d;
};
/* File handle */
typedef struct {
u32 bytes;
u32 type;
u64 __handle[16];
} fh_t;
/* Checks if the link text @link refers to an inotify fd */
int is_inotify_link(char *link)
{
return is_anon_link_type(link, "inotify");
}
/* Checks if the link text @link refers to a fanotify fd */
int is_fanotify_link(char *link)
{
return is_anon_link_type(link, "[fanotify]");
}
static void decode_handle(fh_t *handle, FhEntry *img)
{
memzero(handle, sizeof(*handle));
handle->type = img->type;
handle->bytes = img->bytes;
memcpy(handle->__handle, img->handle, min(pb_repeated_size(img, handle), sizeof(handle->__handle)));
}
static int open_by_handle(void *arg, int fd, int pid)
{
return syscall(__NR_open_by_handle_at, fd, arg, O_PATH);
}
enum { ERR_NO_MOUNT = -1, ERR_NO_PATH_IN_MOUNT = -2, ERR_GENERIC = -3 };
static char *alloc_openable(unsigned int s_dev, unsigned long i_ino, FhEntry *f_handle)
{
struct mount_info *m;
fh_t handle;
int fd = -1;
char *path;
char suitable_mount_found = 0;
decode_handle(&handle, f_handle);
/*
 * We are going to try to open the handle and then,
 * depending on command line options and the type of
 * the filesystem (tmpfs/devtmpfs do not preserve
 * their inodes between mounts), we might need to find
 * an openable path to be used on restore as the watch
 * destination.
 */
for (m = mntinfo; m; m = m->next) {
char buf[PATH_MAX], *__path;
int mntfd, openable_fd;
struct stat st;
if (m->s_dev != s_dev)
continue;
if (!mnt_is_dir(m))
continue;
mntfd = __open_mountpoint(m);
pr_debug("\t\tTrying via mntid %d root %s ns_mountpoint @%s (%d)\n", m->mnt_id, m->root,
m->ns_mountpoint, mntfd);
if (mntfd < 0)
continue;
fd = userns_call(open_by_handle, UNS_FDOUT, &handle, sizeof(handle), mntfd);
close(mntfd);
if (fd < 0)
continue;
suitable_mount_found = 1;
if (read_fd_link(fd, buf, sizeof(buf)) < 0) {
close(fd);
goto err;
}
close(fd);
/* Convert into a relative path: "/foo" -> "foo", "/" -> ".". */
__path = (buf[1] != '\0') ? buf + 1 : ".";
pr_debug("\t\t\tlink as %s\n", __path);
mntfd = mntns_get_root_fd(m->nsid);
if (mntfd < 0)
goto err;
openable_fd = openat(mntfd, __path, O_PATH);
if (openable_fd >= 0) {
if (fstat(openable_fd, &st)) {
pr_perror("Can't stat on %s", __path);
close(openable_fd);
goto err;
}
close(openable_fd);
pr_debug("\t\t\topenable (inode %s) as %s\n", st.st_ino == i_ino ? "match" : "don't match",
__path);
if (st.st_ino == i_ino) {
path = xstrdup(buf);
if (path == NULL)
return ERR_PTR(ERR_GENERIC);
if (root_ns_mask & CLONE_NEWNS) {
f_handle->has_mnt_id = true;
f_handle->mnt_id = m->mnt_id;
}
return path;
}
} else
pr_debug("\t\t\tnot openable as %s (%s)\n", __path, strerror(errno));
}
err:
if (suitable_mount_found)
return ERR_PTR(ERR_NO_PATH_IN_MOUNT);
return ERR_PTR(ERR_NO_MOUNT);
}
static int open_handle(unsigned int s_dev, unsigned long i_ino, FhEntry *f_handle)
{
struct mount_info *m;
int mntfd, fd = -1;
fh_t handle;
decode_handle(&handle, f_handle);
pr_debug("Opening fhandle %x:%llx...\n", s_dev, (unsigned long long)handle.__handle[0]);
for (m = mntinfo; m; m = m->next) {
if (m->s_dev != s_dev || !mnt_is_dir(m))
continue;
mntfd = __open_mountpoint(m);
if (mntfd < 0) {
pr_warn("Can't open mount for s_dev %x, continue\n", s_dev);
continue;
}
fd = userns_call(open_by_handle, UNS_FDOUT, &handle, sizeof(handle), mntfd);
if (fd >= 0) {
close(mntfd);
goto out;
}
close(mntfd);
}
out:
return fd;
}
int check_open_handle(unsigned int s_dev, unsigned long i_ino, FhEntry *f_handle)
{
char *path, *irmap_path;
struct mount_info *mi;
if (fault_injected(FI_CHECK_OPEN_HANDLE))
goto fault;
/*
* Always try to fetch watchee path first. There are several reasons:
*
 * - tmpfs/devtmpfs do not save inode numbers between mounts,
* so it is critical to have the complete path under our
* hands for restore purpose;
*
* - in case of migration the inodes might be changed as well
* so the only portable solution is to carry the whole path
* to the watchee inside image.
*/
path = alloc_openable(s_dev, i_ino, f_handle);
if (!IS_ERR_OR_NULL(path)) {
pr_debug("\tHandle 0x%x:0x%lx is openable\n", s_dev, i_ino);
goto out;
} else if (IS_ERR(path) && PTR_ERR(path) == ERR_NO_MOUNT) {
goto fault;
} else if (IS_ERR(path) && PTR_ERR(path) == ERR_GENERIC) {
goto err;
}
mi = lookup_mnt_sdev(s_dev);
if (mi == NULL) {
pr_err("Unable to lookup a mount by dev 0x%x\n", s_dev);
goto err;
}
if ((mi->fstype->code == FSTYPE__TMPFS) || (mi->fstype->code == FSTYPE__DEVTMPFS)) {
pr_err("Can't find suitable path for handle (dev %#x ino %#lx): %d\n", s_dev, i_ino,
(int)PTR_ERR(path));
goto err;
}
if (!opts.force_irmap)
/*
* If we're not forced to do irmap, then
* say we have no path for watch. Otherwise
* do irmap scan even if the handle is
* working.
*
* FIXME -- no need to open-by-handle if
 * we are in force-irmap and not on tmpfs
*/
goto out_nopath;
fault:
pr_warn("\tHandle 0x%x:0x%lx cannot be opened\n", s_dev, i_ino);
irmap_path = irmap_lookup(s_dev, i_ino);
if (!irmap_path) {
pr_err("\tCan't dump that handle\n");
return -1;
}
path = xstrdup(irmap_path);
if (!path)
goto err;
out:
pr_debug("\tDumping %s as path for handle\n", path);
f_handle->path = path;
out_nopath:
return 0;
err:
return -1;
}
static int check_one_wd(InotifyWdEntry *we)
{
pr_info("wd: wd %#08x s_dev %#08x i_ino %#16" PRIx64 " mask %#08x\n", we->wd, we->s_dev, we->i_ino, we->mask);
pr_info("\t[fhandle] bytes %#08x type %#08x __handle %#016" PRIx64 ":%#016" PRIx64 "\n", we->f_handle->bytes,
we->f_handle->type, we->f_handle->handle[0], we->f_handle->handle[1]);
if (we->mask & KERNEL_FS_EVENT_ON_CHILD)
pr_warn_once("\t\tDetected FS_EVENT_ON_CHILD bit "
"in mask (will be ignored on restore)\n");
if (check_open_handle(we->s_dev, we->i_ino, we->f_handle))
return -1;
return 0;
}
static int dump_one_inotify(int lfd, u32 id, const struct fd_parms *p)
{
FileEntry fe = FILE_ENTRY__INIT;
InotifyFileEntry ie = INOTIFY_FILE_ENTRY__INIT;
int exit_code = -1, i, ret;
ret = fd_has_data(lfd);
if (ret < 0)
return -1;
else if (ret > 0)
pr_warn("The %#08x inotify events will be dropped\n", id);
ie.id = id;
ie.flags = p->flags;
ie.fown = (FownEntry *)&p->fown;
if (parse_fdinfo(lfd, FD_TYPES__INOTIFY, &ie))
goto free;
for (i = 0; i < ie.n_wd; i++)
if (check_one_wd(ie.wd[i]))
goto free;
fe.type = FD_TYPES__INOTIFY;
fe.id = ie.id;
fe.ify = &ie;
pr_info("id %#08x flags %#08x\n", ie.id, ie.flags);
if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
goto free;
exit_code = 0;
free:
for (i = 0; i < ie.n_wd; i++)
xfree(ie.wd[i]);
xfree(ie.wd);
return exit_code;
}
static int pre_dump_one_inotify(int pid, int lfd)
{
InotifyFileEntry ie = INOTIFY_FILE_ENTRY__INIT;
int i;
if (parse_fdinfo_pid(pid, lfd, FD_TYPES__INOTIFY, &ie))
return -1;
for (i = 0; i < ie.n_wd; i++) {
InotifyWdEntry *we = ie.wd[i];
if (irmap_queue_cache(we->s_dev, we->i_ino, we->f_handle))
return -1;
xfree(we);
}
return 0;
}
const struct fdtype_ops inotify_dump_ops = {
.type = FD_TYPES__INOTIFY,
.dump = dump_one_inotify,
.pre_dump = pre_dump_one_inotify,
};
static int check_one_mark(FanotifyMarkEntry *fme)
{
if (fme->type == MARK_TYPE__INODE) {
BUG_ON(!fme->ie);
pr_info("mark: s_dev %#08x i_ino %#016" PRIx64 " mask %#08x\n", fme->s_dev, fme->ie->i_ino, fme->mask);
pr_info("\t[fhandle] bytes %#08x type %#08x __handle %#016" PRIx64 ":%#016" PRIx64 "\n",
fme->ie->f_handle->bytes, fme->ie->f_handle->type, fme->ie->f_handle->handle[0],
fme->ie->f_handle->handle[1]);
if (check_open_handle(fme->s_dev, fme->ie->i_ino, fme->ie->f_handle))
return -1;
}
if (fme->type == MARK_TYPE__MOUNT) {
struct mount_info *m;
BUG_ON(!fme->me);
m = lookup_mnt_id(fme->me->mnt_id);
if (!m) {
pr_err("Can't find mnt_id 0x%x\n", fme->me->mnt_id);
return -1;
}
if (!(root_ns_mask & CLONE_NEWNS))
fme->me->path = m->ns_mountpoint + 1;
fme->s_dev = m->s_dev;
pr_info("mark: s_dev %#08x mnt_id %#08x mask %#08x\n", fme->s_dev, fme->me->mnt_id, fme->mask);
}
return 0;
}
static int dump_one_fanotify(int lfd, u32 id, const struct fd_parms *p)
{
FileEntry fle = FILE_ENTRY__INIT;
FanotifyFileEntry fe = FANOTIFY_FILE_ENTRY__INIT;
int ret = -1, i;
ret = fd_has_data(lfd);
if (ret < 0)
return -1;
else if (ret > 0)
pr_warn("The %#08x fanotify events will be dropped\n", id);
ret = -1;
fe.id = id;
fe.flags = p->flags;
fe.fown = (FownEntry *)&p->fown;
if (parse_fdinfo(lfd, FD_TYPES__FANOTIFY, &fe) < 0)
goto free;
for (i = 0; i < fe.n_mark; i++)
if (check_one_mark(fe.mark[i]))
goto free;
pr_info("id %#08x flags %#08x\n", fe.id, fe.flags);
fle.type = FD_TYPES__FANOTIFY;
fle.id = fe.id;
fle.ffy = &fe;
ret = pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fle, PB_FILE);
free:
for (i = 0; i < fe.n_mark; i++)
xfree(fe.mark[i]);
xfree(fe.mark);
return ret;
}
static int pre_dump_one_fanotify(int pid, int lfd)
{
FanotifyFileEntry fe = FANOTIFY_FILE_ENTRY__INIT;
int i;
if (parse_fdinfo_pid(pid, lfd, FD_TYPES__FANOTIFY, &fe))
return -1;
for (i = 0; i < fe.n_mark; i++) {
FanotifyMarkEntry *me = fe.mark[i];
if (me->type == MARK_TYPE__INODE && irmap_queue_cache(me->s_dev, me->ie->i_ino, me->ie->f_handle))
return -1;
xfree(me);
}
xfree(fe.mark);
return 0;
}
const struct fdtype_ops fanotify_dump_ops = {
.type = FD_TYPES__FANOTIFY,
.dump = dump_one_fanotify,
.pre_dump = pre_dump_one_fanotify,
};
static char *get_mark_path(const char *who, struct file_remap *remap, FhEntry *f_handle, unsigned long i_ino,
unsigned int s_dev, char *buf, int *target)
{
char *path = NULL;
if (remap) {
int mntns_root;
mntns_root = mntns_get_root_by_mnt_id(remap->rmnt_id);
pr_debug("\t\tRestore %s watch for %#08x:%#016lx (via %s)\n", who, s_dev, i_ino, remap->rpath);
*target = openat(mntns_root, remap->rpath, O_PATH);
} else if (f_handle->path) {
int mntns_root;
char *path = ".";
uint32_t mnt_id = f_handle->has_mnt_id ? f_handle->mnt_id : -1;
/* irmap cache is collected in the root namespaces. */
mntns_root = mntns_get_root_by_mnt_id(mnt_id);
/* change "/foo" into "foo" and "/" into "." */
if (f_handle->path[1] != '\0')
path = f_handle->path + 1;
pr_debug("\t\tRestore with path hint %d:%s\n", mnt_id, path);
*target = openat(mntns_root, path, O_PATH);
} else
*target = open_handle(s_dev, i_ino, f_handle);
if (*target < 0) {
pr_perror("Unable to open %s", f_handle->path);
goto err;
}
/*
 * The fanotify/inotify attach syscalls want a path to
 * attach the watch to. But the only thing we have is an
 * fd obtained via the fhandle. Fortunately, when attaching
 * to the /proc/pid/fd/ link, we will watch the inode the
 * link points to, i.e. just what we want.
 */
sprintf(buf, "/proc/self/fd/%d", *target);
path = buf;
if (!pr_quelled(LOG_DEBUG)) {
char link[PATH_MAX];
if (read_fd_link(*target, link, sizeof(link)) < 0)
link[0] = '\0';
pr_debug("\t\tRestore %s watch for %#08x:%#016lx (via %s -> %s)\n", who, s_dev, i_ino, path, link);
}
err:
return path;
}
static int restore_one_inotify(int inotify_fd, struct fsnotify_mark_info *info)
{
InotifyWdEntry *iwe = info->iwe;
int ret = -1, target = -1;
char buf[PSFDS], *path;
uint32_t mask;
path = get_mark_path("inotify", info->remap, iwe->f_handle, iwe->i_ino, iwe->s_dev, buf, &target);
if (!path)
goto err;
mask = iwe->mask & IN_ALL_EVENTS;
if (iwe->mask & ~IN_ALL_EVENTS) {
pr_info("\t\tfilter event mask %#x -> %#x\n", iwe->mask, mask);
}
if (kdat.has_inotify_setnextwd) {
if (ioctl(inotify_fd, INOTIFY_IOC_SETNEXTWD, iwe->wd)) {
pr_perror("Can't set next inotify wd");
return -1;
}
}
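/*
 * Added note: fallback for kernels without INOTIFY_IOC_SETNEXTWD.
 * Watch descriptors come from an increasing in-kernel allocator,
 * so we keep adding (and removing) watches until the allocator
 * reaches the wd recorded in the image; marks were collected in
 * ascending wd order to make every target reachable this way.
 */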
while (1) {
int wd;
wd = inotify_add_watch(inotify_fd, path, mask);
if (wd < 0) {
pr_perror("Can't add watch for 0x%x with 0x%x", inotify_fd, iwe->wd);
break;
} else if (wd == iwe->wd) {
ret = 0;
break;
} else if (wd > iwe->wd) {
pr_err("Unsorted watch 0x%x found for 0x%x with 0x%x\n", wd, inotify_fd, iwe->wd);
break;
}
if (kdat.has_inotify_setnextwd)
return -1;
inotify_rm_watch(inotify_fd, wd);
}
err:
close_safe(&target);
return ret;
}
static int restore_one_fanotify(int fd, struct fsnotify_mark_info *mark)
{
FanotifyMarkEntry *fme = mark->fme;
unsigned int flags = FAN_MARK_ADD;
int ret = -1, target = -1;
char buf[PSFDS], *path = NULL;
if (fme->type == MARK_TYPE__MOUNT) {
struct mount_info *m;
int mntns_root;
char *p = fme->me->path;
struct ns_id *nsid = NULL;
if (root_ns_mask & CLONE_NEWNS) {
m = lookup_mnt_id(fme->me->mnt_id);
if (!m) {
pr_err("Can't find mount mnt_id 0x%x\n", fme->me->mnt_id);
return -1;
}
nsid = m->nsid;
p = m->ns_mountpoint;
}
mntns_root = mntns_get_root_fd(nsid);
target = openat(mntns_root, p, O_PATH);
if (target == -1) {
pr_perror("Unable to open %s", p);
goto err;
}
flags |= FAN_MARK_MOUNT;
snprintf(buf, sizeof(buf), "/proc/self/fd/%d", target);
path = buf;
} else if (fme->type == MARK_TYPE__INODE) {
path = get_mark_path("fanotify", mark->remap, fme->ie->f_handle, fme->ie->i_ino, fme->s_dev, buf,
&target);
if (!path)
goto err;
} else {
pr_err("Bad fsnotify mark type 0x%x\n", fme->type);
goto err;
}
flags |= fme->mflags;
if (mark->fme->mask) {
ret = fanotify_mark(fd, flags, fme->mask, AT_FDCWD, path);
if (ret) {
pr_err("Adding fanotify mask 0x%x on 0x%x/%s failed (%d)\n", fme->mask, fme->id, path, ret);
goto err;
}
}
if (fme->ignored_mask) {
ret = fanotify_mark(fd, flags | FAN_MARK_IGNORED_MASK, fme->ignored_mask, AT_FDCWD, path);
if (ret) {
pr_err("Adding fanotify ignored-mask 0x%x on 0x%x/%s failed (%d)\n", fme->ignored_mask, fme->id,
path, ret);
goto err;
}
}
err:
close_safe(&target);
return ret;
}
static int open_inotify_fd(struct file_desc *d, int *new_fd)
{
struct fsnotify_file_info *info;
struct fsnotify_mark_info *wd_info;
int tmp;
info = container_of(d, struct fsnotify_file_info, d);
tmp = inotify_init1(info->ife->flags);
if (tmp < 0) {
pr_perror("Can't create inotify for %#08x", info->ife->id);
return -1;
}
list_for_each_entry(wd_info, &info->marks, list) {
pr_info("\tRestore 0x%x wd for %#08x\n", wd_info->iwe->wd, wd_info->iwe->id);
if (restore_one_inotify(tmp, wd_info)) {
close_safe(&tmp);
return -1;
}
pr_info("\t 0x%x wd for %#08x is restored\n", wd_info->iwe->wd, wd_info->iwe->id);
}
if (restore_fown(tmp, info->ife->fown))
close_safe(&tmp);
*new_fd = tmp;
return 0;
}
static int open_fanotify_fd(struct file_desc *d, int *new_fd)
{
struct fsnotify_file_info *info;
struct fsnotify_mark_info *mark;
unsigned int flags = 0;
int ret;
info = container_of(d, struct fsnotify_file_info, d);
flags = info->ffe->faflags;
if (info->ffe->flags & O_CLOEXEC)
flags |= FAN_CLOEXEC;
if (info->ffe->flags & O_NONBLOCK)
flags |= FAN_NONBLOCK;
ret = fanotify_init(flags, info->ffe->evflags);
if (ret < 0) {
pr_perror("Can't init fanotify mark (%d)", ret);
return -1;
}
list_for_each_entry(mark, &info->marks, list) {
pr_info("\tRestore fanotify for %#08x\n", mark->fme->id);
if (restore_one_fanotify(ret, mark)) {
close_safe(&ret);
return -1;
}
}
if (restore_fown(ret, info->ffe->fown))
close_safe(&ret);
*new_fd = ret;
return 0;
}
static struct file_desc_ops inotify_desc_ops = {
.type = FD_TYPES__INOTIFY,
.open = open_inotify_fd,
};
static struct file_desc_ops fanotify_desc_ops = {
.type = FD_TYPES__FANOTIFY,
.open = open_fanotify_fd,
};
static int inotify_resolve_remap(struct pprep_head *ph)
{
struct fsnotify_mark_info *m;
m = container_of(ph, struct fsnotify_mark_info, prep);
m->remap = lookup_ghost_remap(m->iwe->s_dev, m->iwe->i_ino);
return 0;
}
static int fanotify_resolve_remap(struct pprep_head *ph)
{
struct fsnotify_mark_info *m;
m = container_of(ph, struct fsnotify_mark_info, prep);
m->remap = lookup_ghost_remap(m->fme->s_dev, m->fme->ie->i_ino);
return 0;
}
static int __collect_inotify_mark(struct fsnotify_file_info *p, struct fsnotify_mark_info *mark)
{
struct fsnotify_mark_info *m;
/*
* We should put marks in wd ascending order. See comment
* in restore_one_inotify() for explanation.
*/
list_for_each_entry(m, &p->marks, list)
if (m->iwe->wd > mark->iwe->wd)
break;
list_add_tail(&mark->list, &m->list);
mark->prep.actor = inotify_resolve_remap;
add_post_prepare_cb(&mark->prep);
return 0;
}
static int __collect_fanotify_mark(struct fsnotify_file_info *p, struct fsnotify_mark_info *mark)
{
list_add(&mark->list, &p->marks);
if (mark->fme->type == MARK_TYPE__INODE) {
mark->prep.actor = fanotify_resolve_remap;
add_post_prepare_cb(&mark->prep);
}
return 0;
}
static int collect_one_inotify(void *o, ProtobufCMessage *msg, struct cr_img *img)
{
struct fsnotify_file_info *info = o;
int i;
info->ife = pb_msg(msg, InotifyFileEntry);
INIT_LIST_HEAD(&info->marks);
pr_info("Collected id %#08x flags %#08x\n", info->ife->id, info->ife->flags);
for (i = 0; i < info->ife->n_wd; i++) {
struct fsnotify_mark_info *mark;
mark = xmalloc(sizeof(*mark));
if (!mark)
return -1;
mark->iwe = info->ife->wd[i];
INIT_LIST_HEAD(&mark->list);
mark->remap = NULL;
if (__collect_inotify_mark(info, mark))
return -1;
}
return file_desc_add(&info->d, info->ife->id, &inotify_desc_ops);
}
struct collect_image_info inotify_cinfo = {
.fd_type = CR_FD_INOTIFY_FILE,
.pb_type = PB_INOTIFY_FILE,
.priv_size = sizeof(struct fsnotify_file_info),
.collect = collect_one_inotify,
};
static int collect_one_fanotify(void *o, ProtobufCMessage *msg, struct cr_img *img)
{
struct fsnotify_file_info *info = o;
int i;
info->ffe = pb_msg(msg, FanotifyFileEntry);
INIT_LIST_HEAD(&info->marks);
pr_info("Collected id %#08x flags %#08x\n", info->ffe->id, info->ffe->flags);
for (i = 0; i < info->ffe->n_mark; i++) {
struct fsnotify_mark_info *mark;
mark = xmalloc(sizeof(*mark));
if (!mark)
return -1;
mark->fme = info->ffe->mark[i];
INIT_LIST_HEAD(&mark->list);
mark->remap = NULL;
if (__collect_fanotify_mark(info, mark))
return -1;
}
return file_desc_add(&info->d, info->ffe->id, &fanotify_desc_ops);
}
struct collect_image_info fanotify_cinfo = {
.fd_type = CR_FD_FANOTIFY_FILE,
.pb_type = PB_FANOTIFY_FILE,
.priv_size = sizeof(struct fsnotify_file_info),
.collect = collect_one_fanotify,
};
static int collect_one_inotify_mark(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct fsnotify_mark_info *mark = o;
struct file_desc *d;
if (!deprecated_ok("separate images for fsnotify marks"))
return -1;
mark->iwe = pb_msg(msg, InotifyWdEntry);
INIT_LIST_HEAD(&mark->list);
mark->remap = NULL;
/*
 * Kernels prior to 4.3 might export internal event
 * mask bits which are not part of the user-space API.
 * This is fixed in the kernel, but we have to keep
 * backward compatibility with old images, so mask out
 * the inappropriate bits (in particular, fdinfo might
 * have the FS_EVENT_ON_CHILD bit set).
 */
mark->iwe->mask &= ~KERNEL_FS_EVENT_ON_CHILD;
d = find_file_desc_raw(FD_TYPES__INOTIFY, mark->iwe->id);
if (!d) {
pr_err("Can't find inotify with id %#08x\n", mark->iwe->id);
return -1;
}
return __collect_inotify_mark(container_of(d, struct fsnotify_file_info, d), mark);
}
struct collect_image_info inotify_mark_cinfo = {
.fd_type = CR_FD_INOTIFY_WD,
.pb_type = PB_INOTIFY_WD,
.priv_size = sizeof(struct fsnotify_mark_info),
.collect = collect_one_inotify_mark,
};
static int collect_one_fanotify_mark(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct fsnotify_mark_info *mark = o;
struct file_desc *d;
if (!deprecated_ok("separate images for fsnotify marks"))
return -1;
mark->fme = pb_msg(msg, FanotifyMarkEntry);
INIT_LIST_HEAD(&mark->list);
mark->remap = NULL;
d = find_file_desc_raw(FD_TYPES__FANOTIFY, mark->fme->id);
if (!d) {
pr_err("Can't find fanotify with id %#08x\n", mark->fme->id);
return -1;
}
return __collect_fanotify_mark(container_of(d, struct fsnotify_file_info, d), mark);
}
struct collect_image_info fanotify_mark_cinfo = {
.fd_type = CR_FD_FANOTIFY_MARK,
.pb_type = PB_FANOTIFY_MARK,
.priv_size = sizeof(struct fsnotify_mark_info),
.collect = collect_one_fanotify_mark,
};
| 21,957 | 23.156216 | 111 |
c
|
criu
|
criu-master/criu/hugetlb.c
|
#include "hugetlb.h"
#include "kerndat.h"
#include "sizes.h"
// clang-format off
struct htlb_info hugetlb_info[HUGETLB_MAX] = {
[HUGETLB_16KB] = { SZ_16K, MAP_HUGETLB_16KB },
[HUGETLB_64KB] = { SZ_64K, MAP_HUGETLB_64KB },
[HUGETLB_512KB] = { SZ_512K, MAP_HUGETLB_512KB },
[HUGETLB_1MB] = { SZ_1M, MAP_HUGETLB_1MB },
[HUGETLB_2MB] = { SZ_2M, MAP_HUGETLB_2MB },
[HUGETLB_8MB] = { SZ_8M, MAP_HUGETLB_8MB },
[HUGETLB_16MB] = { SZ_16M, MAP_HUGETLB_16MB },
[HUGETLB_32MB] = { SZ_32M, MAP_HUGETLB_32MB },
[HUGETLB_256MB] = { SZ_256M, MAP_HUGETLB_256MB },
[HUGETLB_512MB] = { SZ_512M, MAP_HUGETLB_512MB },
[HUGETLB_1GB] = { SZ_1G, MAP_HUGETLB_1GB },
[HUGETLB_2GB] = { SZ_2G, MAP_HUGETLB_2GB },
[HUGETLB_16GB] = { SZ_16G, MAP_HUGETLB_16GB },
};
// clang-format on
int is_hugetlb_dev(dev_t dev, int *hugetlb_size_flag)
{
int i;
for (i = 0; i < HUGETLB_MAX; i++) {
if (kdat.hugetlb_dev[i] == dev) {
if (hugetlb_size_flag)
*hugetlb_size_flag = hugetlb_info[i].flag;
return 1;
}
}
return 0;
}
int can_dump_with_memfd_hugetlb(dev_t dev, int *hugetlb_size_flag, const char *file_path, struct vma_area *vma)
{
/*
 * Dump a hugetlb-backed mapping using a hugetlb memfd when it is
 * not an anonymous private mapping.
 */
if (kdat.has_memfd_hugetlb && is_hugetlb_dev(dev, hugetlb_size_flag) &&
!((vma->e->flags & MAP_PRIVATE) && !strncmp(file_path, ANON_HUGEPAGE_PREFIX, ANON_HUGEPAGE_PREFIX_LEN)))
return 1;
return 0;
}
unsigned long get_size_from_hugetlb_flag(int flag)
{
int i;
for (i = 0; i < HUGETLB_MAX; i++)
if (flag == hugetlb_info[i].flag)
return hugetlb_info[i].size;
return -1;
}
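/*
 * Illustrative use (added): a vma dumped with MAP_HUGETLB_2MB maps
 * back to SZ_2M here, so restore recreates it as a 2MiB hugetlb
 * mapping; note that the -1 "not found" result travels through an
 * unsigned long, i.e. callers actually see ULONG_MAX.
 */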
| 1,631 | 25.754098 | 111 |
c
|
criu
|
criu-master/criu/image-desc.c
|
#include <stdlib.h>
#include "image-desc.h"
#include "magic.h"
#include "image.h"
/*
* The cr fd set is the set of files where the information
* about dumped processes is stored. Each file carries some
* small portion of info about the whole picture, see below
* for more details.
*/
#define FD_ENTRY(_name, _fmt) \
[CR_FD_##_name] = { \
.fmt = _fmt ".img", \
.magic = _name##_MAGIC, \
}
#define FD_ENTRY_F(_name, _fmt, _f) \
[CR_FD_##_name] = { \
.fmt = _fmt ".img", \
.magic = _name##_MAGIC, \
.oflags = _f, \
}
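/*
 * Illustrative expansion (assuming FDINFO_MAGIC comes from magic.h):
 * FD_ENTRY(FDINFO, "fdinfo-%u") becomes
 * [CR_FD_FDINFO] = { .fmt = "fdinfo-%u.img", .magic = FDINFO_MAGIC },
 * i.e. every image file name gets the ".img" suffix appended here.
 */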
struct cr_fd_desc_tmpl imgset_template[CR_FD_MAX] = {
FD_ENTRY(INVENTORY, "inventory"),
FD_ENTRY(FDINFO, "fdinfo-%u"),
FD_ENTRY(PAGEMAP, "pagemap-%lu"),
FD_ENTRY(SHMEM_PAGEMAP, "pagemap-shmem-%lu"),
FD_ENTRY(REG_FILES, "reg-files"),
FD_ENTRY(EXT_FILES, "ext-files"),
FD_ENTRY(NS_FILES, "ns-files"),
FD_ENTRY(EVENTFD_FILE, "eventfd"),
FD_ENTRY(EVENTPOLL_FILE, "eventpoll"),
FD_ENTRY(EVENTPOLL_TFD, "eventpoll-tfd"),
FD_ENTRY(SIGNALFD, "signalfd"),
FD_ENTRY(INOTIFY_FILE, "inotify"),
FD_ENTRY(INOTIFY_WD, "inotify-wd"),
FD_ENTRY(FANOTIFY_FILE, "fanotify"),
FD_ENTRY(FANOTIFY_MARK, "fanotify-mark"),
FD_ENTRY(CORE, "core-%u"),
FD_ENTRY(IDS, "ids-%u"),
FD_ENTRY(MM, "mm-%u"),
FD_ENTRY(VMAS, "vmas-%u"),
FD_ENTRY(PIPES, "pipes"),
FD_ENTRY_F(PIPES_DATA, "pipes-data", O_NOBUF), /* splices data */
FD_ENTRY(FIFO, "fifo"),
FD_ENTRY_F(FIFO_DATA, "fifo-data", O_NOBUF), /* the same */
FD_ENTRY(PSTREE, "pstree"),
FD_ENTRY(SIGACT, "sigacts-%u"),
FD_ENTRY(UNIXSK, "unixsk"),
FD_ENTRY(INETSK, "inetsk"),
FD_ENTRY(PACKETSK, "packetsk"),
FD_ENTRY(NETLINK_SK, "netlinksk"),
FD_ENTRY_F(SK_QUEUES, "sk-queues", O_NOBUF), /* lseeks the image */
FD_ENTRY(ITIMERS, "itimers-%u"),
FD_ENTRY(POSIX_TIMERS, "posix-timers-%u"),
FD_ENTRY(CREDS, "creds-%u"),
FD_ENTRY(UTSNS, "utsns-%u"),
FD_ENTRY(IPC_VAR, "ipcns-var-%u"),
FD_ENTRY_F(IPCNS_SHM, "ipcns-shm-%u", O_NOBUF), /* writes segments of data */
FD_ENTRY(IPCNS_MSG, "ipcns-msg-%u"),
FD_ENTRY(IPCNS_SEM, "ipcns-sem-%u"),
FD_ENTRY(FS, "fs-%u"),
FD_ENTRY(REMAP_FPATH, "remap-fpath"),
FD_ENTRY_F(GHOST_FILE, "ghost-file-%x", O_NOBUF),
FD_ENTRY_F(MEMFD_INODE, "memfd", O_NOBUF),
FD_ENTRY(TCP_STREAM, "tcp-stream-%x"),
FD_ENTRY(MNTS, "mountpoints-%u"),
FD_ENTRY(NETDEV, "netdev-%u"),
FD_ENTRY(NETNS, "netns-%u"),
FD_ENTRY_F(IFADDR, "ifaddr-%u", O_NOBUF),
FD_ENTRY_F(ROUTE, "route-%u", O_NOBUF),
FD_ENTRY_F(ROUTE6, "route6-%u", O_NOBUF),
FD_ENTRY_F(RULE, "rule-%u", O_NOBUF),
FD_ENTRY_F(IPTABLES, "iptables-%u", O_NOBUF),
FD_ENTRY_F(IP6TABLES, "ip6tables-%u", O_NOBUF),
FD_ENTRY_F(NFTABLES, "nftables-%u", O_NOBUF),
FD_ENTRY_F(TMPFS_IMG, "tmpfs-%u.tar.gz", O_NOBUF),
FD_ENTRY_F(TMPFS_DEV, "tmpfs-dev-%u.tar.gz", O_NOBUF),
FD_ENTRY_F(AUTOFS, "autofs-%u", O_NOBUF),
FD_ENTRY(BINFMT_MISC_OLD, "binfmt-misc-%u"),
FD_ENTRY(BINFMT_MISC, "binfmt-misc"),
FD_ENTRY(TTY_FILES, "tty"),
FD_ENTRY(TTY_INFO, "tty-info"),
FD_ENTRY_F(TTY_DATA, "tty-data", O_NOBUF),
FD_ENTRY(FILE_LOCKS, "filelocks"),
FD_ENTRY(RLIMIT, "rlimit-%u"),
FD_ENTRY_F(PAGES, "pages-%u", O_NOBUF),
FD_ENTRY_F(PAGES_OLD, "pages-%d", O_NOBUF),
FD_ENTRY_F(SHM_PAGES_OLD, "pages-shmem-%ld", O_NOBUF),
FD_ENTRY(SIGNAL, "signal-s-%u"),
FD_ENTRY(PSIGNAL, "signal-p-%u"),
FD_ENTRY(TUNFILE, "tunfile"),
FD_ENTRY(CGROUP, "cgroup"),
FD_ENTRY(TIMERFD, "timerfd"),
FD_ENTRY(CPUINFO, "cpuinfo"),
FD_ENTRY(SECCOMP, "seccomp"),
FD_ENTRY(USERNS, "userns-%u"),
FD_ENTRY(NETNF_CT, "netns-ct-%u"),
FD_ENTRY(NETNF_EXP, "netns-exp-%u"),
FD_ENTRY(FILES, "files"),
FD_ENTRY(TIMENS, "timens-%u"),
FD_ENTRY(PIDNS, "pidns-%u"),
FD_ENTRY_F(BPFMAP_FILE, "bpfmap-file", O_NOBUF),
FD_ENTRY_F(BPFMAP_DATA, "bpfmap-data", O_NOBUF),
FD_ENTRY(APPARMOR, "apparmor"),
[CR_FD_STATS] = {
.fmt = "stats-%s",
.magic = STATS_MAGIC,
.oflags = O_SERVICE | O_FORCE_LOCAL,
},
[CR_FD_IRMAP_CACHE] = {
.fmt = "irmap-cache",
.magic = IRMAP_CACHE_MAGIC,
.oflags = O_SERVICE | O_FORCE_LOCAL,
},
};
| 4,031 | 31.780488 | 78 |
c
|
criu
|
criu-master/criu/img-streamer.c
|
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdio.h>
#include "cr_options.h"
#include "img-streamer.h"
#include "image.h"
#include "images/img-streamer.pb-c.h"
#include "protobuf.h"
#include "servicefd.h"
#include "rst-malloc.h"
#include "common/scm.h"
#include "common/lock.h"
/*
* We use different path names for the dump and restore sockets because:
* 1) The user may want to perform both at the same time (akin to live
* migration). Specifying the same images-dir is convenient.
* 2) It fails quickly when the user mixes up the streamer and CRIU operations
* (e.g., the streamer is in capture mode, while CRIU is in restore mode).
*/
#define IMG_STREAMER_CAPTURE_SOCKET_NAME "streamer-capture.sock"
#define IMG_STREAMER_SERVE_SOCKET_NAME "streamer-serve.sock"
/* All requests go through the same socket connection. We must synchronize */
static mutex_t *img_streamer_fd_lock;
/* Either O_DUMP or O_RSTR */
static int img_streamer_mode;
static const char *socket_name_for_mode(int mode)
{
switch (mode) {
case O_DUMP:
return IMG_STREAMER_CAPTURE_SOCKET_NAME;
case O_RSTR:
return IMG_STREAMER_SERVE_SOCKET_NAME;
default:
BUG();
return NULL;
}
}
/*
* img_streamer_init() connects to the image streamer socket.
* mode should be either O_DUMP or O_RSTR.
*/
int img_streamer_init(const char *image_dir, int mode)
{
struct sockaddr_un addr;
int sockfd;
img_streamer_mode = mode;
sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
if (sockfd < 0) {
pr_perror("Unable to instantiate UNIX socket");
return -1;
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/%s", image_dir, socket_name_for_mode(mode));
if (connect(sockfd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
pr_perror("Unable to connect to image streamer socket: %s", addr.sun_path);
goto err;
}
img_streamer_fd_lock = shmalloc(sizeof(*img_streamer_fd_lock));
if (!img_streamer_fd_lock) {
pr_err("Failed to allocate memory\n");
goto err;
}
mutex_init(img_streamer_fd_lock);
if (install_service_fd(IMG_STREAMER_FD_OFF, sockfd) < 0)
return -1;
return 0;
err:
close(sockfd);
return -1;
}
/*
* img_streamer_finish() indicates that no more files will be opened.
* In other words, img_streamer_open() will no longer be called.
*/
void img_streamer_finish(void)
{
if (get_service_fd(IMG_STREAMER_FD_OFF) >= 0) {
pr_info("Dismissing the image streamer\n");
close_service_fd(IMG_STREAMER_FD_OFF);
}
}
/*
* The regular protobuf APIs pb_write_one() and pb_read_one() operate over a
* `struct cr_img` object. Sadly, we don't have such object. We just have a
* file descriptor. The following pb_write_one_fd() and pb_read_one_fd()
* provide a protobuf API over a file descriptor. The implementation is a bit
* of a hack, but should be fine. At some point we can revisit to have a
* proper protobuf API over fds.
*/
static int pb_write_one_fd(int fd, void *obj, int type)
{
int ret;
struct cr_img img;
memset(&img, 0, sizeof(img));
img._x.fd = fd;
ret = pb_write_one(&img, obj, type);
if (ret < 0)
pr_perror("Failed to communicate with the image streamer");
return ret;
}
static int pb_read_one_fd(int fd, void **pobj, int type)
{
int ret;
struct cr_img img;
memset(&img, 0, sizeof(img));
img._x.fd = fd;
ret = pb_read_one(&img, pobj, type);
if (ret < 0)
pr_perror("Failed to communicate with the image streamer");
return ret;
}
static int send_file_request(char *filename)
{
ImgStreamerRequestEntry req = IMG_STREAMER_REQUEST_ENTRY__INIT;
req.filename = filename;
return pb_write_one_fd(get_service_fd(IMG_STREAMER_FD_OFF), &req, PB_IMG_STREAMER_REQUEST);
}
static int recv_file_reply(bool *exists)
{
ImgStreamerReplyEntry *reply;
int ret = pb_read_one_fd(get_service_fd(IMG_STREAMER_FD_OFF), (void **)&reply, PB_IMG_STREAMER_REPLY);
if (ret < 0)
return ret;
*exists = reply->exists;
free(reply);
return 0;
}
/*
* Using a pipe for image file transfers allows the data to be spliced by the
* image streamer, greatly improving performance.
* Transfer rates of up to 15GB/s can be seen with this technique.
*/
#define READ_PIPE 0 /* index of the read end returned by pipe() */
#define WRITE_PIPE 1
static int establish_streamer_file_pipe(void)
{
/*
* If the other end of the pipe closes, the kernel will want to kill
* us with a SIGPIPE. This signal must be ignored, which we do in
* crtools.c:main() with signal(SIGPIPE, SIG_IGN).
*/
int ret = -1;
int criu_pipe_direction = img_streamer_mode == O_DUMP ? WRITE_PIPE : READ_PIPE;
int streamer_pipe_direction = 1 - criu_pipe_direction;
int fds[2];
if (pipe(fds) < 0) {
pr_perror("Unable to create pipe");
return -1;
}
if (send_fd(get_service_fd(IMG_STREAMER_FD_OFF), NULL, 0, fds[streamer_pipe_direction]) < 0)
close(fds[criu_pipe_direction]);
else
ret = fds[criu_pipe_direction];
close(fds[streamer_pipe_direction]);
return ret;
}
static int _img_streamer_open(char *filename)
{
if (send_file_request(filename) < 0)
return -1;
if (img_streamer_mode == O_RSTR) {
/* The streamer replies whether the file exists */
bool exists;
if (recv_file_reply(&exists) < 0)
return -1;
if (!exists)
return -ENOENT;
}
/*
* When the image streamer encounters a fatal error, it won't report
* errors via protobufs. Instead, CRIU will get a broken pipe error
* when trying to access a streaming pipe. This behavior is similar to
* what would happen if we were connecting criu and criu-image-streamer
* via a shell pipe.
*/
return establish_streamer_file_pipe();
}
/*
* Opens an image file via a UNIX pipe with the image streamer.
*
* Return:
* A file descriptor on success
* -ENOENT when the file was not found.
* -1 on any other error.
*/
int img_streamer_open(char *filename, int flags)
{
int ret;
BUG_ON(flags != img_streamer_mode);
mutex_lock(img_streamer_fd_lock);
ret = _img_streamer_open(filename);
mutex_unlock(img_streamer_fd_lock);
return ret;
}
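/*
 * A minimal caller sketch (not part of the original file): open an image
 * through the streamer and distinguish "file absent" from a hard error.
 * The image name used here is just an example.
 */
static int example_open_streamed_image(void)
{
	char name[] = "inventory.img";
	int fd = img_streamer_open(name, O_RSTR);

	if (fd == -ENOENT)
		return 0; /* the image simply is not in the stream */
	if (fd < 0)
		return -1; /* socket or protocol failure */

	/* fd is the read end of a pipe; consume the data, then close */
	close(fd);
	return 0;
}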
| 6,035 | 24.905579 | 103 |
c
|
criu
|
criu-master/criu/ipc_ns.c
|
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/msg.h>
#include <sys/sem.h>
#include <sys/shm.h>
#include <sched.h>
#include "util.h"
#include "cr_options.h"
#include "imgset.h"
#include "namespaces.h"
#include "sysctl.h"
#include "ipc_ns.h"
#include "shmem.h"
#include "types.h"
#include "protobuf.h"
#include "images/ipc-var.pb-c.h"
#include "images/ipc-shm.pb-c.h"
#include "images/ipc-sem.pb-c.h"
#include "images/ipc-msg.pb-c.h"
#if defined(__GLIBC__) && __GLIBC__ >= 2
#define KEY __key
#else
#define KEY key
#endif
#ifndef MSGMAX
#define MSGMAX 8192
#endif
#ifndef MSG_COPY
#define MSG_COPY 040000
#endif
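/*
 * Note (comment added for clarity): msgrcv() with MSG_COPY | IPC_NOWAIT
 * copies the message at the given position in the queue instead of
 * dequeueing it, so the dump code below can read a queue without
 * disturbing it. MSG_COPY needs a kernel built with
 * CONFIG_CHECKPOINT_RESTORE.
 */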
static void pr_ipc_desc_entry(const IpcDescEntry *desc)
{
pr_info("id: %-10d key: %#08x uid: %-10d gid: %-10d "
"cuid: %-10d cgid: %-10d mode: %-10o ",
desc->id, desc->key, desc->uid, desc->gid, desc->cuid, desc->cgid, desc->mode);
}
static void fill_ipc_desc(int id, IpcDescEntry *desc, const struct ipc_perm *ipcp)
{
desc->id = id;
desc->key = ipcp->KEY;
desc->uid = userns_uid(ipcp->uid);
desc->gid = userns_gid(ipcp->gid);
desc->cuid = userns_uid(ipcp->cuid);
desc->cgid = userns_gid(ipcp->cgid);
desc->mode = ipcp->mode;
}
static void pr_ipc_sem_array(int nr, u16 *values)
{
while (nr--)
pr_info(" %-5d", values[nr]); // no \n
pr_info("\n");
}
#define pr_info_ipc_sem_array(nr, values) pr_ipc_sem_array(nr, values)
static void pr_info_ipc_sem_entry(const IpcSemEntry *sem)
{
pr_ipc_desc_entry(sem->desc);
pr_info("nsems: %-10d\n", sem->nsems);
}
static int dump_ipc_sem_set(struct cr_img *img, const IpcSemEntry *sem)
{
size_t rounded;
int ret, size;
u16 *values;
size = sizeof(u16) * sem->nsems;
rounded = round_up(size, sizeof(u64));
values = xmalloc(rounded);
if (values == NULL) {
pr_err("Failed to allocate memory for semaphore set values\n");
ret = -ENOMEM;
goto out;
}
ret = semctl(sem->desc->id, 0, GETALL, values);
if (ret < 0) {
pr_perror("Failed to get semaphore set values");
ret = -errno;
goto out;
}
pr_info_ipc_sem_array(sem->nsems, values);
memzero((void *)values + size, rounded - size);
ret = write_img_buf(img, values, rounded);
if (ret < 0) {
pr_err("Failed to write IPC message data\n");
goto out;
}
out:
xfree(values);
return ret;
}
static int dump_ipc_sem_desc(struct cr_img *img, int id, const struct semid_ds *ds)
{
IpcSemEntry sem = IPC_SEM_ENTRY__INIT;
IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
int ret;
sem.desc = &desc;
sem.nsems = ds->sem_nsems;
fill_ipc_desc(id, sem.desc, &ds->sem_perm);
pr_info_ipc_sem_entry(&sem);
ret = pb_write_one(img, &sem, PB_IPC_SEM);
if (ret < 0) {
pr_err("Failed to write IPC semaphores set\n");
return ret;
}
return dump_ipc_sem_set(img, &sem);
}
static int dump_ipc_sem(struct cr_img *img)
{
int i, maxid;
struct seminfo info;
int slot;
maxid = semctl(0, 0, SEM_INFO, &info);
if (maxid < 0) {
pr_perror("semctl failed");
return -errno;
}
pr_info("IPC semaphore sets: %d\n", info.semusz);
for (i = 0, slot = 0; i <= maxid; i++) {
struct semid_ds ds;
int id, ret;
id = semctl(i, 0, SEM_STAT, &ds);
if (id < 0) {
if (errno == EINVAL)
continue;
pr_perror("Failed to get stats for IPC semaphore set");
break;
}
ret = dump_ipc_sem_desc(img, id, &ds);
if (!ret)
slot++;
}
if (slot != info.semusz) {
pr_err("Failed to collect %d (only %d succeeded)\n", info.semusz, slot);
return -EFAULT;
}
return info.semusz;
}
static void pr_info_ipc_msg(int nr, const IpcMsg *msg)
{
pr_info(" %-5d: type: %-20" PRId64 " size: %-10d\n", nr++, msg->mtype, msg->msize);
}
static void pr_info_ipc_msg_entry(const IpcMsgEntry *msg)
{
pr_ipc_desc_entry(msg->desc);
pr_info("qbytes: %-10d qnum: %-10d\n", msg->qbytes, msg->qnum);
}
static int dump_ipc_msg_queue_messages(struct cr_img *img, const IpcMsgEntry *msq, unsigned int msg_nr)
{
struct msgbuf *message = NULL;
unsigned int msgmax;
int ret, msg_cnt = 0;
struct sysctl_req req[] = {
{ "kernel/msgmax", &msgmax, CTL_U32 },
};
ret = sysctl_op(req, ARRAY_SIZE(req), CTL_READ, CLONE_NEWIPC);
if (ret < 0) {
pr_err("Failed to read max IPC message size\n");
goto err;
}
msgmax += sizeof(struct msgbuf);
message = xmalloc(round_up(msgmax, sizeof(u64)));
if (message == NULL) {
pr_err("Failed to allocate memory for IPC message\n");
return -ENOMEM;
}
for (msg_cnt = 0; msg_cnt < msg_nr; msg_cnt++) {
IpcMsg msg = IPC_MSG__INIT;
size_t rounded;
ret = msgrcv(msq->desc->id, message, msgmax, msg_cnt, IPC_NOWAIT | MSG_COPY);
if (ret < 0) {
pr_perror("Failed to copy IPC message");
goto err;
}
msg.msize = ret;
msg.mtype = message->mtype;
pr_info_ipc_msg(msg_cnt, &msg);
ret = pb_write_one(img, &msg, PB_IPCNS_MSG);
if (ret < 0) {
pr_err("Failed to write IPC message header\n");
break;
}
rounded = round_up(msg.msize, sizeof(u64));
memzero(((void *)message->mtext + msg.msize), rounded - msg.msize);
ret = write_img_buf(img, message->mtext, rounded);
if (ret < 0) {
pr_err("Failed to write IPC message data\n");
break;
}
}
ret = 0;
err:
xfree(message);
return ret;
}
static int dump_ipc_msg_queue(struct cr_img *img, int id, const struct msqid_ds *ds)
{
IpcMsgEntry msg = IPC_MSG_ENTRY__INIT;
IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
int ret;
msg.desc = &desc;
fill_ipc_desc(id, msg.desc, &ds->msg_perm);
msg.qbytes = ds->msg_qbytes;
msg.qnum = ds->msg_qnum;
pr_info_ipc_msg_entry(&msg);
ret = pb_write_one(img, &msg, PB_IPCNS_MSG_ENT);
if (ret < 0) {
pr_err("Failed to write IPC message queue\n");
return ret;
}
return dump_ipc_msg_queue_messages(img, &msg, ds->msg_qnum);
}
static int dump_ipc_msg(struct cr_img *img)
{
int i, maxid;
struct msginfo info;
int slot;
maxid = msgctl(0, MSG_INFO, (struct msqid_ds *)&info);
if (maxid < 0) {
pr_perror("msgctl failed");
return -errno;
}
pr_info("IPC message queues: %d\n", info.msgpool);
for (i = 0, slot = 0; i <= maxid; i++) {
struct msqid_ds ds;
int id, ret;
id = msgctl(i, MSG_STAT, &ds);
if (id < 0) {
if (errno == EINVAL)
continue;
pr_perror("Failed to get stats for IPC message queue");
break;
}
ret = dump_ipc_msg_queue(img, id, &ds);
if (!ret)
slot++;
}
if (slot != info.msgpool) {
pr_err("Failed to collect %d message queues (only %d succeeded)\n", info.msgpool, slot);
return -EFAULT;
}
return info.msgpool;
}
static void pr_info_ipc_shm(const IpcShmEntry *shm)
{
pr_ipc_desc_entry(shm->desc);
pr_info("size: %-10" PRIu64 "\n", shm->size);
}
#define NR_MANDATORY_IPC_SYSCTLS 9
static int ipc_sysctl_req(IpcVarEntry *e, int op)
{
int i;
struct sysctl_req req[] = {
{ "kernel/sem", e->sem_ctls, CTL_U32A(e->n_sem_ctls) },
{ "kernel/msgmax", &e->msg_ctlmax, CTL_U32 },
{ "kernel/msgmnb", &e->msg_ctlmnb, CTL_U32 },
{ "kernel/auto_msgmni", &e->auto_msgmni, CTL_U32 },
{ "kernel/msgmni", &e->msg_ctlmni, CTL_U32 },
{ "kernel/shmmax", &e->shm_ctlmax, CTL_U64 },
{ "kernel/shmall", &e->shm_ctlall, CTL_U64 },
{ "kernel/shmmni", &e->shm_ctlmni, CTL_U32 },
{ "kernel/shm_rmid_forced", &e->shm_rmid_forced, CTL_U32 },
/* We have 9 mandatory sysctls above and 8 optional below */
{ "fs/mqueue/queues_max", &e->mq_queues_max, CTL_U32 },
{ "fs/mqueue/msg_max", &e->mq_msg_max, CTL_U32 },
{ "fs/mqueue/msgsize_max", &e->mq_msgsize_max, CTL_U32 },
{ "fs/mqueue/msg_default", &e->mq_msg_default, CTL_U32 },
{ "fs/mqueue/msgsize_default", &e->mq_msgsize_default, CTL_U32 },
{ "kernel/msg_next_id", &e->msg_next_id, CTL_U32 },
{ "kernel/sem_next_id", &e->sem_next_id, CTL_U32 },
{ "kernel/shm_next_id", &e->shm_next_id, CTL_U32 },
};
int nr = NR_MANDATORY_IPC_SYSCTLS;
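	/*
	 * Comment added for clarity: the optional sysctls occupy fixed slots
	 * 9..16 of req[] above. The ones that actually apply are copied down
	 * so that the first 'nr' entries of req[] are exactly the requests
	 * submitted to sysctl_op() below.
	 */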
/* Skip sysctls which can't be set or haven't existed on dump */
if (access("/proc/sys/fs/mqueue", X_OK))
pr_info("Mqueue sysctls are missing\n");
else {
nr += 3;
if (e->has_mq_msg_default) {
req[nr++] = req[12];
req[nr++] = req[13];
}
}
if (e->has_msg_next_id)
req[nr++] = req[14];
if (e->has_sem_next_id)
req[nr++] = req[15];
if (e->has_shm_next_id)
req[nr++] = req[16];
for (i = 0; i < nr; i++)
req[i].flags = CTL_FLAGS_IPC_EACCES_SKIP;
return sysctl_op(req, nr, op, CLONE_NEWIPC);
}
static int dump_ipc_shm_pages(const IpcShmEntry *shm)
{
int ret;
void *data;
data = shmat(shm->desc->id, NULL, SHM_RDONLY);
if (data == (void *)-1) {
pr_perror("Failed to attach IPC shared memory");
return -errno;
}
ret = dump_one_sysv_shmem(data, shm->size, shm->desc->id);
if (shmdt(data)) {
pr_perror("Failed to detach IPC shared memory");
return -errno;
}
return ret;
}
static int dump_shm_hugetlb_flag(IpcShmEntry *shm, int id, unsigned long size)
{
void *addr;
int ret, hugetlb_flag, exit_code = -1;
struct stat st;
char path[64];
addr = shmat(id, NULL, SHM_RDONLY);
if (addr == (void *)-1) {
pr_perror("Failed to attach shm");
return -1;
}
/* The shm segment size may not be page-aligned,
* so we need to round it up to the next page boundary
*/
size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
snprintf(path, sizeof(path), "/proc/self/map_files/%lx-%lx", (unsigned long)addr, (unsigned long)addr + size);
ret = stat(path, &st);
if (ret < 0) {
pr_perror("Can't stat map_files");
goto detach;
}
if (is_hugetlb_dev(st.st_dev, &hugetlb_flag)) {
shm->has_hugetlb_flag = true;
shm->hugetlb_flag = hugetlb_flag | SHM_HUGETLB;
}
exit_code = 0;
detach:
shmdt(addr);
return exit_code;
}
static int dump_ipc_shm_seg(struct cr_img *img, int id, const struct shmid_ds *ds)
{
IpcShmEntry shm = IPC_SHM_ENTRY__INIT;
IpcDescEntry desc = IPC_DESC_ENTRY__INIT;
int ret;
shm.desc = &desc;
shm.size = ds->shm_segsz;
shm.has_in_pagemaps = true;
shm.in_pagemaps = true;
if (dump_shm_hugetlb_flag(&shm, id, ds->shm_segsz))
return -1;
fill_ipc_desc(id, shm.desc, &ds->shm_perm);
pr_info_ipc_shm(&shm);
ret = pb_write_one(img, &shm, PB_IPC_SHM);
if (ret < 0) {
pr_err("Failed to write IPC shared memory segment\n");
return ret;
}
return dump_ipc_shm_pages(&shm);
}
static int dump_ipc_shm(struct cr_img *img)
{
int i, maxid, slot;
struct shm_info info;
maxid = shmctl(0, SHM_INFO, (void *)&info);
if (maxid < 0) {
pr_perror("shmctl(SHM_INFO) failed");
return -errno;
}
pr_info("IPC shared memory segments: %d\n", info.used_ids);
for (i = 0, slot = 0; i <= maxid; i++) {
struct shmid_ds ds;
int id, ret;
id = shmctl(i, SHM_STAT, &ds);
if (id < 0) {
if (errno == EINVAL)
continue;
pr_perror("Failed to get stats for IPC shared memory");
break;
}
ret = dump_ipc_shm_seg(img, id, &ds);
if (ret < 0)
return ret;
slot++;
}
if (slot != info.used_ids) {
pr_err("Failed to collect %d (only %d succeeded)\n", info.used_ids, slot);
return -EFAULT;
}
return 0;
}
static int dump_ipc_var(struct cr_img *img)
{
IpcVarEntry var = IPC_VAR_ENTRY__INIT;
int ret = -1;
var.n_sem_ctls = 4;
var.sem_ctls = xmalloc(pb_repeated_size(&var, sem_ctls));
if (!var.sem_ctls)
goto err;
var.has_mq_msg_default = true;
var.has_mq_msgsize_default = true;
var.has_msg_next_id = true;
var.has_sem_next_id = true;
var.has_shm_next_id = true;
ret = ipc_sysctl_req(&var, CTL_READ);
if (ret < 0) {
pr_err("Failed to read IPC variables\n");
goto err;
}
/*
* One cannot write -1 to the *_next_id sysctls,
* which is their initial value
*/
if (var.msg_next_id == -1)
var.has_msg_next_id = false;
if (var.sem_next_id == -1)
var.has_sem_next_id = false;
if (var.shm_next_id == -1)
var.has_shm_next_id = false;
ret = pb_write_one(img, &var, PB_IPC_VAR);
if (ret < 0) {
pr_err("Failed to write IPC variables\n");
goto err;
}
err:
xfree(var.sem_ctls);
return ret;
}
static int dump_ipc_data(const struct cr_imgset *imgset)
{
int ret;
ret = dump_ipc_var(img_from_set(imgset, CR_FD_IPC_VAR));
if (ret < 0)
return ret;
ret = dump_ipc_shm(img_from_set(imgset, CR_FD_IPCNS_SHM));
if (ret < 0)
return ret;
ret = dump_ipc_msg(img_from_set(imgset, CR_FD_IPCNS_MSG));
if (ret < 0)
return ret;
ret = dump_ipc_sem(img_from_set(imgset, CR_FD_IPCNS_SEM));
if (ret < 0)
return ret;
return 0;
}
int dump_ipc_ns(int ns_id)
{
int ret;
struct cr_imgset *imgset;
imgset = cr_imgset_open(ns_id, IPCNS, O_DUMP);
if (imgset == NULL)
return -1;
ret = dump_ipc_data(imgset);
if (ret < 0) {
pr_err("Failed to write IPC namespace data\n");
goto err;
}
err:
close_cr_imgset(&imgset);
return ret < 0 ? -1 : 0;
}
static int prepare_ipc_sem_values(struct cr_img *img, const IpcSemEntry *sem)
{
int ret, size;
u16 *values;
size = round_up(sizeof(u16) * sem->nsems, sizeof(u64));
values = xmalloc(size);
if (values == NULL) {
pr_err("Failed to allocate memory for semaphores set values\n");
ret = -ENOMEM;
goto out;
}
ret = read_img_buf(img, values, size);
if (ret < 0) {
pr_err("Failed to allocate memory for semaphores set values\n");
ret = -ENOMEM;
goto out;
}
pr_info_ipc_sem_array(sem->nsems, values);
ret = semctl(sem->desc->id, 0, SETALL, values);
if (ret < 0) {
pr_perror("Failed to set semaphores set values");
ret = -errno;
}
out:
xfree(values);
return ret;
}
static int prepare_ipc_sem_desc(struct cr_img *img, const IpcSemEntry *sem)
{
int ret, id;
struct sysctl_req req[] = {
{ "kernel/sem_next_id", &sem->desc->id, CTL_U32, CTL_FLAGS_IPC_EACCES_SKIP },
};
struct semid_ds semid;
ret = sysctl_op(req, ARRAY_SIZE(req), CTL_WRITE, CLONE_NEWIPC);
if (ret < 0) {
pr_err("Failed to set desired IPC sem ID\n");
return ret;
}
id = semget(sem->desc->key, sem->nsems, sem->desc->mode | IPC_CREAT | IPC_EXCL);
if (id == -1) {
pr_perror("Failed to create sem set");
return -errno;
}
if (id != sem->desc->id) {
pr_err("Failed to restore sem id (%d instead of %d)\n", id, sem->desc->id);
return -EFAULT;
}
ret = semctl(id, sem->nsems, IPC_STAT, &semid);
if (ret == -1) {
pr_err("Failed to get sem stat structure\n");
return -EFAULT;
}
semid.sem_perm.uid = sem->desc->uid;
semid.sem_perm.gid = sem->desc->gid;
ret = semctl(id, sem->nsems, IPC_SET, &semid);
if (ret == -1) {
pr_err("Failed to set sem uid and gid\n");
return -EFAULT;
}
ret = prepare_ipc_sem_values(img, sem);
if (ret < 0) {
pr_err("Failed to update sem pages\n");
return ret;
}
return 0;
}
static int prepare_ipc_sem(int pid)
{
int ret;
struct cr_img *img;
pr_info("Restoring IPC semaphores sets\n");
img = open_image(CR_FD_IPCNS_SEM, O_RSTR, pid);
if (!img)
return -1;
while (1) {
IpcSemEntry *sem;
ret = pb_read_one_eof(img, &sem, PB_IPC_SEM);
if (ret < 0) {
ret = -EIO;
goto err;
}
if (ret == 0)
break;
pr_info_ipc_sem_entry(sem);
ret = prepare_ipc_sem_desc(img, sem);
ipc_sem_entry__free_unpacked(sem, NULL);
if (ret < 0) {
pr_err("Failed to prepare semaphores set\n");
goto err;
}
}
close_image(img);
return 0;
err:
close_image(img);
return ret;
}
static int prepare_ipc_msg_queue_messages(struct cr_img *img, const IpcMsgEntry *msq)
{
IpcMsg *msg = NULL;
int msg_nr = 0;
int ret = 0;
while (msg_nr < msq->qnum) {
struct msgbuf {
long mtype;
char mtext[MSGMAX];
} data;
ret = pb_read_one(img, &msg, PB_IPCNS_MSG);
if (ret <= 0)
return -EIO;
pr_info_ipc_msg(msg_nr, msg);
if (msg->msize > MSGMAX) {
ret = -1;
pr_err("Unsupported message size: %d (MAX: %d)\n", msg->msize, MSGMAX);
break;
}
ret = read_img_buf(img, data.mtext, round_up(msg->msize, sizeof(u64)));
if (ret < 0) {
pr_err("Failed to read IPC message data\n");
break;
}
data.mtype = msg->mtype;
ret = msgsnd(msq->desc->id, &data, msg->msize, IPC_NOWAIT);
if (ret < 0) {
pr_perror("Failed to send IPC message");
ret = -errno;
break;
}
msg_nr++;
}
if (msg)
ipc_msg__free_unpacked(msg, NULL);
return ret;
}
static int prepare_ipc_msg_queue(struct cr_img *img, const IpcMsgEntry *msq)
{
int ret, id;
struct sysctl_req req[] = {
{ "kernel/msg_next_id", &msq->desc->id, CTL_U32, CTL_FLAGS_IPC_EACCES_SKIP },
};
struct msqid_ds msqid;
ret = sysctl_op(req, ARRAY_SIZE(req), CTL_WRITE, CLONE_NEWIPC);
if (ret < 0) {
pr_err("Failed to set desired IPC msg ID\n");
return ret;
}
id = msgget(msq->desc->key, msq->desc->mode | IPC_CREAT | IPC_EXCL);
if (id == -1) {
pr_perror("Failed to create msg set");
return -errno;
}
if (id != msq->desc->id) {
pr_err("Failed to restore msg id (%d instead of %d)\n", id, msq->desc->id);
return -EFAULT;
}
ret = msgctl(id, IPC_STAT, &msqid);
if (ret == -1) {
pr_err("Failed to get msq stat structure\n");
return -EFAULT;
}
msqid.msg_perm.uid = msq->desc->uid;
msqid.msg_perm.gid = msq->desc->gid;
ret = msgctl(id, IPC_SET, &msqid);
if (ret == -1) {
pr_err("Failed to set msq queue uid and gid\n");
return -EFAULT;
}
ret = prepare_ipc_msg_queue_messages(img, msq);
if (ret < 0) {
pr_err("Failed to update message queue messages\n");
return ret;
}
return 0;
}
static int prepare_ipc_msg(int pid)
{
int ret;
struct cr_img *img;
pr_info("Restoring IPC message queues\n");
img = open_image(CR_FD_IPCNS_MSG, O_RSTR, pid);
if (!img)
return -1;
while (1) {
IpcMsgEntry *msq;
ret = pb_read_one_eof(img, &msq, PB_IPCNS_MSG_ENT);
if (ret < 0) {
pr_err("Failed to read IPC messages queue\n");
ret = -EIO;
goto err;
}
if (ret == 0)
break;
pr_info_ipc_msg_entry(msq);
ret = prepare_ipc_msg_queue(img, msq);
ipc_msg_entry__free_unpacked(msq, NULL);
if (ret < 0) {
pr_err("Failed to prepare messages queue\n");
goto err;
}
}
close_image(img);
return 0;
err:
close_image(img);
return ret;
}
static int restore_content(void *data, struct cr_img *img, const IpcShmEntry *shm)
{
int ifd;
ssize_t size, off;
ifd = img_raw_fd(img);
if (ifd < 0) {
pr_err("Failed getting raw image fd\n");
return -1;
}
size = round_up(shm->size, sizeof(u32));
off = 0;
do {
ssize_t ret;
ret = read(ifd, data + off, size - off);
if (ret <= 0) {
pr_perror("Failed to write IPC shared memory data");
return (int)ret;
}
off += ret;
} while (off < size);
return 0;
}
static int prepare_ipc_shm_pages(struct cr_img *img, const IpcShmEntry *shm)
{
int ret;
void *data;
data = shmat(shm->desc->id, NULL, 0);
if (data == (void *)-1) {
pr_perror("Failed to attach IPC shared memory");
return -errno;
}
if (shm->has_in_pagemaps && shm->in_pagemaps)
ret = restore_sysv_shmem_content(data, shm->size, shm->desc->id);
else
ret = restore_content(data, img, shm);
if (shmdt(data)) {
pr_perror("Failed to detach IPC shared memory");
return -errno;
}
return ret;
}
static int prepare_ipc_shm_seg(struct cr_img *img, const IpcShmEntry *shm)
{
int ret, id, hugetlb_flag = 0;
struct sysctl_req req[] = {
{ "kernel/shm_next_id", &shm->desc->id, CTL_U32, CTL_FLAGS_IPC_EACCES_SKIP },
};
struct shmid_ds shmid;
if (collect_sysv_shmem(shm->desc->id, shm->size))
return -1;
ret = sysctl_op(req, ARRAY_SIZE(req), CTL_WRITE, CLONE_NEWIPC);
if (ret < 0) {
pr_err("Failed to set desired IPC shm ID\n");
return ret;
}
if (shm->has_hugetlb_flag)
hugetlb_flag = shm->hugetlb_flag;
id = shmget(shm->desc->key, shm->size, hugetlb_flag | shm->desc->mode | IPC_CREAT | IPC_EXCL);
if (id == -1) {
pr_perror("Failed to create shm set");
return -errno;
}
if (id != shm->desc->id) {
pr_err("Failed to restore shm id (%d instead of %d)\n", id, shm->desc->id);
return -EFAULT;
}
ret = shmctl(id, IPC_STAT, &shmid);
if (ret == -1) {
pr_err("Failed to get shm stat structure\n");
return -EFAULT;
}
shmid.shm_perm.uid = shm->desc->uid;
shmid.shm_perm.gid = shm->desc->gid;
ret = shmctl(id, IPC_SET, &shmid);
if (ret == -1) {
pr_err("Failed to set shm uid and gid\n");
return -EFAULT;
}
ret = prepare_ipc_shm_pages(img, shm);
if (ret < 0) {
pr_err("Failed to update shm pages\n");
return ret;
}
return 0;
}
static int prepare_ipc_shm(int pid)
{
int ret;
struct cr_img *img;
pr_info("Restoring IPC shared memory\n");
img = open_image(CR_FD_IPCNS_SHM, O_RSTR, pid);
if (!img)
return -1;
while (1) {
IpcShmEntry *shm;
ret = pb_read_one_eof(img, &shm, PB_IPC_SHM);
if (ret < 0) {
pr_err("Failed to read IPC shared memory segment\n");
ret = -EIO;
goto err;
}
if (ret == 0)
break;
pr_info_ipc_shm(shm);
ret = prepare_ipc_shm_seg(img, shm);
ipc_shm_entry__free_unpacked(shm, NULL);
if (ret < 0) {
pr_err("Failed to prepare shm segment\n");
goto err;
}
}
close_image(img);
return 0;
err:
close_image(img);
return ret;
}
static int prepare_ipc_var(int pid)
{
int ret;
struct cr_img *img;
IpcVarEntry *var;
pr_info("Restoring IPC variables\n");
img = open_image(CR_FD_IPC_VAR, O_RSTR, pid);
if (!img)
return -1;
ret = pb_read_one(img, &var, PB_IPC_VAR);
close_image(img);
if (ret <= 0) {
pr_err("Failed to read IPC namespace variables\n");
return -EFAULT;
}
ret = ipc_sysctl_req(var, CTL_WRITE);
ipc_var_entry__free_unpacked(var, NULL);
if (ret < 0) {
pr_err("Failed to prepare IPC namespace variables\n");
return -EFAULT;
}
return 0;
}
int prepare_ipc_ns(int pid)
{
int ret;
pr_info("Restoring IPC namespace\n");
ret = prepare_ipc_var(pid);
if (ret < 0)
return ret;
ret = prepare_ipc_shm(pid);
if (ret < 0)
return ret;
ret = prepare_ipc_msg(pid);
if (ret < 0)
return ret;
ret = prepare_ipc_sem(pid);
if (ret < 0)
return ret;
return 0;
}
struct ns_desc ipc_ns_desc = NS_DESC_ENTRY(CLONE_NEWIPC, "ipc");
| 21,512 | 20.752275 | 111 |
c
|
criu
|
criu-master/criu/kcmp-ids.c
|
#include <unistd.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include "log.h"
#include "xmalloc.h"
#include "common/compiler.h"
#include "common/bug.h"
#include "rbtree.h"
#include "kcmp-ids.h"
/*
* We track shared files by a global rbtree, where each node might
* be a root for a subtree. The reason for that is the nature of the data
* we obtain from the operating system.
*
* Basically, the OS provides us with two ways to distinguish files
*
* - information obtained from the fstat call
* - the shiny new sys_kcmp system call (which may compare the file descriptor
*   pointers inside the kernel and provide us with ordering info)
*
* So, to speed up the procedure of searching for shared file descriptors
* we use both techniques. From the fstat call we get the so-called general
* file IDs (genid), which are carried in the main rbtree.
*
* In case two genids are the same -- we need to use the second way and
* call sys_kcmp. Thus, if the kernel tells us that files have an identical
* genid but are actually different from the kernel's point of view -- we
* assign a second unique key (subid) to such a file descriptor and put it
* into a subtree.
*
* So the tree will look like
*
* (root)
* genid-1
* / \
* genid-2 genid-3
* / \ / \
*
* Where each genid node might be a sub-rbtree as well
*
* (genid-N)
* / \
* subid-1 subid-2
* / \ / \
*
* Carrying two rbtrees at once allows us to minimize the number
* of sys_kcmp syscalls, and also to collect and dump file descriptors
* in one pass.
*/
struct kid_entry {
struct rb_node node;
struct rb_root subtree_root;
struct rb_node subtree_node;
uint32_t subid; /* subid is always unique */
struct kid_elem elem;
} __aligned(sizeof(long));
static struct kid_entry *alloc_kid_entry(struct kid_tree *tree, struct kid_elem *elem)
{
struct kid_entry *e;
e = xmalloc(sizeof(*e));
if (!e)
goto err;
e->subid = tree->subid++;
e->elem = *elem;
/* Make sure no overflow here */
BUG_ON(!e->subid);
rb_init_node(&e->node);
rb_init_node(&e->subtree_node);
e->subtree_root = RB_ROOT;
rb_link_and_balance(&e->subtree_root, &e->subtree_node, NULL, &e->subtree_root.rb_node);
err:
return e;
}
static uint32_t kid_generate_sub(struct kid_tree *tree, struct kid_entry *e, struct kid_elem *elem, int *new_id)
{
struct rb_node *node = e->subtree_root.rb_node;
struct kid_entry *sub = NULL;
struct rb_node **new = &e->subtree_root.rb_node;
struct rb_node *parent = NULL;
BUG_ON(!node);
while (node) {
struct kid_entry *this = rb_entry(node, struct kid_entry, subtree_node);
int ret = syscall(SYS_kcmp, this->elem.pid, elem->pid, tree->kcmp_type, this->elem.idx, elem->idx);
parent = *new;
if (ret == 1)
node = node->rb_left, new = &((*new)->rb_left);
else if (ret == 2)
node = node->rb_right, new = &((*new)->rb_right);
else if (ret == 0)
return this->subid;
else {
pr_perror("kcmp failed: pid (%d %d) type %u idx (%u %u)", this->elem.pid, elem->pid,
tree->kcmp_type, this->elem.idx, elem->idx);
return 0;
}
}
sub = alloc_kid_entry(tree, elem);
if (!sub)
return 0;
rb_link_and_balance(&e->subtree_root, &sub->subtree_node, parent, new);
*new_id = 1;
return sub->subid;
}
uint32_t kid_generate_gen(struct kid_tree *tree, struct kid_elem *elem, int *new_id)
{
struct rb_node *node = tree->root.rb_node;
struct kid_entry *e = NULL;
struct rb_node **new = &tree->root.rb_node;
struct rb_node *parent = NULL;
while (node) {
struct kid_entry *this = rb_entry(node, struct kid_entry, node);
parent = *new;
if (elem->genid < this->elem.genid)
node = node->rb_left, new = &((*new)->rb_left);
else if (elem->genid > this->elem.genid)
node = node->rb_right, new = &((*new)->rb_right);
else
return kid_generate_sub(tree, this, elem, new_id);
}
e = alloc_kid_entry(tree, elem);
if (!e)
return 0;
rb_link_and_balance(&tree->root, &e->node, parent, new);
*new_id = 1;
return e->subid;
}
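/*
 * A minimal usage sketch (not part of the original file): feed one fd's
 * identity into the tree and learn whether it shares a struct file with a
 * previously added one. The genid would normally be derived from fstat()
 * results; here it is just a parameter.
 */
static uint32_t example_kid_lookup(struct kid_tree *tree, pid_t pid, unsigned int fd, uint32_t genid)
{
	struct kid_elem elem = {
		.pid = pid,
		.genid = genid,
		.idx = fd,
	};
	int new_id = 0;
	uint32_t subid = kid_generate_gen(tree, &elem, &new_id);

	/* subid == 0 means failure; new_id == 1 means a brand new file */
	return subid;
}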
static struct kid_elem *kid_lookup_epoll_tfd_sub(struct kid_tree *tree, struct kid_entry *e, struct kid_elem *elem,
kcmp_epoll_slot_t *slot)
{
struct rb_node *node = e->subtree_root.rb_node;
struct rb_node **new = &e->subtree_root.rb_node;
BUG_ON(!node);
while (node) {
struct kid_entry *this = rb_entry(node, struct kid_entry, subtree_node);
int ret = syscall(SYS_kcmp, this->elem.pid, elem->pid, KCMP_EPOLL_TFD, this->elem.idx, slot);
if (ret == 1)
node = node->rb_left, new = &((*new)->rb_left);
else if (ret == 2)
node = node->rb_right, new = &((*new)->rb_right);
else if (ret == 0)
return &this->elem;
else {
pr_perror("kcmp-epoll failed: pid (%d %d) type %u idx (%u %u)", this->elem.pid, elem->pid,
KCMP_EPOLL_TFD, this->elem.idx, elem->idx);
return NULL;
}
}
return NULL;
}
struct kid_elem *kid_lookup_epoll_tfd(struct kid_tree *tree, struct kid_elem *elem, kcmp_epoll_slot_t *slot)
{
struct rb_node *node = tree->root.rb_node;
struct rb_node **new = &tree->root.rb_node;
while (node) {
struct kid_entry *this = rb_entry(node, struct kid_entry, node);
if (elem->genid < this->elem.genid)
node = node->rb_left, new = &((*new)->rb_left);
else if (elem->genid > this->elem.genid)
node = node->rb_right, new = &((*new)->rb_right);
else
return kid_lookup_epoll_tfd_sub(tree, this, elem, slot);
}
return NULL;
}
| 5,430 | 26.429293 | 115 |
c
|
criu
|
criu-master/criu/libnetlink.c
|
#include <linux/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <libnl3/netlink/attr.h>
#include <libnl3/netlink/msg.h>
#include <string.h>
#include <unistd.h>
#include "libnetlink.h"
#include "util.h"
static int nlmsg_receive(char *buf, int len, int (*cb)(struct nlmsghdr *, struct ns_id *ns, void *),
int (*err_cb)(int, struct ns_id *, void *), struct ns_id *ns, void *arg)
{
struct nlmsghdr *hdr;
for (hdr = (struct nlmsghdr *)buf; NLMSG_OK(hdr, len); hdr = NLMSG_NEXT(hdr, len)) {
if (hdr->nlmsg_seq != CR_NLMSG_SEQ)
continue;
if (hdr->nlmsg_type == NLMSG_DONE) {
int *length = (int *)NLMSG_DATA(hdr);
if (*length < 0)
return err_cb(*length, ns, arg);
return 0;
}
if (hdr->nlmsg_type == NLMSG_ERROR) {
struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(hdr);
if (hdr->nlmsg_len - sizeof(*hdr) < sizeof(struct nlmsgerr)) {
pr_err("ERROR truncated\n");
return -1;
}
if (err->error == 0)
return 0;
return err_cb(err->error, ns, arg);
}
if (cb(hdr, ns, arg))
return -1;
}
return 1;
}
/*
* Default error handler: just print out the error
* and pass it up to the caller.
*/
static int rtnl_return_err(int err, struct ns_id *ns, void *arg)
{
errno = -err;
pr_perror("%d reported by netlink", err);
return err;
}
int do_rtnl_req(int nl, void *req, int size, int (*receive_callback)(struct nlmsghdr *h, struct ns_id *ns, void *),
int (*error_callback)(int err, struct ns_id *ns, void *arg), struct ns_id *ns, void *arg)
{
struct msghdr msg;
struct sockaddr_nl nladdr;
struct iovec iov;
static char buf[16384];
int err;
if (!error_callback)
error_callback = rtnl_return_err;
memset(&msg, 0, sizeof(msg));
msg.msg_name = &nladdr;
msg.msg_namelen = sizeof(nladdr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
iov.iov_base = req;
iov.iov_len = size;
if (sendmsg(nl, &msg, 0) < 0) {
err = -errno;
pr_perror("Can't send request message");
goto err;
}
iov.iov_base = buf;
iov.iov_len = sizeof(buf);
while (1) {
memset(&msg, 0, sizeof(msg));
msg.msg_name = &nladdr;
msg.msg_namelen = sizeof(nladdr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
err = recvmsg(nl, &msg, 0);
if (err < 0) {
if (errno == EINTR)
continue;
else {
err = -errno;
pr_perror("Error receiving nl report");
goto err;
}
}
if (err == 0)
break;
if (msg.msg_flags & MSG_TRUNC) {
pr_err("Message truncated\n");
err = -EMSGSIZE;
goto err;
}
err = nlmsg_receive(buf, err, receive_callback, error_callback, ns, arg);
if (err < 0)
goto err;
if (err == 0)
break;
}
return 0;
err:
return err;
}
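/*
 * A minimal usage sketch (not part of the original file): dump all network
 * links with RTM_GETLINK through do_rtnl_req(). The netlink socket and the
 * receive callback are assumed to be set up elsewhere.
 */
static int example_dump_links(int nl, int (*cb)(struct nlmsghdr *, struct ns_id *, void *))
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
	req.nlh.nlmsg_seq = CR_NLMSG_SEQ; /* nlmsg_receive() filters on this */
	req.g.rtgen_family = AF_PACKET;

	return do_rtnl_req(nl, &req, req.nlh.nlmsg_len, cb, NULL, NULL, NULL);
}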
int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen)
{
int len = nla_attr_size(alen);
struct rtattr *rta;
if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) {
pr_err("addattr_l ERROR: message exceeded bound of %d\n", maxlen);
return -1;
}
rta = NLMSG_TAIL(n);
rta->rta_type = type;
rta->rta_len = len;
memcpy(RTA_DATA(rta), data, alen);
n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
return 0;
}
/*
* Here is a workaround for a bug in libnl-3:
* 6a8d90f5fec4 "attr: Allow attribute type 0"
*/
/**
* Create attribute index based on a stream of attributes.
* @arg tb Index array to be filled (maxtype+1 elements).
* @arg maxtype Maximum attribute type expected and accepted.
* @arg head Head of attribute stream.
* @arg len Length of attribute stream.
* @arg policy Attribute validation policy.
*
* Iterates over the stream of attributes and stores a pointer to each
* attribute in the index array using the attribute type as index to
* the array. Attribute with a type greater than the maximum type
* specified will be silently ignored in order to maintain backwards
* compatibility. If \a policy is not NULL, the attribute will be
* validated using the specified policy.
*
* @see nla_validate
* @return 0 on success or a negative error code.
*/
int __wrap_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct nla_policy *policy)
{
struct nlattr *nla;
int rem;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
int type = nla_type(nla);
if (type > maxtype)
continue;
if (tb[type])
pr_warn("Attribute of type %#x found multiple times in message, "
"previous attribute is being ignored.\n",
type);
tb[type] = nla;
}
if (rem > 0)
pr_warn("netlink: %d bytes leftover after parsing "
"attributes.\n",
rem);
return 0;
}
/**
* parse attributes of a netlink message
* @arg nlh netlink message header
* @arg hdrlen length of family specific header
* @arg tb destination array with maxtype+1 elements
* @arg maxtype maximum attribute type to be expected
* @arg policy validation policy
*
* See nla_parse()
*/
int __wrap_nlmsg_parse(struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, struct nla_policy *policy)
{
if (!nlmsg_valid_hdr(nlh, hdrlen))
return -NLE_MSG_TOOSHORT;
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), policy);
}
int32_t nla_get_s32(const struct nlattr *nla)
{
return *(const int32_t *)nla_data(nla);
}
| 5,342 | 23.067568 | 117 |
c
|
criu
|
criu-master/criu/log.c
|
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#include <stdbool.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/utsname.h>
#include <fcntl.h>
#include "page.h"
#include "common/compiler.h"
#include "util.h"
#include "cr_options.h"
#include "servicefd.h"
#include "rst-malloc.h"
#include "common/lock.h"
#include "string.h"
#include "version.h"
#include "../soccr/soccr.h"
#include "compel/log.h"
#define DEFAULT_LOGFD STDERR_FILENO
/* Enable timestamps if verbosity is increased from default */
#define LOG_TIMESTAMP (DEFAULT_LOGLEVEL + 1)
#define LOG_BUF_LEN (8 * 1024)
#define EARLY_LOG_BUF_LEN 1024
static unsigned int current_loglevel = DEFAULT_LOGLEVEL;
static void vprint_on_level(unsigned int, const char *, va_list);
static char buffer[LOG_BUF_LEN];
static char buf_off = 0;
/*
* The early_log_buffer is used to store log messages before
* logging is set up to make sure no logs are lost.
*/
static char early_log_buffer[EARLY_LOG_BUF_LEN];
static unsigned int early_log_buf_off = 0;
/* If this is 0 the logging has not been set up yet. */
static int init_done = 0;
static struct timeval start;
/*
* Manual buffer length, as snprintf will _always_ put '\0' at the end,
* but we want a "constant" pid to be there on restore
*/
#define TS_BUF_OFF 12
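/*
 * Comment added for clarity: the timestamp prefix occupies exactly
 * TS_BUF_OFF (12) bytes, "(SS.UUUUUU) "; print_ts() writes the digits with
 * snprintf() and then patches the closing ") " in by hand so no '\0' is
 * left behind.
 */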
static void timediff(struct timeval *from, struct timeval *to)
{
to->tv_sec -= from->tv_sec;
if (to->tv_usec >= from->tv_usec)
to->tv_usec -= from->tv_usec;
else {
to->tv_sec--;
to->tv_usec += USEC_PER_SEC - from->tv_usec;
}
}
static void print_ts(void)
{
struct timeval t;
gettimeofday(&t, NULL);
timediff(&start, &t);
snprintf(buffer, TS_BUF_OFF, "(%02u.%06u", (unsigned)t.tv_sec, (unsigned)t.tv_usec);
buffer[TS_BUF_OFF - 2] = ')'; /* this will overwrite the last digit if tv_sec>=100 */
buffer[TS_BUF_OFF - 1] = ' '; /* kill the '\0' produced by snprintf */
}
int log_get_fd(void)
{
int fd = get_service_fd(LOG_FD_OFF);
return fd < 0 ? DEFAULT_LOGFD : fd;
}
void log_get_logstart(struct timeval *s)
{
if (current_loglevel >= LOG_TIMESTAMP)
*s = start;
else {
s->tv_sec = 0;
s->tv_usec = 0;
}
}
static void reset_buf_off(void)
{
if (current_loglevel >= LOG_TIMESTAMP)
/* reserve space for a timestamp */
buf_off = TS_BUF_OFF;
else
buf_off = 0;
}
/*
* Keeping the very first error message for RPC to report back.
*/
struct str_and_lock {
mutex_t l;
char s[1024];
};
static struct str_and_lock *first_err;
int log_keep_err(void)
{
first_err = shmalloc(sizeof(struct str_and_lock));
if (first_err == NULL)
return -1;
mutex_init(&first_err->l);
first_err->s[0] = '\0';
return 0;
}
static void log_note_err(char *msg)
{
if (first_err && first_err->s[0] == '\0') {
/*
* In any action other than restore this locking is
* actually not required, but ... it's error path
* anyway, so it doesn't make much sense to try hard
* and optimize this out.
*/
mutex_lock(&first_err->l);
if (first_err->s[0] == '\0')
__strlcpy(first_err->s, msg, sizeof(first_err->s));
mutex_unlock(&first_err->l);
}
}
char *log_first_err(void)
{
if (!first_err)
return NULL;
if (first_err->s[0] == '\0')
return NULL;
return first_err->s;
}
static void print_versions(void)
{
struct utsname buf;
pr_info("Version: %s (gitid %s)\n", CRIU_VERSION, CRIU_GITID);
if (uname(&buf) < 0) {
pr_perror("Reading kernel version failed!");
/* This pretty unlikely, just keep on running. */
return;
}
pr_info("Running on %s %s %s %s %s\n", buf.nodename, buf.sysname, buf.release, buf.version, buf.machine);
}
struct early_log_hdr {
uint16_t level;
uint16_t len;
};
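/*
 * Note (comment added for clarity): both the writer (early_vprint) and the
 * reader (flush_early_log_buffer) advance past the header by sizeof(hdr),
 * i.e. pointer size rather than sizeof(*hdr); the two sides agree, so
 * entries are laid out consistently.
 */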
void flush_early_log_buffer(int fd)
{
unsigned int pos = 0;
int ret;
while (pos < early_log_buf_off) {
/*
* The early_log_buffer contains all messages written
* before logging was set up. We only want to print
* out messages which correspond to the requested
* log_level. Therefore the early_log_buffer also contains
* the log_level and the size. This writes one message at a time,
* depending on its log_level, to the logging fd. Start
* with reading the log_level.
*/
struct early_log_hdr *hdr = (void *)early_log_buffer + pos;
pos += sizeof(hdr);
if (hdr->level <= current_loglevel) {
size_t size = 0;
while (size < hdr->len) {
ret = write(fd, early_log_buffer + pos + size, hdr->len - size);
if (ret <= 0)
break;
size += ret;
}
}
pos += hdr->len;
}
if (early_log_buf_off == EARLY_LOG_BUF_LEN)
pr_warn("The early log buffer is full, some messages may have been lost\n");
early_log_buf_off = 0;
}
int log_init(const char *output)
{
int new_logfd, fd;
gettimeofday(&start, NULL);
reset_buf_off();
if (output && !strncmp(output, "-", 2)) {
new_logfd = dup(STDOUT_FILENO);
if (new_logfd < 0) {
pr_perror("Can't dup stdout stream");
return -1;
}
} else if (output) {
new_logfd = open(output, O_CREAT | O_TRUNC | O_WRONLY | O_APPEND, 0600);
if (new_logfd < 0) {
pr_perror("Can't create log file %s", output);
return -1;
}
} else {
new_logfd = dup(DEFAULT_LOGFD);
if (new_logfd < 0) {
pr_perror("Can't dup log file");
return -1;
}
}
fd = install_service_fd(LOG_FD_OFF, new_logfd);
if (fd < 0)
goto err;
init_done = 1;
/*
* Once logging is set up, this writes out all early log messages.
* Only those messages which have the correct log level are printed.
*/
flush_early_log_buffer(fd);
print_versions();
return 0;
err:
pr_perror("Log engine failure, can't duplicate descriptor");
return -1;
}
int log_init_by_pid(pid_t pid)
{
char path[PATH_MAX];
/*
* reset buf_off as this fn is called on each fork while
* restoring process tree
*/
reset_buf_off();
if (!opts.log_file_per_pid) {
buf_off += snprintf(buffer + buf_off, sizeof buffer - buf_off, "%6d: ", pid);
return 0;
}
if (!opts.output)
return 0;
snprintf(path, PATH_MAX, "%s.%d", opts.output, pid);
return log_init(path);
}
void log_fini(void)
{
close_service_fd(LOG_FD_OFF);
}
static void soccr_print_on_level(unsigned int loglevel, const char *format, ...)
{
va_list args;
int lv;
switch (loglevel) {
case SOCCR_LOG_DBG:
lv = LOG_DEBUG;
break;
case SOCCR_LOG_ERR:
lv = LOG_ERROR;
break;
default:
lv = LOG_INFO;
break;
}
va_start(args, format);
vprint_on_level(lv, format, args);
va_end(args);
}
void log_set_loglevel(unsigned int level)
{
current_loglevel = level;
libsoccr_set_log(level, soccr_print_on_level);
compel_log_init(vprint_on_level, level);
}
unsigned int log_get_loglevel(void)
{
return current_loglevel;
}
static void early_vprint(const char *format, unsigned int loglevel, va_list params)
{
unsigned int log_size = 0;
struct early_log_hdr *hdr;
if ((early_log_buf_off + sizeof(hdr)) >= EARLY_LOG_BUF_LEN)
return;
/* Save loglevel */
hdr = (void *)early_log_buffer + early_log_buf_off;
hdr->level = loglevel;
/* Skip the log entry size */
early_log_buf_off += sizeof(hdr);
if (loglevel >= LOG_TIMESTAMP) {
/*
* If logging is not yet setup we just write zeros
* instead of a real timestamp. This way we can
* keep the same format as the other messages on
* log levels with timestamps (>=LOG_TIMESTAMP).
*/
log_size = snprintf(early_log_buffer + early_log_buf_off, sizeof(early_log_buffer) - early_log_buf_off,
"(00.000000) ");
}
log_size += vsnprintf(early_log_buffer + early_log_buf_off + log_size,
sizeof(early_log_buffer) - early_log_buf_off - log_size, format, params);
/* Save log entry size */
hdr->len = log_size;
early_log_buf_off += log_size;
}
static void vprint_on_level(unsigned int loglevel, const char *format, va_list params)
{
int fd, size, ret, off = 0;
int _errno = errno;
if (unlikely(loglevel == LOG_MSG)) {
fd = STDOUT_FILENO;
off = buf_off; /* skip dangling timestamp */
} else {
/*
* If logging has not yet been initialized (init_done == 0)
* make sure all messages are written to the early_log_buffer.
*/
if (!init_done) {
early_vprint(format, loglevel, params);
return;
}
if (loglevel > current_loglevel)
return;
fd = log_get_fd();
if (current_loglevel >= LOG_TIMESTAMP)
print_ts();
}
size = vsnprintf(buffer + buf_off, sizeof buffer - buf_off, format, params);
size += buf_off;
while (off < size) {
ret = write(fd, buffer + off, size - off);
if (ret <= 0)
break;
off += ret;
}
/* This is missing for messages in the early_log_buffer. */
if (loglevel == LOG_ERROR)
log_note_err(buffer + buf_off);
errno = _errno;
}
void print_on_level(unsigned int loglevel, const char *format, ...)
{
va_list params;
va_start(params, format);
vprint_on_level(loglevel, format, params);
va_end(params);
}
int write_pidfile(int pid)
{
int fd, ret, exit_code = -1;
fd = open(opts.pidfile, O_WRONLY | O_EXCL | O_CREAT, 0600);
if (fd == -1) {
pr_perror("pidfile: Can't open %s", opts.pidfile);
return -1;
}
ret = dprintf(fd, "%d", pid);
if (ret < 0) {
pr_perror("pidfile: Can't write pid %d to %s", pid, opts.pidfile);
goto close;
}
if (ret == 0) {
pr_err("pidfile: Can't write pid %d to %s\n", pid, opts.pidfile);
goto close;
}
pr_debug("pidfile: Wrote pid %d to %s (%d bytes)\n", pid, opts.pidfile, ret);
exit_code = 0;
close:
close(fd);
return exit_code;
}
| 9,386 | 21.035211 | 106 |
c
|
criu
|
criu-master/criu/memfd.c
|
#include <unistd.h>
#include <linux/memfd.h>
#include "common/compiler.h"
#include "common/lock.h"
#include "memfd.h"
#include "fdinfo.h"
#include "imgset.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "files.h"
#include "fs-magic.h"
#include "kerndat.h"
#include "files-reg.h"
#include "rst-malloc.h"
#include "fdstore.h"
#include "file-ids.h"
#include "namespaces.h"
#include "shmem.h"
#include "hugetlb.h"
#include "protobuf.h"
#include "images/memfd.pb-c.h"
#define MEMFD_PREFIX "/memfd:"
#define MEMFD_PREFIX_LEN (sizeof(MEMFD_PREFIX) - 1)
#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
/* Linux 5.1+ */
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
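/*
 * Comment added for clarity: the F_SEAL_* values are duplicated from
 * <linux/fcntl.h> so that CRIU still builds against older kernel headers
 * that lack some of them (F_SEAL_FUTURE_WRITE only appeared in Linux 5.1).
 */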
struct memfd_dump_inode {
struct list_head list;
u32 id;
u32 dev;
u32 ino;
};
struct memfd_restore_inode {
struct list_head list;
mutex_t lock;
int fdstore_id;
unsigned int pending_seals;
MemfdInodeEntry *mie;
};
static LIST_HEAD(memfd_inodes);
/*
* Dump only
*/
static u32 memfd_inode_ids = 1;
int is_memfd(dev_t dev)
{
return dev == kdat.shmem_dev;
}
static int dump_memfd_inode(int fd, struct memfd_dump_inode *inode, const char *name, const struct stat *st)
{
MemfdInodeEntry mie = MEMFD_INODE_ENTRY__INIT;
int ret = -1, flag;
u32 shmid;
/*
* shmids are chosen as the inode number of the corresponding mmapped
* file. See handle_vma() in proc_parse.c.
* It works for memfd too, because we share the same device as the
* shmem device.
*/
shmid = inode->ino;
pr_info("Dumping memfd:%s contents (id %#x, shmid: %#x, size: %" PRIu64 ")\n", name, inode->id, shmid,
st->st_size);
if (dump_one_memfd_shmem(fd, shmid, st->st_size) < 0)
goto out;
mie.inode_id = inode->id;
mie.uid = userns_uid(st->st_uid);
mie.gid = userns_gid(st->st_gid);
mie.name = (char *)name;
mie.size = st->st_size;
mie.shmid = shmid;
if (is_hugetlb_dev(inode->dev, &flag)) {
mie.has_hugetlb_flag = true;
mie.hugetlb_flag = flag | MFD_HUGETLB;
}
mie.seals = fcntl(fd, F_GET_SEALS);
if (mie.seals == -1)
goto out;
if (pb_write_one(img_from_set(glob_imgset, CR_FD_MEMFD_INODE), &mie, PB_MEMFD_INODE))
goto out;
ret = 0;
out:
return ret;
}
static struct memfd_dump_inode *dump_unique_memfd_inode(int lfd, const char *name, const struct stat *st)
{
struct memfd_dump_inode *inode;
int fd;
list_for_each_entry(inode, &memfd_inodes, list)
if ((inode->dev == st->st_dev) && (inode->ino == st->st_ino))
return inode;
inode = xmalloc(sizeof(*inode));
if (inode == NULL)
return NULL;
inode->dev = st->st_dev;
inode->ino = st->st_ino;
inode->id = memfd_inode_ids++;
fd = open_proc(PROC_SELF, "fd/%d", lfd);
if (fd < 0) {
xfree(inode);
return NULL;
}
if (dump_memfd_inode(fd, inode, name, st)) {
close(fd);
xfree(inode);
return NULL;
}
close(fd);
list_add_tail(&inode->list, &memfd_inodes);
return inode;
}
static int dump_one_memfd(int lfd, u32 id, const struct fd_parms *p)
{
MemfdFileEntry mfe = MEMFD_FILE_ENTRY__INIT;
FileEntry fe = FILE_ENTRY__INIT;
struct memfd_dump_inode *inode;
struct fd_link _link, *link;
const char *name;
if (!p->link) {
if (fill_fdlink(lfd, p, &_link))
return -1;
link = &_link;
} else
link = p->link;
link_strip_deleted(link);
/* link->name is always started with "." which has to be skipped. */
if (strncmp(link->name + 1, MEMFD_PREFIX, MEMFD_PREFIX_LEN) == 0)
name = &link->name[1 + MEMFD_PREFIX_LEN];
else
name = link->name + 1;
inode = dump_unique_memfd_inode(lfd, name, &p->stat);
if (!inode)
return -1;
mfe.id = id;
mfe.flags = p->flags;
mfe.pos = p->pos;
mfe.fown = (FownEntry *)&p->fown;
mfe.inode_id = inode->id;
fe.type = FD_TYPES__MEMFD;
fe.id = mfe.id;
fe.memfd = &mfe;
return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
int dump_one_memfd_cond(int lfd, u32 *id, struct fd_parms *parms)
{
if (fd_id_generate_special(parms, id))
return dump_one_memfd(lfd, *id, parms);
return 0;
}
const struct fdtype_ops memfd_dump_ops = {
.type = FD_TYPES__MEMFD,
.dump = dump_one_memfd,
};
/*
* Restore only
*/
struct memfd_info {
MemfdFileEntry *mfe;
struct file_desc d;
struct memfd_restore_inode *inode;
};
static struct memfd_restore_inode *memfd_alloc_inode(int id)
{
struct memfd_restore_inode *inode;
list_for_each_entry(inode, &memfd_inodes, list)
if (inode->mie->inode_id == id)
return inode;
pr_err("Unable to find the %d memfd inode\n", id);
return NULL;
}
static int collect_one_memfd_inode(void *o, ProtobufCMessage *base, struct cr_img *i)
{
MemfdInodeEntry *mie = pb_msg(base, MemfdInodeEntry);
struct memfd_restore_inode *inode = o;
inode->mie = mie;
mutex_init(&inode->lock);
inode->fdstore_id = -1;
inode->pending_seals = 0;
list_add_tail(&inode->list, &memfd_inodes);
return 0;
}
static struct collect_image_info memfd_inode_cinfo = {
.fd_type = CR_FD_MEMFD_INODE,
.pb_type = PB_MEMFD_INODE,
.priv_size = sizeof(struct memfd_restore_inode),
.collect = collect_one_memfd_inode,
.flags = COLLECT_SHARED | COLLECT_NOFREE,
};
int prepare_memfd_inodes(void)
{
return collect_image(&memfd_inode_cinfo);
}
static int memfd_open_inode_nocache(struct memfd_restore_inode *inode)
{
MemfdInodeEntry *mie = NULL;
int fd = -1;
int ret = -1;
int flags;
mie = inode->mie;
if (mie->seals == F_SEAL_SEAL) {
inode->pending_seals = 0;
flags = 0;
} else {
/* Seals are applied later due to F_SEAL_FUTURE_WRITE */
inode->pending_seals = mie->seals;
flags = MFD_ALLOW_SEALING;
}
if (mie->has_hugetlb_flag)
flags |= mie->hugetlb_flag;
fd = memfd_create(mie->name, flags);
if (fd < 0) {
pr_perror("Can't create memfd:%s", mie->name);
goto out;
}
if (restore_memfd_shmem_content(fd, mie->shmid, mie->size))
goto out;
if (fchown(fd, mie->uid, mie->gid)) {
pr_perror("Can't change uid %d gid %d of memfd:%s", (int)mie->uid, (int)mie->gid, mie->name);
goto out;
}
inode->fdstore_id = fdstore_add(fd);
if (inode->fdstore_id < 0)
goto out;
ret = fd;
fd = -1;
out:
if (fd != -1)
close(fd);
return ret;
}
static int memfd_open_inode(struct memfd_restore_inode *inode)
{
int fd;
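	/*
	 * Double-checked locking (comment added for clarity): the unlocked
	 * fdstore_id test is a fast path; it is re-checked under the mutex so
	 * that only one task actually creates and caches the memfd.
	 */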
if (inode->fdstore_id != -1)
return fdstore_get(inode->fdstore_id);
mutex_lock(&inode->lock);
if (inode->fdstore_id != -1)
fd = fdstore_get(inode->fdstore_id);
else
fd = memfd_open_inode_nocache(inode);
mutex_unlock(&inode->lock);
return fd;
}
int memfd_open(struct file_desc *d, u32 *fdflags)
{
struct memfd_info *mfi;
MemfdFileEntry *mfe;
int fd, _fd;
u32 flags;
mfi = container_of(d, struct memfd_info, d);
mfe = mfi->mfe;
if (inherited_fd(d, &fd))
return fd;
pr_info("Restoring memfd id=%d\n", mfe->id);
fd = memfd_open_inode(mfi->inode);
if (fd < 0)
goto err;
/* Reopen the fd with original permissions */
flags = fdflags ? *fdflags : mfe->flags;
/*
* Ideally we should call the compat version of open() so as not to force
* the O_LARGEFILE flag with the regular open(). It doesn't seem that
* important though.
*/
_fd = __open_proc(PROC_SELF, 0, flags, "fd/%d", fd);
if (_fd < 0) {
pr_perror("Can't reopen memfd id=%d", mfe->id);
goto err;
}
close(fd);
fd = _fd;
if (restore_fown(fd, mfe->fown) < 0)
goto err;
if (lseek(fd, mfe->pos, SEEK_SET) < 0) {
pr_perror("Can't restore file position of memfd id=%d", mfe->id);
goto err;
}
return fd;
err:
if (fd >= 0)
close(fd);
return -1;
}
static int memfd_open_fe_fd(struct file_desc *fd, int *new_fd)
{
int tmp;
tmp = memfd_open(fd, NULL);
if (tmp < 0)
return -1;
*new_fd = tmp;
return 0;
}
static char *memfd_d_name(struct file_desc *d, char *buf, size_t s)
{
MemfdInodeEntry *mie = NULL;
struct memfd_info *mfi;
mfi = container_of(d, struct memfd_info, d);
mie = mfi->inode->mie;
if (snprintf(buf, s, "%s%s", MEMFD_PREFIX, mie->name) >= s) {
pr_err("Buffer too small for memfd name %s\n", mie->name);
return NULL;
}
return buf;
}
static struct file_desc_ops memfd_desc_ops = {
.type = FD_TYPES__MEMFD,
.open = memfd_open_fe_fd,
.name = memfd_d_name,
};
static int collect_one_memfd(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct memfd_info *info = o;
info->mfe = pb_msg(msg, MemfdFileEntry);
info->inode = memfd_alloc_inode(info->mfe->inode_id);
if (!info->inode)
return -1;
return file_desc_add(&info->d, info->mfe->id, &memfd_desc_ops);
}
struct collect_image_info memfd_cinfo = {
.fd_type = CR_FD_MEMFD_FILE,
.pb_type = PB_MEMFD_FILE,
.priv_size = sizeof(struct memfd_info),
.collect = collect_one_memfd,
};
struct file_desc *collect_memfd(u32 id)
{
struct file_desc *fdesc;
fdesc = find_file_desc_raw(FD_TYPES__MEMFD, id);
if (fdesc == NULL)
pr_err("No entry for memfd %#x\n", id);
return fdesc;
}
int apply_memfd_seals(void)
{
/*
* We apply the seals after all the mappings are done because the seal
* F_SEAL_FUTURE_WRITE prevents future write access (added in
* Linux 5.1). Thus we must make sure all writable mappings are opened
* before applying this seal.
*/
int ret, fd;
struct memfd_restore_inode *inode;
list_for_each_entry(inode, &memfd_inodes, list) {
if (!inode->pending_seals)
continue;
fd = memfd_open_inode(inode);
if (fd < 0)
return -1;
ret = fcntl(fd, F_ADD_SEALS, inode->pending_seals);
close(fd);
if (ret < 0) {
pr_perror("Cannot apply seals on memfd");
return -1;
}
}
return 0;
}
| 9,579 | 20.19469 | 108 |
c
|
criu
|
criu-master/criu/netfilter.c
|
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>
#include <sys/wait.h>
#include <stdlib.h>
#if defined(CONFIG_HAS_NFTABLES_LIB_API_0) || defined(CONFIG_HAS_NFTABLES_LIB_API_1)
#include <nftables/libnftables.h>
#endif
#include "../soccr/soccr.h"
#include "util.h"
#include "common/list.h"
#include "files.h"
#include "netfilter.h"
#include "sockets.h"
#include "sk-inet.h"
#include "kerndat.h"
#include "pstree.h"
static char buf[512];
#define NFTABLES_CONN_CMD "add element %s conns%c { %s . %d . %s . %d }"
/*
* Need to configure simple netfilter rules for blocking connections
* Any brave soul to write it using xtables-devel?
*/
#define IPTABLES_CONN_CMD \
"%s %s -t filter %s %s --protocol tcp " \
"-m mark ! --mark " __stringify(SOCCR_MARK) " --source %s --sport %d --destination %s --dport %d -j DROP"
static char iptable_cmd_ipv4[] = "iptables";
static char iptable_cmd_ipv6[] = "ip6tables";
void preload_netfilter_modules(void)
{
int fd = -1;
	/* Same as with the socket modules: ip_tables and ip6_tables will be
	 * loaded by CRIU, so we should try to preload these as well.
*/
fd = open("/dev/null", O_RDWR);
if (fd < 0) {
fd = -1;
pr_perror("failed to open /dev/null, using log fd for net module preload");
}
cr_system(fd, fd, fd, iptable_cmd_ipv4, (char *[]){ iptable_cmd_ipv4, "-L", "-n", NULL }, 0);
cr_system(fd, fd, fd, iptable_cmd_ipv6, (char *[]){ iptable_cmd_ipv6, "-L", "-n", NULL }, 0);
close_safe(&fd);
}
/*
 * IPv4-mapped IPv6 addresses (::ffff:a.b.c.d): the IPv4 address lives in
 * the last 32-bit word, with 0xffff in the word before it.
 */
static int ipv6_addr_mapped(u32 *addr)
{
return (addr[2] == htonl(0x0000ffff));
}
static int iptables_connection_switch_raw(int family, u32 *src_addr, u16 src_port, u32 *dst_addr, u16 dst_port,
bool input, bool lock)
{
char sip[INET_ADDR_LEN], dip[INET_ADDR_LEN];
char *cmd;
char *argv[4] = { "sh", "-c", buf, NULL };
int ret;
if (family == AF_INET6 && ipv6_addr_mapped(dst_addr)) {
family = AF_INET;
src_addr = &src_addr[3];
dst_addr = &dst_addr[3];
}
switch (family) {
case AF_INET:
cmd = iptable_cmd_ipv4;
break;
case AF_INET6:
cmd = iptable_cmd_ipv6;
break;
default:
pr_err("Unknown socket family %d\n", family);
return -1;
};
if (!inet_ntop(family, (void *)src_addr, sip, INET_ADDR_LEN) ||
!inet_ntop(family, (void *)dst_addr, dip, INET_ADDR_LEN)) {
pr_perror("nf: Can't translate ip addr");
return -1;
}
snprintf(buf, sizeof(buf), IPTABLES_CONN_CMD, cmd, kdat.has_xtlocks ? "-w" : "", lock ? "-I" : "-D",
input ? "INPUT" : "OUTPUT", dip, (int)dst_port, sip, (int)src_port);
pr_debug("\tRunning iptables [%s]\n", buf);
/*
	 * cr_system is used here because it blocks SIGCHLD before waiting
	 * for the child; a child can't be reaped from the SIGCHLD handler.
*/
ret = cr_system(-1, -1, -1, "sh", argv, 0);
if (ret < 0 || !WIFEXITED(ret) || WEXITSTATUS(ret)) {
pr_err("Iptables configuration failed\n");
return -1;
}
pr_info("%s %s:%d - %s:%d connection\n", lock ? "Locked" : "Unlocked", sip, (int)src_port, dip, (int)dst_port);
return 0;
}
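
/*
 * For illustration (addresses made up): locking the INPUT direction of a
 * connection 10.0.0.1:40000 -> 10.0.0.2:80 runs roughly
 *
 *	iptables -w -t filter -I INPUT --protocol tcp \
 *		-m mark ! --mark <SOCCR_MARK> \
 *		--source 10.0.0.2 --sport 80 \
 *		--destination 10.0.0.1 --dport 40000 -j DROP
 *
 * i.e. inbound packets of that single 4-tuple are dropped unless they
 * carry CRIU's own mark, so soccr can still talk to the peer while the
 * application is blocked.
 */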
static int iptables_connection_switch(struct inet_sk_desc *sk, bool lock)
{
int ret = 0;
ret = iptables_connection_switch_raw(sk->sd.family, sk->src_addr, sk->src_port, sk->dst_addr, sk->dst_port,
true, lock);
if (ret)
return -1;
ret = iptables_connection_switch_raw(sk->sd.family, sk->dst_addr, sk->dst_port, sk->src_addr, sk->src_port,
false, lock);
if (ret) /* rollback */
iptables_connection_switch_raw(sk->sd.family, sk->src_addr, sk->src_port, sk->dst_addr, sk->dst_port,
true, !lock);
return ret;
}
int iptables_lock_connection(struct inet_sk_desc *sk)
{
return iptables_connection_switch(sk, true);
}
int iptables_unlock_connection(struct inet_sk_desc *sk)
{
return iptables_connection_switch(sk, false);
}
int iptables_unlock_connection_info(struct inet_sk_info *si)
{
int ret = 0;
ret |= iptables_connection_switch_raw(si->ie->family, si->ie->src_addr, si->ie->src_port, si->ie->dst_addr,
si->ie->dst_port, true, false);
ret |= iptables_connection_switch_raw(si->ie->family, si->ie->dst_addr, si->ie->dst_port, si->ie->src_addr,
si->ie->src_port, false, false);
/*
* rollback nothing in case of any error,
* because nobody checks errors of this function
*/
return ret;
}
int nftables_init_connection_lock(void)
{
#if defined(CONFIG_HAS_NFTABLES_LIB_API_0) || defined(CONFIG_HAS_NFTABLES_LIB_API_1)
struct nft_ctx *nft;
int ret = 0;
char table[32];
if (nftables_get_table(table, sizeof(table)))
return -1;
nft = nft_ctx_new(NFT_CTX_DEFAULT);
if (!nft)
return -1;
snprintf(buf, sizeof(buf), "create table %s", table);
if (NFT_RUN_CMD(nft, buf))
goto err2;
snprintf(buf, sizeof(buf), "add chain %s output { type filter hook output priority 0; }", table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s output meta mark " __stringify(SOCCR_MARK) " accept", table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add chain %s input { type filter hook input priority 0; }", table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s input meta mark " __stringify(SOCCR_MARK) " accept", table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
/* IPv4 */
snprintf(buf, sizeof(buf), "add set %s conns4 { type ipv4_addr . inet_service . ipv4_addr . inet_service; }",
table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s output ip saddr . tcp sport . ip daddr . tcp dport @conns4 drop",
table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s input ip saddr . tcp sport . ip daddr . tcp dport @conns4 drop", table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
/* IPv6 */
snprintf(buf, sizeof(buf), "add set %s conns6 { type ipv6_addr . inet_service . ipv6_addr . inet_service; }",
table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s output ip6 saddr . tcp sport . ip6 daddr . tcp dport @conns6 drop",
table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
snprintf(buf, sizeof(buf), "add rule %s input ip6 saddr . tcp sport . ip6 daddr . tcp dport @conns6 drop",
table);
if (NFT_RUN_CMD(nft, buf))
goto err1;
goto out;
err1:
snprintf(buf, sizeof(buf), "delete table %s", table);
NFT_RUN_CMD(nft, buf);
pr_err("Locking network failed using nftables\n");
err2:
ret = -1;
out:
nft_ctx_free(nft);
return ret;
#else
pr_err("CRIU was built without libnftables support\n");
return -1;
#endif
}
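
/*
 * Roughly, the commands above assemble a per-dump ruleset like the
 * following (the table name comes from nftables_get_table(), e.g.
 * "inet CRIU-1234"):
 *
 *	table inet CRIU-1234 {
 *		set conns4 { type ipv4_addr . inet_service . ipv4_addr . inet_service }
 *		set conns6 { type ipv6_addr . inet_service . ipv6_addr . inet_service }
 *		chain output {
 *			type filter hook output priority 0;
 *			meta mark <SOCCR_MARK> accept
 *			ip saddr . tcp sport . ip daddr . tcp dport @conns4 drop
 *			ip6 saddr . tcp sport . ip6 daddr . tcp dport @conns6 drop
 *		}
 *		chain input { ... the same on the input hook ... }
 *	}
 *
 * Locking a connection then reduces to adding its 4-tuple to conns4 or
 * conns6, see nftables_lock_connection_raw() below.
 */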
static int nftables_lock_connection_raw(int family, u32 *src_addr, u16 src_port, u32 *dst_addr, u16 dst_port)
{
#if defined(CONFIG_HAS_NFTABLES_LIB_API_0) || defined(CONFIG_HAS_NFTABLES_LIB_API_1)
struct nft_ctx *nft;
int ret = 0;
char sip[INET_ADDR_LEN], dip[INET_ADDR_LEN];
char table[32];
if (nftables_get_table(table, sizeof(table)))
return -1;
if (family == AF_INET6 && ipv6_addr_mapped(dst_addr)) {
family = AF_INET;
src_addr = &src_addr[3];
dst_addr = &dst_addr[3];
}
if (!inet_ntop(family, (void *)src_addr, sip, INET_ADDR_LEN)) {
pr_perror("nf: Can't convert src ip addr");
return -1;
}
if (!inet_ntop(family, (void *)dst_addr, dip, INET_ADDR_LEN)) {
pr_perror("nf: Can't convert dst ip addr");
return -1;
}
nft = nft_ctx_new(NFT_CTX_DEFAULT);
if (!nft)
return -1;
snprintf(buf, sizeof(buf), NFTABLES_CONN_CMD, table, family == AF_INET ? '4' : '6', dip, (int)dst_port, sip,
(int)src_port);
pr_debug("\tRunning nftables [%s]\n", buf);
if (NFT_RUN_CMD(nft, buf)) {
ret = -1;
pr_err("Locking connection failed using nftables\n");
}
nft_ctx_free(nft);
return ret;
#else
pr_err("CRIU was built without libnftables support\n");
return -1;
#endif
}
int nftables_lock_connection(struct inet_sk_desc *sk)
{
int ret = 0;
ret = nftables_lock_connection_raw(sk->sd.family, sk->src_addr, sk->src_port, sk->dst_addr, sk->dst_port);
if (ret)
return -1;
ret = nftables_lock_connection_raw(sk->sd.family, sk->dst_addr, sk->dst_port, sk->src_addr, sk->src_port);
return ret;
}
int nftables_get_table(char *table, int n)
{
if (snprintf(table, n, "inet CRIU-%d", root_item->pid->real) < 0) {
pr_err("Cannot generate CRIU's nftables table name\n");
return -1;
}
return 0;
}
| 8,308 | 25.977273 | 113 |
c
|
criu
|
criu-master/criu/page-pipe.c
|
#include <unistd.h>
#undef LOG_PREFIX
#define LOG_PREFIX "page-pipe: "
#include "common/config.h"
#include "page.h"
#include "util.h"
#include "criu-log.h"
#include "page-pipe.h"
#include "fcntl.h"
#include "stats.h"
#include "cr_options.h"
/* can existing iov accumulate the page? */
static inline bool iov_grow_page(struct iovec *iov, unsigned long addr)
{
if ((unsigned long)iov->iov_base + iov->iov_len == addr) {
iov->iov_len += PAGE_SIZE;
return true;
}
return false;
}
static inline void iov_init(struct iovec *iov, unsigned long addr)
{
iov->iov_base = (void *)addr;
iov->iov_len = PAGE_SIZE;
}
static int __ppb_resize_pipe(struct page_pipe_buf *ppb, unsigned long new_size)
{
int ret;
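	/*
	 * The kernel rounds the requested pipe size up to a power-of-two
	 * number of pages and returns the resulting capacity in bytes.
	 */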
ret = fcntl(ppb->p[0], F_SETPIPE_SZ, new_size * PAGE_SIZE);
if (ret < 0)
return -1;
ret /= PAGE_SIZE;
BUG_ON(ret < ppb->pipe_size);
pr_debug("Grow pipe %x -> %x\n", ppb->pipe_size, ret);
ppb->pipe_size = ret;
return 0;
}
static inline int ppb_resize_pipe(struct page_pipe_buf *ppb)
{
unsigned long new_size = ppb->pipe_size << 1;
int ret;
if (ppb->pages_in + ppb->pipe_off < ppb->pipe_size)
return 0;
if (new_size > PIPE_MAX_SIZE) {
if (ppb->pipe_size < PIPE_MAX_SIZE)
new_size = PIPE_MAX_SIZE;
else
return 1;
}
ret = __ppb_resize_pipe(ppb, new_size);
if (ret < 0)
return 1; /* need to add another buf */
return 0;
}
static struct page_pipe_buf *pp_prev_ppb(struct page_pipe *pp, unsigned int ppb_flags)
{
int type = 0;
	/* don't reuse a pipe in PP_CHUNK_MODE */
if (pp->flags & PP_CHUNK_MODE)
return NULL;
if (list_empty(&pp->bufs))
return NULL;
if (ppb_flags & PPB_LAZY && opts.lazy_pages)
type = 1;
return pp->prev[type];
}
static void pp_update_prev_ppb(struct page_pipe *pp, struct page_pipe_buf *ppb, unsigned int ppb_flags)
{
int type = 0;
if (ppb_flags & PPB_LAZY && opts.lazy_pages)
type = 1;
pp->prev[type] = ppb;
}
static struct page_pipe_buf *ppb_alloc(struct page_pipe *pp, unsigned int ppb_flags)
{
struct page_pipe_buf *prev = pp_prev_ppb(pp, ppb_flags);
struct page_pipe_buf *ppb;
ppb = xmalloc(sizeof(*ppb));
if (!ppb)
return NULL;
cnt_add(CNT_PAGE_PIPE_BUFS, 1);
if (prev && ppb_resize_pipe(prev) == 0) {
/* The previous pipe isn't full and we can continue to use it. */
ppb->p[0] = prev->p[0];
ppb->p[1] = prev->p[1];
ppb->pipe_off = prev->pages_in + prev->pipe_off;
ppb->pipe_size = prev->pipe_size;
} else {
if (pipe(ppb->p)) {
xfree(ppb);
pr_perror("Can't make pipe for page-pipe");
return NULL;
}
cnt_add(CNT_PAGE_PIPES, 1);
ppb->pipe_off = 0;
ppb->pipe_size = fcntl(ppb->p[0], F_GETPIPE_SZ, 0) / PAGE_SIZE;
pp->nr_pipes++;
}
list_add_tail(&ppb->l, &pp->bufs);
pp_update_prev_ppb(pp, ppb, ppb_flags);
return ppb;
}
static void ppb_destroy(struct page_pipe_buf *ppb)
{
/* Check whether a pipe is shared with another ppb */
if (ppb->pipe_off == 0) {
close(ppb->p[0]);
close(ppb->p[1]);
}
xfree(ppb);
}
static void ppb_init(struct page_pipe_buf *ppb, unsigned int pages_in, unsigned int nr_segs, unsigned int flags,
struct iovec *iov)
{
ppb->pages_in = pages_in;
ppb->nr_segs = nr_segs;
ppb->flags = flags;
ppb->iov = iov;
}
static int page_pipe_grow(struct page_pipe *pp, unsigned int flags)
{
struct page_pipe_buf *ppb;
struct iovec *free_iov;
pr_debug("Will grow page pipe (iov off is %u)\n", pp->free_iov);
if (!list_empty(&pp->free_bufs)) {
ppb = list_first_entry(&pp->free_bufs, struct page_pipe_buf, l);
list_move_tail(&ppb->l, &pp->bufs);
goto out;
}
if ((pp->flags & PP_CHUNK_MODE) && (pp->nr_pipes == NR_PIPES_PER_CHUNK))
return -EAGAIN;
ppb = ppb_alloc(pp, flags);
if (!ppb)
return -1;
out:
free_iov = &pp->iovs[pp->free_iov];
ppb_init(ppb, 0, 0, flags, free_iov);
return 0;
}
struct page_pipe *create_page_pipe(unsigned int nr_segs, struct iovec *iovs, unsigned flags)
{
struct page_pipe *pp;
pr_debug("Create page pipe for %u segs\n", nr_segs);
pp = xzalloc(sizeof(*pp));
if (!pp)
return NULL;
INIT_LIST_HEAD(&pp->free_bufs);
INIT_LIST_HEAD(&pp->bufs);
pp->nr_iovs = nr_segs;
pp->flags = flags;
if (!iovs) {
iovs = xmalloc(sizeof(*iovs) * nr_segs);
if (!iovs)
goto err_free_pp;
pp->flags |= PP_OWN_IOVS;
}
pp->iovs = iovs;
if (page_pipe_grow(pp, 0))
goto err_free_iovs;
return pp;
err_free_iovs:
if (pp->flags & PP_OWN_IOVS)
xfree(iovs);
err_free_pp:
xfree(pp);
return NULL;
}
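
/*
 * A minimal usage sketch of the API above; it is never called from CRIU
 * itself and the address is made up -- page_pipe_add_page() only records
 * iovecs, the task memory is not touched until the pages are spliced.
 */
static inline int page_pipe_usage_demo(void)
{
	unsigned long addr = 0x400000; /* any page-aligned address */
	struct page_pipe *pp;

	pp = create_page_pipe(16, NULL, 0);
	if (!pp)
		return -1;

	/* The second page extends the first iovec, see iov_grow_page() */
	if (page_pipe_add_page(pp, addr, 0) ||
	    page_pipe_add_page(pp, addr + PAGE_SIZE, 0)) {
		destroy_page_pipe(pp);
		return -1;
	}

	debug_show_page_pipe(pp);
	destroy_page_pipe(pp);
	return 0;
}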
void destroy_page_pipe(struct page_pipe *pp)
{
struct page_pipe_buf *ppb, *n;
pr_debug("Killing page pipe\n");
list_splice(&pp->free_bufs, &pp->bufs);
list_for_each_entry_safe(ppb, n, &pp->bufs, l)
ppb_destroy(ppb);
if (pp->flags & PP_OWN_IOVS)
xfree(pp->iovs);
xfree(pp);
}
void page_pipe_reinit(struct page_pipe *pp)
{
struct page_pipe_buf *ppb, *n;
BUG_ON(!(pp->flags & PP_CHUNK_MODE));
pr_debug("Clean up page pipe\n");
list_for_each_entry_safe(ppb, n, &pp->bufs, l)
list_move(&ppb->l, &pp->free_bufs);
pp->free_hole = 0;
if (page_pipe_grow(pp, 0))
BUG(); /* It can't fail, because ppb is in free_bufs */
}
static inline int try_add_page_to(struct page_pipe *pp, struct page_pipe_buf *ppb, unsigned long addr,
unsigned int flags)
{
if (ppb->flags != flags)
return 1;
if (ppb_resize_pipe(ppb) == 1)
return 1;
if (ppb->nr_segs && iov_grow_page(&ppb->iov[ppb->nr_segs - 1], addr))
goto out;
pr_debug("Add iov to page pipe (%u iovs, %u/%u total)\n", ppb->nr_segs, pp->free_iov, pp->nr_iovs);
iov_init(&ppb->iov[ppb->nr_segs++], addr);
pp->free_iov++;
BUG_ON(pp->free_iov > pp->nr_iovs);
out:
ppb->pages_in++;
return 0;
}
static inline int try_add_page(struct page_pipe *pp, unsigned long addr, unsigned int flags)
{
BUG_ON(list_empty(&pp->bufs));
return try_add_page_to(pp, list_entry(pp->bufs.prev, struct page_pipe_buf, l), addr, flags);
}
int page_pipe_add_page(struct page_pipe *pp, unsigned long addr, unsigned int flags)
{
int ret;
ret = try_add_page(pp, addr, flags);
if (ret <= 0)
return ret;
ret = page_pipe_grow(pp, flags);
if (ret < 0)
return ret;
ret = try_add_page(pp, addr, flags);
BUG_ON(ret > 0);
return ret;
}
#define PP_HOLES_BATCH 32
int page_pipe_add_hole(struct page_pipe *pp, unsigned long addr, unsigned int flags)
{
if (pp->free_hole >= pp->nr_holes) {
size_t new_size = (pp->nr_holes + PP_HOLES_BATCH) * sizeof(struct iovec);
if (xrealloc_safe(&pp->holes, new_size))
return -1;
new_size = (pp->nr_holes + PP_HOLES_BATCH) * sizeof(unsigned int);
if (xrealloc_safe(&pp->hole_flags, new_size))
return -1;
pp->nr_holes += PP_HOLES_BATCH;
}
if (pp->free_hole && pp->hole_flags[pp->free_hole - 1] == flags &&
iov_grow_page(&pp->holes[pp->free_hole - 1], addr))
goto out;
iov_init(&pp->holes[pp->free_hole++], addr);
pp->hole_flags[pp->free_hole - 1] = flags;
out:
return 0;
}
/*
 * Find the ppb and iov that contain @addr and count the amount of data
 * between the beginning of the pipe belonging to that ppb and @addr
*/
static struct page_pipe_buf *get_ppb(struct page_pipe *pp, unsigned long addr, struct iovec **iov_ret,
unsigned long *len)
{
struct page_pipe_buf *ppb;
int i;
list_for_each_entry(ppb, &pp->bufs, l) {
for (i = 0, *len = 0; i < ppb->nr_segs; i++) {
struct iovec *iov = &ppb->iov[i];
unsigned long base = (unsigned long)iov->iov_base;
if (addr < base || addr >= base + iov->iov_len) {
*len += iov->iov_len;
continue;
}
/* got iov that contains the addr */
*len += (addr - base);
*iov_ret = iov;
list_move(&ppb->l, &pp->bufs);
return ppb;
}
}
return NULL;
}
int pipe_read_dest_init(struct pipe_read_dest *prd)
{
int ret;
if (pipe(prd->p)) {
pr_perror("Cannot create pipe for reading from page-pipe");
return -1;
}
ret = fcntl(prd->p[0], F_SETPIPE_SZ, PIPE_MAX_SIZE * PAGE_SIZE);
if (ret < 0)
return -1;
prd->sink_fd = open("/dev/null", O_WRONLY);
if (prd->sink_fd < 0) {
pr_perror("Cannot open sink for reading from page-pipe");
return -1;
}
ret = fcntl(prd->p[0], F_GETPIPE_SZ, 0);
pr_debug("Created tee pipe size %d\n", ret);
return 0;
}
int page_pipe_read(struct page_pipe *pp, struct pipe_read_dest *prd, unsigned long addr, unsigned int *nr_pages,
unsigned int ppb_flags)
{
struct page_pipe_buf *ppb;
struct iovec *iov = NULL;
unsigned long skip = 0, len;
ssize_t ret;
/*
	 * Find the ppb that contains @addr and count the length of data
	 * between the beginning of the pipe and @addr. If no ppb is found,
	 * the requested page is mapped to the zero pfn
*/
ppb = get_ppb(pp, addr, &iov, &skip);
if (!ppb) {
*nr_pages = 0;
return 0;
}
	if (!(ppb->flags & ppb_flags)) {
		pr_err("PPB flags mismatch: %x %x\n", ppb_flags, ppb->flags);
		return -1;
	}
/* clamp the request if it passes the end of iovec */
len = min((unsigned long)iov->iov_base + iov->iov_len - addr, (unsigned long)(*nr_pages) * PAGE_SIZE);
*nr_pages = len / PAGE_SIZE;
skip += ppb->pipe_off * PAGE_SIZE;
/* we should tee() the requested length + the beginning of the pipe */
len += skip;
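	/*
	 * tee() duplicates pipe data without consuming it, so the ppb keeps
	 * its pages for later readers; the unwanted head (skip bytes) is
	 * then drained into /dev/null with splice().
	 */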
ret = tee(ppb->p[0], prd->p[1], len, 0);
if (ret != len) {
pr_perror("tee: %zd", ret);
return -1;
}
ret = splice(prd->p[0], NULL, prd->sink_fd, NULL, skip, 0);
if (ret != skip) {
pr_perror("splice: %zd", ret);
return -1;
}
return 0;
}
void page_pipe_destroy_ppb(struct page_pipe_buf *ppb)
{
list_del(&ppb->l);
ppb_destroy(ppb);
}
void debug_show_page_pipe(struct page_pipe *pp)
{
struct page_pipe_buf *ppb;
int i;
struct iovec *iov;
if (pr_quelled(LOG_DEBUG))
return;
pr_debug("Page pipe:\n");
pr_debug("* %u pipes %u/%u iovs:\n", pp->nr_pipes, pp->free_iov, pp->nr_iovs);
list_for_each_entry(ppb, &pp->bufs, l) {
pr_debug("\tbuf %u pages, %u iovs, flags: %x pipe_off: %x :\n", ppb->pages_in, ppb->nr_segs, ppb->flags,
ppb->pipe_off);
for (i = 0; i < ppb->nr_segs; i++) {
iov = &ppb->iov[i];
pr_debug("\t\t%p %lu\n", iov->iov_base, iov->iov_len / PAGE_SIZE);
}
}
pr_debug("* %u holes:\n", pp->free_hole);
for (i = 0; i < pp->free_hole; i++) {
iov = &pp->holes[i];
pr_debug("\t%p %lu\n", iov->iov_base, iov->iov_len / PAGE_SIZE);
}
}
| 10,130 | 21.217105 | 112 |
c
|
criu
|
criu-master/criu/pagemap-cache.c
|
#include <unistd.h>
#include <fcntl.h>
#include "page.h"
#include "pagemap-cache.h"
#include "common/compiler.h"
#include "xmalloc.h"
#include "util.h"
#include "log.h"
#include "vma.h"
#include "mem.h"
#include "kerndat.h"
#undef LOG_PREFIX
#define LOG_PREFIX "pagemap-cache: "
/* To carry up to 2M of physical memory */
#define PMC_SHIFT (21)
#define PMC_SIZE (1ul << PMC_SHIFT)
#define PMC_MASK (~(PMC_SIZE - 1))
#define PMC_SIZE_GAP (PMC_SIZE / 4)
#define PAGEMAP_LEN(addr) (PAGE_PFN(addr) * sizeof(u64))
/*
 * It's a workaround for a kernel bug. In the 3.19 kernel, when the pagemap is
 * read for several VMAs in one read call, it returns incorrect data.
* https://github.com/checkpoint-restore/criu/issues/207
*/
static bool pagemap_cache_disabled;
static inline void pmc_reset(pmc_t *pmc)
{
memzero(pmc, sizeof(*pmc));
pmc->fd = -1;
}
static inline void pmc_zap(pmc_t *pmc)
{
pmc->start = pmc->end = 0;
}
int pmc_init(pmc_t *pmc, pid_t pid, const struct list_head *vma_head, size_t size)
{
size_t map_size = max(size, (size_t)PMC_SIZE);
pmc_reset(pmc);
BUG_ON(!vma_head);
pmc->pid = pid;
pmc->map_len = PAGEMAP_LEN(map_size);
pmc->vma_head = vma_head;
pmc->map = xmalloc(pmc->map_len);
if (!pmc->map)
goto err;
if (pagemap_cache_disabled)
pr_warn_once("The pagemap cache is disabled\n");
if (kdat.pmap == PM_DISABLED) {
/*
		 * FIXME We might need to implement a greedy
		 * mode that reads all available pages from
		 * inside the parasite.
		 *
		 * Actually, since linux-4.4 the pagemap file
		 * is available in user namespaces with PFNs
		 * hidden but page attributes provided, so the
		 * other option simply requires kernel 4.4 or
		 * above for user-namespace support.
*/
pr_err("No pagemap for %d available\n", pid);
goto err;
} else {
pmc->fd = open_proc(pid, "pagemap");
if (pmc->fd < 0)
goto err;
}
pr_debug("created for pid %d (takes %zu bytes)\n", pid, pmc->map_len);
return 0;
err:
pr_err("Failed to init pagemap for %d\n", pid);
pmc_fini(pmc);
return -1;
}
static inline u64 *__pmc_get_map(pmc_t *pmc, unsigned long addr)
{
return &pmc->map[PAGE_PFN(addr - pmc->start)];
}
static int pmc_fill_cache(pmc_t *pmc, const struct vma_area *vma)
{
unsigned long low = vma->e->start & PMC_MASK;
unsigned long high = low + PMC_SIZE;
size_t len = vma_area_len(vma);
size_t size_map;
if (high > kdat.task_size)
high = kdat.task_size;
pmc->start = vma->e->start;
pmc->end = vma->e->end;
pr_debug("%d: filling VMA %lx-%lx (%zuK) [l:%lx h:%lx]\n", pmc->pid, (long)vma->e->start, (long)vma->e->end,
len >> 10, low, high);
/*
	 * If we meet a small VMA, let's try to fit the 2M cache
	 * window at least 75% full; otherwise fall back to the plain
	 * "one vma at a time" read. Note the VMAs must fit the cache
	 * in a solid manner, iow -- either the whole VMA fits
	 * the cache window, or the plain read is used.
	 *
	 * The benefit (apart from reducing the number of read() calls)
	 * is to walk the page tables less.
*/
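	/*
	 * With the defaults above, PMC_SIZE is 2M and PMC_SIZE_GAP is 512K:
	 * e.g. a 64K VMA starting at most 512K past the 2M-aligned @low may
	 * share the cache window with its neighbours, while one starting
	 * deeper in the window is read in the plain one-VMA mode.
	 */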
if (!pagemap_cache_disabled && len < PMC_SIZE && (vma->e->start - low) < PMC_SIZE_GAP) {
size_t size_cov = len;
size_t nr_vmas = 1;
pr_debug("\t%d: %16lx-%-16lx nr:%-5zu cov:%zu\n", pmc->pid, (long)vma->e->start, (long)vma->e->end,
nr_vmas, size_cov);
list_for_each_entry_continue(vma, pmc->vma_head, list) {
if (vma->e->start > high || vma->e->end > high)
break;
BUG_ON(vma->e->start < low);
size_cov += vma_area_len(vma);
nr_vmas++;
pr_debug("\t%d: %16lx-%-16lx nr:%-5zu cov:%zu\n", pmc->pid, (long)vma->e->start,
(long)vma->e->end, nr_vmas, size_cov);
}
if (nr_vmas > 1) {
/*
* Note we don't touch low bound since it's set
* to first VMA start already and not updating it
* allows us to save a couple of code bytes.
*/
pmc->end = high;
pr_debug("\t%d: cache mode [l:%lx h:%lx]\n", pmc->pid, pmc->start, pmc->end);
} else
pr_debug("\t%d: simple mode [l:%lx h:%lx]\n", pmc->pid, pmc->start, pmc->end);
}
size_map = PAGEMAP_LEN(pmc->end - pmc->start);
BUG_ON(pmc->map_len < size_map);
BUG_ON(pmc->fd < 0);
if (pread(pmc->fd, pmc->map, size_map, PAGEMAP_PFN_OFF(pmc->start)) != size_map) {
pmc_zap(pmc);
pr_perror("Can't read %d's pagemap file", pmc->pid);
return -1;
}
return 0;
}
u64 *pmc_get_map(pmc_t *pmc, const struct vma_area *vma)
{
/* Hit */
if (likely(pmc->start <= vma->e->start && pmc->end >= vma->e->end))
return __pmc_get_map(pmc, vma->e->start);
/* Miss, refill the cache */
if (pmc_fill_cache(pmc, vma)) {
pr_err("Failed to fill cache for %d (%lx-%lx)\n", pmc->pid, (long)vma->e->start, (long)vma->e->end);
return NULL;
}
/* Hit for sure */
return __pmc_get_map(pmc, vma->e->start);
}
void pmc_fini(pmc_t *pmc)
{
close_safe(&pmc->fd);
xfree(pmc->map);
pmc_reset(pmc);
}
static void __attribute__((constructor)) pagemap_cache_init(void)
{
pagemap_cache_disabled = (getenv("CRIU_PMC_OFF") != NULL);
}
| 4,908 | 24.567708 | 109 |
c
|
criu
|
criu-master/criu/pagemap.c
|
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>
#include <sys/uio.h>
#include <limits.h>
#include "types.h"
#include "image.h"
#include "cr_options.h"
#include "servicefd.h"
#include "pagemap.h"
#include "restorer.h"
#include "rst-malloc.h"
#include "page-xfer.h"
#include "fault-injection.h"
#include "xmalloc.h"
#include "protobuf.h"
#include "images/pagemap.pb-c.h"
#ifndef SEEK_DATA
#define SEEK_DATA 3
#define SEEK_HOLE 4
#endif
#define MAX_BUNCH_SIZE 256
/*
* One "job" for the preadv() syscall in pagemap.c
*/
struct page_read_iov {
off_t from; /* offset in pi file where to start reading from */
	off_t end; /* the end of the read == @from plus the sum of to[i].iov_len */
struct iovec *to; /* destination iovs */
unsigned int nr; /* their number */
struct list_head l;
};
static inline bool can_extend_bunch(struct iovec *bunch, unsigned long off, unsigned long len)
{
return /* The next region is the continuation of the existing */
((unsigned long)bunch->iov_base + bunch->iov_len == off) &&
/* The resulting region is non empty and is small enough */
(bunch->iov_len == 0 || bunch->iov_len + len < MAX_BUNCH_SIZE * PAGE_SIZE);
}
static int punch_hole(struct page_read *pr, unsigned long off, unsigned long len, bool cleanup)
{
int ret;
struct iovec *bunch = &pr->bunch;
if (!cleanup && can_extend_bunch(bunch, off, len)) {
pr_debug("pr%lu-%u:Extend bunch len from %zu to %lu\n", pr->img_id, pr->id, bunch->iov_len,
bunch->iov_len + len);
bunch->iov_len += len;
} else {
if (bunch->iov_len > 0) {
pr_debug("Punch!/%p/%zu/\n", bunch->iov_base, bunch->iov_len);
ret = fallocate(img_raw_fd(pr->pi), FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
(unsigned long)bunch->iov_base, bunch->iov_len);
if (ret != 0) {
pr_perror("Error punching hole");
return -1;
}
}
bunch->iov_base = (void *)off;
bunch->iov_len = len;
pr_debug("pr%lu-%u:New bunch/%p/%zu/\n", pr->img_id, pr->id, bunch->iov_base, bunch->iov_len);
}
return 0;
}
int dedup_one_iovec(struct page_read *pr, unsigned long off, unsigned long len)
{
unsigned long iov_end;
iov_end = off + len;
while (1) {
int ret;
unsigned long piov_end;
struct page_read *prp;
ret = pr->seek_pagemap(pr, off);
if (ret == 0) {
if (off < pr->cvaddr && pr->cvaddr < iov_end) {
pr_debug("pr%lu-%u:No range %lx-%lx in pagemap\n", pr->img_id, pr->id, off, pr->cvaddr);
off = pr->cvaddr;
} else {
pr_debug("pr%lu-%u:No range %lx-%lx in pagemap\n", pr->img_id, pr->id, off, iov_end);
return 0;
}
}
if (!pr->pe)
return -1;
piov_end = pr->pe->vaddr + pagemap_len(pr->pe);
if (!pagemap_in_parent(pr->pe)) {
ret = punch_hole(pr, pr->pi_off, min(piov_end, iov_end) - off, false);
if (ret == -1)
return ret;
}
prp = pr->parent;
if (prp) {
/* recursively */
pr_debug("pr%lu-%u:Go to next parent level\n", pr->img_id, pr->id);
len = min(piov_end, iov_end) - off;
ret = dedup_one_iovec(prp, off, len);
if (ret != 0)
return -1;
}
if (piov_end < iov_end) {
off = piov_end;
continue;
} else
return 0;
}
return 0;
}
static int advance(struct page_read *pr)
{
pr->curr_pme++;
if (pr->curr_pme >= pr->nr_pmes)
return 0;
pr->pe = pr->pmes[pr->curr_pme];
pr->cvaddr = pr->pe->vaddr;
return 1;
}
static void skip_pagemap_pages(struct page_read *pr, unsigned long len)
{
if (!len)
return;
if (pagemap_present(pr->pe))
pr->pi_off += len;
pr->cvaddr += len;
}
static int seek_pagemap(struct page_read *pr, unsigned long vaddr)
{
if (!pr->pe)
goto adv;
do {
unsigned long start = pr->pe->vaddr;
unsigned long end = start + pagemap_len(pr->pe);
if (vaddr < pr->cvaddr)
break;
if (vaddr >= start && vaddr < end) {
skip_pagemap_pages(pr, vaddr - pr->cvaddr);
return 1;
}
if (end <= vaddr)
skip_pagemap_pages(pr, end - pr->cvaddr);
adv:; /* otherwise "label at end of compound stmt" gcc error */
} while (advance(pr));
return 0;
}
static inline void pagemap_bound_check(PagemapEntry *pe, unsigned long vaddr, int nr)
{
if (vaddr < pe->vaddr || (vaddr - pe->vaddr) / PAGE_SIZE + nr > pe->nr_pages) {
pr_err("Page read err %" PRIx64 ":%u vs %lx:%u\n", pe->vaddr, pe->nr_pages, vaddr, nr);
BUG();
}
}
static int read_parent_page(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags)
{
struct page_read *ppr = pr->parent;
int ret;
if (!ppr) {
pr_err("No parent for snapshot pagemap\n");
return -1;
}
/*
* Parent pagemap at this point entry may be shorter
* than the current vaddr:nr needs, so we have to
* carefully 'split' the vaddr:nr into pieces and go
* to parent page-read with the longest requests it
* can handle.
*/
do {
int p_nr;
pr_debug("\tpr%lu-%u Read from parent\n", pr->img_id, pr->id);
ret = ppr->seek_pagemap(ppr, vaddr);
if (ret <= 0) {
pr_err("Missing %lx in parent pagemap\n", vaddr);
return -1;
}
/*
* This is how many pages we have in the parent
* page_read starting from vaddr. Go ahead and
* read as much as we can.
*/
p_nr = ppr->pe->nr_pages - (vaddr - ppr->pe->vaddr) / PAGE_SIZE;
pr_info("\tparent has %u pages in\n", p_nr);
if (p_nr > nr)
p_nr = nr;
ret = ppr->read_pages(ppr, vaddr, p_nr, buf, flags);
if (ret == -1)
return ret;
/*
* OK, let's see how much data we have left and go
* to parent page-read again for the next pagemap
* entry.
*/
nr -= p_nr;
vaddr += p_nr * PAGE_SIZE;
buf += p_nr * PAGE_SIZE;
} while (nr);
return 0;
}
static int read_local_page(struct page_read *pr, unsigned long vaddr, unsigned long len, void *buf)
{
int fd;
ssize_t ret;
size_t curr = 0;
fd = img_raw_fd(pr->pi);
if (fd < 0) {
pr_err("Failed getting raw image fd\n");
return -1;
}
/*
* Flush any pending async requests if any not to break the
* linear reading from the pages.img file.
*/
if (pr->sync(pr))
return -1;
pr_debug("\tpr%lu-%u Read page from self %lx/%" PRIx64 "\n", pr->img_id, pr->id, pr->cvaddr, pr->pi_off);
while (1) {
ret = pread(fd, buf + curr, len - curr, pr->pi_off + curr);
if (ret < 1) {
pr_perror("Can't read mapping page %zd", ret);
return -1;
}
curr += ret;
if (curr == len)
break;
}
if (opts.auto_dedup) {
ret = punch_hole(pr, pr->pi_off, len, false);
if (ret == -1)
return -1;
}
return 0;
}
static int enqueue_async_iov(struct page_read *pr, void *buf, unsigned long len, struct list_head *to)
{
struct page_read_iov *pr_iov;
struct iovec *iov;
pr_iov = xzalloc(sizeof(*pr_iov));
if (!pr_iov)
return -1;
pr_iov->from = pr->pi_off;
pr_iov->end = pr->pi_off + len;
iov = xzalloc(sizeof(*iov));
if (!iov) {
xfree(pr_iov);
return -1;
}
iov->iov_base = buf;
iov->iov_len = len;
pr_iov->to = iov;
pr_iov->nr = 1;
list_add_tail(&pr_iov->l, to);
return 0;
}
int pagemap_render_iovec(struct list_head *from, struct task_restore_args *ta)
{
struct page_read_iov *piov;
ta->vma_ios = (struct restore_vma_io *)rst_mem_align_cpos(RM_PRIVATE);
ta->vma_ios_n = 0;
list_for_each_entry(piov, from, l) {
struct restore_vma_io *rio;
pr_info("`- render %d iovs (%p:%zd...)\n", piov->nr, piov->to[0].iov_base, piov->to[0].iov_len);
rio = rst_mem_alloc(RIO_SIZE(piov->nr), RM_PRIVATE);
if (!rio)
return -1;
rio->nr_iovs = piov->nr;
rio->off = piov->from;
memcpy(rio->iovs, piov->to, piov->nr * sizeof(struct iovec));
ta->vma_ios_n++;
}
return 0;
}
int pagemap_enqueue_iovec(struct page_read *pr, void *buf, unsigned long len, struct list_head *to)
{
struct page_read_iov *cur_async = NULL;
struct iovec *iov;
if (!list_empty(to))
cur_async = list_entry(to->prev, struct page_read_iov, l);
/*
* We don't have any async requests or we have new read
* request that should happen at pos _after_ some hole from
* the previous one.
* Start the new preadv request here.
*/
if (!cur_async || pr->pi_off != cur_async->end)
return enqueue_async_iov(pr, buf, len, to);
/*
* This read is pure continuation of the previous one. Let's
* just add another IOV (or extend one of the existing).
*/
iov = &cur_async->to[cur_async->nr - 1];
if (iov->iov_base + iov->iov_len == buf) {
/* Extendable */
iov->iov_len += len;
} else {
/* Need one more target iovec */
unsigned int n_iovs = cur_async->nr + 1;
if (n_iovs >= IOV_MAX)
return enqueue_async_iov(pr, buf, len, to);
iov = xrealloc(cur_async->to, n_iovs * sizeof(*iov));
if (!iov)
return -1;
cur_async->to = iov;
iov += cur_async->nr;
iov->iov_base = buf;
iov->iov_len = len;
cur_async->nr = n_iovs;
}
cur_async->end += len;
return 0;
}
static int maybe_read_page_local(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags)
{
int ret;
unsigned long len = nr * PAGE_SIZE;
/*
	 * There's no API in the kernel to start an asynchronous
	 * cached read (or write), so if someone asks us for an
	 * urgent async read, just do the regular cached read.
*/
if ((flags & (PR_ASYNC | PR_ASAP)) == PR_ASYNC)
ret = pagemap_enqueue_iovec(pr, buf, len, &pr->async);
else {
ret = read_local_page(pr, vaddr, len, buf);
if (ret == 0 && pr->io_complete)
ret = pr->io_complete(pr, vaddr, nr);
}
pr->pi_off += len;
return ret;
}
/*
* We cannot use maybe_read_page_local() for streaming images as it uses
* pread(), seeking in the file. Instead, we use this custom page reader.
*/
static int maybe_read_page_img_streamer(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags)
{
unsigned long len = nr * PAGE_SIZE;
int fd;
int ret;
size_t curr = 0;
fd = img_raw_fd(pr->pi);
if (fd < 0) {
pr_err("Getting raw FD failed\n");
return -1;
}
pr_debug("\tpr%lu-%u Read page from self %lx/%" PRIx64 "\n", pr->img_id, pr->id, pr->cvaddr, pr->pi_off);
/* We can't seek. The requested address better match */
BUG_ON(pr->cvaddr != vaddr);
while (1) {
ret = read(fd, buf + curr, len - curr);
if (ret == 0) {
pr_err("Reached EOF unexpectedly while reading page from image\n");
return -1;
} else if (ret < 0) {
pr_perror("Can't read mapping page %d", ret);
return -1;
}
curr += ret;
if (curr == len)
break;
}
if (opts.auto_dedup)
pr_warn_once("Can't dedup when streaming images\n");
if (pr->io_complete)
ret = pr->io_complete(pr, vaddr, nr);
pr->pi_off += len;
return ret;
}
static int read_page_complete(unsigned long img_id, unsigned long vaddr, int nr_pages, void *priv)
{
int ret = 0;
struct page_read *pr = priv;
if (pr->img_id != img_id) {
pr_err("Out of order read completed (want %lu have %lu)\n", pr->img_id, img_id);
return -1;
}
if (pr->io_complete)
ret = pr->io_complete(pr, vaddr, nr_pages);
else
pr_warn_once("Remote page read w/o io_complete!\n");
return ret;
}
static int maybe_read_page_remote(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags)
{
int ret;
/* We always do PR_ASAP mode here (FIXME?) */
ret = request_remote_pages(pr->img_id, vaddr, nr);
if (!ret)
ret = page_server_start_read(buf, nr, read_page_complete, pr, flags);
return ret;
}
static int read_pagemap_page(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags)
{
pr_info("pr%lu-%u Read %lx %u pages\n", pr->img_id, pr->id, vaddr, nr);
pagemap_bound_check(pr->pe, vaddr, nr);
if (pagemap_in_parent(pr->pe)) {
if (read_parent_page(pr, vaddr, nr, buf, flags) < 0)
return -1;
} else {
if (pr->maybe_read_page(pr, vaddr, nr, buf, flags) < 0)
return -1;
}
pr->cvaddr += nr * PAGE_SIZE;
return 1;
}
static void free_pagemaps(struct page_read *pr)
{
int i;
for (i = 0; i < pr->nr_pmes; i++)
pagemap_entry__free_unpacked(pr->pmes[i], NULL);
xfree(pr->pmes);
pr->pmes = NULL;
}
static void advance_piov(struct page_read_iov *piov, ssize_t len)
{
ssize_t olen = len;
int onr = piov->nr;
piov->from += len;
while (len) {
struct iovec *cur = piov->to;
if (cur->iov_len <= len) {
piov->to++;
piov->nr--;
len -= cur->iov_len;
continue;
}
cur->iov_base += len;
cur->iov_len -= len;
break;
}
pr_debug("Advanced iov %zu bytes, %d->%d iovs, %zu tail\n", olen, onr, piov->nr, len);
}
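
/*
 * An example of what advance_piov() does: with target iovecs of 3 and 5
 * pages and a short preadv() of 4 pages, the first iovec is dropped and
 * the second is advanced by one page, so the retry loop in
 * process_async_reads() resumes exactly where the kernel stopped.
 */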
static int process_async_reads(struct page_read *pr)
{
int fd, ret = 0;
struct page_read_iov *piov, *n;
fd = img_raw_fd(pr->pi);
list_for_each_entry_safe(piov, n, &pr->async, l) {
ssize_t ret;
struct iovec *iovs = piov->to;
pr_debug("Read piov iovs %d, from %ju, len %ju, first %p:%zu\n", piov->nr, piov->from,
piov->end - piov->from, piov->to->iov_base, piov->to->iov_len);
more:
ret = preadv(fd, piov->to, piov->nr, piov->from);
if (fault_injected(FI_PARTIAL_PAGES)) {
/*
* We might have read everything, but for debug
* purposes let's try to force the advance_piov()
* and re-read tail.
*/
if (ret > 0 && piov->nr >= 2) {
pr_debug("`- trim preadv %zu\n", ret);
ret /= 2;
}
}
if (ret < 0) {
pr_err("Can't read async pr bytes (%zd / %ju read, %ju off, %d iovs)\n", ret,
piov->end - piov->from, piov->from, piov->nr);
return -1;
}
if (opts.auto_dedup && punch_hole(pr, piov->from, ret, false))
return -1;
if (ret != piov->end - piov->from) {
/*
			 * preadv() can return less than requested. That's
			 * valid and doesn't mean error or EOF. We should
			 * advance the iovecs and continue.
			 *
			 * Modify the piov in-place; we're going to drop this
			 * one anyway.
*/
advance_piov(piov, ret);
goto more;
}
BUG_ON(pr->io_complete); /* FIXME -- implement once needed */
list_del(&piov->l);
xfree(iovs);
xfree(piov);
}
if (pr->parent)
ret = process_async_reads(pr->parent);
return ret;
}
static void close_page_read(struct page_read *pr)
{
int ret;
BUG_ON(!list_empty(&pr->async));
if (pr->bunch.iov_len > 0) {
ret = punch_hole(pr, 0, 0, true);
if (ret == -1)
return;
pr->bunch.iov_len = 0;
}
if (pr->parent) {
close_page_read(pr->parent);
xfree(pr->parent);
}
if (pr->pmi)
close_image(pr->pmi);
if (pr->pi)
close_image(pr->pi);
if (pr->pmes)
free_pagemaps(pr);
}
static void reset_pagemap(struct page_read *pr)
{
pr->cvaddr = 0;
pr->pi_off = 0;
pr->curr_pme = -1;
pr->pe = NULL;
/* FIXME: take care of bunch */
if (pr->parent)
reset_pagemap(pr->parent);
}
static int try_open_parent(int dfd, unsigned long id, struct page_read *pr, int pr_flags)
{
int pfd, ret;
struct page_read *parent = NULL;
/* Image streaming lacks support for incremental images */
if (opts.stream)
goto out;
if (open_parent(dfd, &pfd))
goto err;
if (pfd < 0)
goto out;
parent = xmalloc(sizeof(*parent));
if (!parent)
goto err_cl;
ret = open_page_read_at(pfd, id, parent, pr_flags);
if (ret < 0)
goto err_free;
if (!ret) {
xfree(parent);
parent = NULL;
}
close(pfd);
out:
pr->parent = parent;
return 0;
err_free:
xfree(parent);
err_cl:
close(pfd);
err:
return -1;
}
static void init_compat_pagemap_entry(PagemapEntry *pe)
{
/*
	 * A pagemap image generated by an older version will either
	 * contain a hole, because the pages are in the parent
	 * snapshot, or a pagemap entry that should be marked with
	 * PE_PRESENT
*/
if (pe->has_in_parent && pe->in_parent)
pe->flags |= PE_PARENT;
else if (!pe->has_flags)
pe->flags = PE_PRESENT;
}
/*
 * The pagemap entry size is at least 8 bytes for small mappings with
 * a low address and may get to 18 bytes or even more for large mappings
 * with a high address and the in_parent flag set. 16 seems to be a nice
 * round number to minimize {over,under}-allocations
*/
#define PAGEMAP_ENTRY_SIZE_ESTIMATE 16
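/*
 * E.g. a 1600-byte pagemap image is estimated to hold about 100 entries;
 * if the guess turns out to be low, the array grows by half of the
 * initial estimate at a time (nr_realloc below).
 */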
static int init_pagemaps(struct page_read *pr)
{
off_t fsize;
int nr_pmes, nr_realloc;
if (opts.stream) {
/*
* TODO - There is no easy way to estimate the size of the
* pagemap that is still to be read from the pipe. Possible
* solution is to ask the image streamer for the size of the
* image. 1024 is a wild guess (more space is allocated if
* needed).
*/
fsize = 1024;
} else {
fsize = img_raw_size(pr->pmi);
}
if (fsize < 0)
return -1;
nr_pmes = fsize / PAGEMAP_ENTRY_SIZE_ESTIMATE + 1;
nr_realloc = nr_pmes / 2;
pr->pmes = xzalloc(nr_pmes * sizeof(*pr->pmes));
if (!pr->pmes)
return -1;
pr->nr_pmes = 0;
pr->curr_pme = -1;
while (1) {
int ret = pb_read_one_eof(pr->pmi, &pr->pmes[pr->nr_pmes], PB_PAGEMAP);
if (ret < 0)
goto free_pagemaps;
if (ret == 0)
break;
init_compat_pagemap_entry(pr->pmes[pr->nr_pmes]);
pr->nr_pmes++;
if (pr->nr_pmes >= nr_pmes) {
PagemapEntry **new;
nr_pmes += nr_realloc;
new = xrealloc(pr->pmes, nr_pmes * sizeof(*pr->pmes));
if (!new)
goto free_pagemaps;
pr->pmes = new;
}
}
close_image(pr->pmi);
pr->pmi = NULL;
return 0;
free_pagemaps:
free_pagemaps(pr);
return -1;
}
int open_page_read_at(int dfd, unsigned long img_id, struct page_read *pr, int pr_flags)
{
int flags, i_typ;
static unsigned ids = 1;
bool remote = pr_flags & PR_REMOTE;
/*
* Only the top-most page-read can be remote, all the
* others are always local.
*/
pr_flags &= ~PR_REMOTE;
if (opts.auto_dedup)
pr_flags |= PR_MOD;
if (pr_flags & PR_MOD)
flags = O_RDWR;
else
flags = O_RSTR;
switch (pr_flags & PR_TYPE_MASK) {
case PR_TASK:
i_typ = CR_FD_PAGEMAP;
break;
case PR_SHMEM:
i_typ = CR_FD_SHMEM_PAGEMAP;
break;
default:
BUG();
return -1;
}
INIT_LIST_HEAD(&pr->async);
pr->pe = NULL;
pr->parent = NULL;
pr->cvaddr = 0;
pr->pi_off = 0;
pr->bunch.iov_len = 0;
pr->bunch.iov_base = NULL;
pr->pmes = NULL;
pr->pieok = false;
pr->pmi = open_image_at(dfd, i_typ, O_RSTR, img_id);
if (!pr->pmi)
return -1;
if (empty_image(pr->pmi)) {
close_image(pr->pmi);
return 0;
}
if (try_open_parent(dfd, img_id, pr, pr_flags)) {
close_image(pr->pmi);
return -1;
}
pr->pi = open_pages_image_at(dfd, flags, pr->pmi, &pr->pages_img_id);
if (!pr->pi) {
close_page_read(pr);
return -1;
}
if (init_pagemaps(pr)) {
close_page_read(pr);
return -1;
}
pr->read_pages = read_pagemap_page;
pr->advance = advance;
pr->close = close_page_read;
pr->skip_pages = skip_pagemap_pages;
pr->sync = process_async_reads;
pr->seek_pagemap = seek_pagemap;
pr->reset = reset_pagemap;
pr->io_complete = NULL; /* set up by the client if needed */
pr->id = ids++;
pr->img_id = img_id;
if (remote)
pr->maybe_read_page = maybe_read_page_remote;
else if (opts.stream)
pr->maybe_read_page = maybe_read_page_img_streamer;
else {
pr->maybe_read_page = maybe_read_page_local;
if (!pr->parent && !opts.lazy_pages)
pr->pieok = true;
}
pr_debug("Opened %s page read %u (parent %u)\n", remote ? "remote" : "local", pr->id,
pr->parent ? pr->parent->id : 0);
return 1;
}
int open_page_read(unsigned long img_id, struct page_read *pr, int pr_flags)
{
return open_page_read_at(get_service_fd(IMG_FD_OFF), img_id, pr, pr_flags);
}
#define DUP_IDS_BASE 1000
void dup_page_read(struct page_read *src, struct page_read *dst)
{
static int dup_ids = 1;
memcpy(dst, src, sizeof(*dst));
INIT_LIST_HEAD(&dst->async);
dst->id = src->id + DUP_IDS_BASE * dup_ids++;
dst->reset(dst);
}
| 19,208 | 21.232639 | 117 |
c
|
criu
|
criu-master/criu/parasite-syscall.c
|
#include <unistd.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include "common/config.h"
#include "common/compiler.h"
#include "types.h"
#include "protobuf.h"
#include "images/sa.pb-c.h"
#include "images/timer.pb-c.h"
#include "images/creds.pb-c.h"
#include "images/core.pb-c.h"
#include "images/pagemap.pb-c.h"
#include "imgset.h"
#include "parasite-syscall.h"
#include "parasite.h"
#include "crtools.h"
#include "namespaces.h"
#include "kerndat.h"
#include "pstree.h"
#include "posix-timer.h"
#include "mem.h"
#include "criu-log.h"
#include "vma.h"
#include "proc_parse.h"
#include "aio.h"
#include "fault-injection.h"
#include <compel/plugins/std/syscall-codes.h>
#include "signal.h"
#include "sigframe.h"
#include <string.h>
#include <stdlib.h>
#include <elf.h>
#include "dump.h"
#include "restorer.h"
#include "infect.h"
#include "infect-rpc.h"
#include "pie/parasite-blob.h"
unsigned long get_exec_start(struct vm_area_list *vmas)
{
struct vma_area *vma_area;
list_for_each_entry(vma_area, &vmas->h, list) {
unsigned long len;
if (vma_area->e->start >= kdat.task_size)
continue;
if (!(vma_area->e->prot & PROT_EXEC))
continue;
len = vma_area_len(vma_area);
if (len < PARASITE_START_AREA_MIN) {
pr_warn("Suspiciously short VMA @%#lx\n", (unsigned long)vma_area->e->start);
continue;
}
return vma_area->e->start;
}
return 0;
}
/*
 * We need to detect parasite crashes so as not to hang on socket operations.
 * Since CRIU holds the parasite with ptrace, it will receive SIGCHLD if the
 * latter crashes.
 *
 * This puts a restriction on how to execute a sub-process on the dump stage.
 * One should use the cr_system helper, which blocks SIGCHLD and waits
 * for the spawned program to finish.
*/
static void sigchld_handler(int signal, siginfo_t *siginfo, void *data)
{
int pid, status;
pid = waitpid(-1, &status, WNOHANG);
if (pid <= 0)
return;
pr_err("si_code=%d si_pid=%d si_status=%d\n", siginfo->si_code, siginfo->si_pid, siginfo->si_status);
if (WIFEXITED(status))
pr_err("%d exited with %d unexpectedly\n", pid, WEXITSTATUS(status));
else if (WIFSIGNALED(status))
pr_err("%d was killed by %d unexpectedly: %s\n", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
else if (WIFSTOPPED(status))
pr_err("%d was stopped by %d unexpectedly\n", pid, WSTOPSIG(status));
exit(1);
}
static int alloc_groups_copy_creds(CredsEntry *ce, struct parasite_dump_creds *c)
{
BUILD_BUG_ON(sizeof(ce->groups[0]) != sizeof(c->groups[0]));
BUILD_BUG_ON(sizeof(ce->cap_inh[0]) != sizeof(c->cap_inh[0]));
BUILD_BUG_ON(sizeof(ce->cap_prm[0]) != sizeof(c->cap_prm[0]));
BUILD_BUG_ON(sizeof(ce->cap_eff[0]) != sizeof(c->cap_eff[0]));
BUILD_BUG_ON(sizeof(ce->cap_bnd[0]) != sizeof(c->cap_bnd[0]));
BUG_ON(ce->n_cap_inh != CR_CAP_SIZE);
BUG_ON(ce->n_cap_prm != CR_CAP_SIZE);
BUG_ON(ce->n_cap_eff != CR_CAP_SIZE);
BUG_ON(ce->n_cap_bnd != CR_CAP_SIZE);
memcpy(ce->cap_inh, c->cap_inh, sizeof(c->cap_inh[0]) * CR_CAP_SIZE);
memcpy(ce->cap_prm, c->cap_prm, sizeof(c->cap_prm[0]) * CR_CAP_SIZE);
memcpy(ce->cap_eff, c->cap_eff, sizeof(c->cap_eff[0]) * CR_CAP_SIZE);
memcpy(ce->cap_bnd, c->cap_bnd, sizeof(c->cap_bnd[0]) * CR_CAP_SIZE);
ce->secbits = c->secbits;
ce->n_groups = c->ngroups;
ce->groups = xmemdup(c->groups, sizeof(c->groups[0]) * c->ngroups);
ce->uid = c->uids[0];
ce->gid = c->gids[0];
ce->euid = c->uids[1];
ce->egid = c->gids[1];
ce->suid = c->uids[2];
ce->sgid = c->gids[2];
ce->fsuid = c->uids[3];
ce->fsgid = c->gids[3];
return ce->groups ? 0 : -ENOMEM;
}
static void init_parasite_rseq_arg(struct parasite_check_rseq *rseq)
{
rseq->has_rseq = kdat.has_rseq;
rseq->has_ptrace_get_rseq_conf = kdat.has_ptrace_get_rseq_conf;
rseq->rseq_inited = false;
}
int parasite_dump_thread_leader_seized(struct parasite_ctl *ctl, int pid, CoreEntry *core)
{
ThreadCoreEntry *tc = core->thread_core;
struct parasite_dump_thread *args;
struct parasite_dump_creds *pc;
int ret;
args = compel_parasite_args(ctl, struct parasite_dump_thread);
pc = args->creds;
pc->cap_last_cap = kdat.last_cap;
init_parasite_rseq_arg(&args->rseq);
ret = compel_rpc_call_sync(PARASITE_CMD_DUMP_THREAD, ctl);
if (ret < 0)
return ret;
ret = alloc_groups_copy_creds(tc->creds, pc);
if (ret) {
pr_err("Can't copy creds for thread leader %d\n", pid);
return -1;
}
compel_arch_get_tls_task(ctl, &args->tls);
return dump_thread_core(pid, core, args);
}
int parasite_dump_thread_seized(struct parasite_thread_ctl *tctl, struct parasite_ctl *ctl, int id, struct pid *tid,
CoreEntry *core)
{
struct parasite_dump_thread *args;
pid_t pid = tid->real;
ThreadCoreEntry *tc = core->thread_core;
CredsEntry *creds = tc->creds;
struct parasite_dump_creds *pc;
int ret;
BUG_ON(id == 0); /* Leader is dumped in dump_task_core_all */
args = compel_parasite_args(ctl, struct parasite_dump_thread);
pc = args->creds;
pc->cap_last_cap = kdat.last_cap;
tc->has_blk_sigset = true;
#ifdef CONFIG_MIPS
memcpy(&tc->blk_sigset, (unsigned long *)compel_thread_sigmask(tctl), sizeof(tc->blk_sigset));
memcpy(&tc->blk_sigset_extended, (unsigned long *)compel_thread_sigmask(tctl) + 1, sizeof(tc->blk_sigset));
#else
memcpy(&tc->blk_sigset, compel_thread_sigmask(tctl), sizeof(k_rtsigset_t));
#endif
ret = compel_get_thread_regs(tctl, save_task_regs, core);
if (ret) {
pr_err("Can't obtain regs for thread %d\n", pid);
return -1;
}
ret = compel_arch_fetch_thread_area(tctl);
if (ret) {
pr_err("Can't obtain thread area of %d\n", pid);
return -1;
}
compel_arch_get_tls_thread(tctl, &args->tls);
init_parasite_rseq_arg(&args->rseq);
ret = compel_run_in_thread(tctl, PARASITE_CMD_DUMP_THREAD);
if (ret) {
pr_err("Can't init thread in parasite %d\n", pid);
return -1;
}
ret = alloc_groups_copy_creds(creds, pc);
if (ret) {
pr_err("Can't copy creds for thread %d\n", pid);
return -1;
}
tid->ns[0].virt = args->tid;
return dump_thread_core(pid, core, args);
}
int parasite_dump_sigacts_seized(struct parasite_ctl *ctl, struct pstree_item *item)
{
TaskCoreEntry *tc = item->core[0]->tc;
struct parasite_dump_sa_args *args;
int ret, sig;
SaEntry *sa, **psa;
args = compel_parasite_args(ctl, struct parasite_dump_sa_args);
ret = compel_rpc_call_sync(PARASITE_CMD_DUMP_SIGACTS, ctl);
if (ret < 0)
return ret;
psa = xmalloc((SIGMAX - 2) * (sizeof(SaEntry *) + sizeof(SaEntry)));
if (!psa)
return -1;
sa = (SaEntry *)(psa + SIGMAX - 2);
tc->n_sigactions = SIGMAX - 2;
tc->sigactions = psa;
for (sig = 1; sig <= SIGMAX; sig++) {
int i = sig - 1;
if (sig == SIGSTOP || sig == SIGKILL)
continue;
sa_entry__init(sa);
ASSIGN_TYPED(sa->sigaction, encode_pointer(args->sas[i].rt_sa_handler));
ASSIGN_TYPED(sa->flags, args->sas[i].rt_sa_flags);
ASSIGN_TYPED(sa->restorer, encode_pointer(args->sas[i].rt_sa_restorer));
#ifdef CONFIG_MIPS
sa->has_mask_extended = 1;
BUILD_BUG_ON(sizeof(sa->mask) * 2 != sizeof(args->sas[0].rt_sa_mask.sig));
memcpy(&sa->mask, &(args->sas[i].rt_sa_mask.sig[0]), sizeof(sa->mask));
memcpy(&sa->mask_extended, &(args->sas[i].rt_sa_mask.sig[1]), sizeof(sa->mask));
#else
BUILD_BUG_ON(sizeof(sa->mask) != sizeof(args->sas[0].rt_sa_mask.sig));
memcpy(&sa->mask, args->sas[i].rt_sa_mask.sig, sizeof(sa->mask));
#endif
sa->has_compat_sigaction = true;
sa->compat_sigaction = !compel_mode_native(ctl);
*(psa++) = sa++;
}
return 0;
}
static void encode_itimer(struct itimerval *v, ItimerEntry *ie)
{
ie->isec = v->it_interval.tv_sec;
ie->iusec = v->it_interval.tv_usec;
ie->vsec = v->it_value.tv_sec;
ie->vusec = v->it_value.tv_usec;
}
int parasite_dump_itimers_seized(struct parasite_ctl *ctl, struct pstree_item *item)
{
CoreEntry *core = item->core[0];
struct parasite_dump_itimers_args *args;
int ret;
args = compel_parasite_args(ctl, struct parasite_dump_itimers_args);
ret = compel_rpc_call_sync(PARASITE_CMD_DUMP_ITIMERS, ctl);
if (ret < 0)
return ret;
	encode_itimer(&args->real, core->tc->timers->real);
	encode_itimer(&args->virt, core->tc->timers->virt);
	encode_itimer(&args->prof, core->tc->timers->prof);
return 0;
}
static int core_alloc_posix_timers(TaskTimersEntry *tte, int n, PosixTimerEntry **pte)
{
int sz;
/*
* Will be free()-ed in core_entry_free()
*/
sz = n * (sizeof(PosixTimerEntry *) + sizeof(PosixTimerEntry));
tte->posix = xmalloc(sz);
if (!tte->posix)
return -1;
tte->n_posix = n;
*pte = (PosixTimerEntry *)(tte->posix + n);
return 0;
}
static int encode_notify_thread_id(pid_t rtid, struct pstree_item *item, PosixTimerEntry *pte)
{
pid_t vtid = 0;
int i;
if (rtid == 0)
return 0;
if (!(root_ns_mask & CLONE_NEWPID)) {
/* Non-pid-namespace case */
pte->notify_thread_id = rtid;
pte->has_notify_thread_id = true;
return 0;
}
/* Pid-namespace case */
if (!kdat.has_nspid) {
pr_err("Have no NSpid support to dump notify thread id in pid namespace\n");
return -1;
}
for (i = 0; i < item->nr_threads; i++) {
if (item->threads[i].real != rtid)
continue;
vtid = item->threads[i].ns[0].virt;
break;
}
if (vtid == 0) {
pr_err("Unable to convert the notify thread id %d\n", rtid);
return -1;
}
pte->notify_thread_id = vtid;
pte->has_notify_thread_id = true;
return 0;
}
static int encode_posix_timer(struct pstree_item *item, struct posix_timer *v, struct proc_posix_timer *vp,
PosixTimerEntry *pte)
{
pte->it_id = vp->spt.it_id;
pte->clock_id = vp->spt.clock_id;
pte->si_signo = vp->spt.si_signo;
pte->it_sigev_notify = vp->spt.it_sigev_notify;
pte->sival_ptr = encode_pointer(vp->spt.sival_ptr);
pte->overrun = v->overrun;
pte->isec = v->val.it_interval.tv_sec;
pte->insec = v->val.it_interval.tv_nsec;
pte->vsec = v->val.it_value.tv_sec;
pte->vnsec = v->val.it_value.tv_nsec;
if (encode_notify_thread_id(vp->spt.notify_thread_id, item, pte))
return -1;
return 0;
}
int parasite_dump_posix_timers_seized(struct proc_posix_timers_stat *proc_args, struct parasite_ctl *ctl,
struct pstree_item *item)
{
CoreEntry *core = item->core[0];
TaskTimersEntry *tte = core->tc->timers;
PosixTimerEntry *pte;
struct proc_posix_timer *temp;
struct parasite_dump_posix_timers_args *args;
int ret, exit_code = -1;
int args_size;
int i;
if (core_alloc_posix_timers(tte, proc_args->timer_n, &pte))
return -1;
args_size = posix_timers_dump_size(proc_args->timer_n);
args = compel_parasite_args_s(ctl, args_size);
args->timer_n = proc_args->timer_n;
i = 0;
list_for_each_entry(temp, &proc_args->timers, list) {
args->timer[i].it_id = temp->spt.it_id;
i++;
}
ret = compel_rpc_call_sync(PARASITE_CMD_DUMP_POSIX_TIMERS, ctl);
if (ret < 0)
goto end_posix;
i = 0;
list_for_each_entry(temp, &proc_args->timers, list) {
posix_timer_entry__init(&pte[i]);
if (encode_posix_timer(item, &args->timer[i], temp, &pte[i]))
goto end_posix;
tte->posix[i] = &pte[i];
i++;
}
exit_code = 0;
end_posix:
free_posix_timers(proc_args);
return exit_code;
}
int parasite_dump_misc_seized(struct parasite_ctl *ctl, struct parasite_dump_misc *misc)
{
struct parasite_dump_misc *ma;
ma = compel_parasite_args(ctl, struct parasite_dump_misc);
if (compel_rpc_call_sync(PARASITE_CMD_DUMP_MISC, ctl) < 0)
return -1;
*misc = *ma;
return 0;
}
struct parasite_tty_args *parasite_dump_tty(struct parasite_ctl *ctl, int fd, int type)
{
struct parasite_tty_args *p;
p = compel_parasite_args(ctl, struct parasite_tty_args);
p->fd = fd;
p->type = type;
if (compel_rpc_call_sync(PARASITE_CMD_DUMP_TTY, ctl) < 0)
return NULL;
return p;
}
int parasite_drain_fds_seized(struct parasite_ctl *ctl, struct parasite_drain_fd *dfds, int nr_fds, int off, int *lfds,
struct fd_opts *opts)
{
int ret = -1, size, sk;
struct parasite_drain_fd *args;
size = drain_fds_size(dfds);
args = compel_parasite_args_s(ctl, size);
args->nr_fds = nr_fds;
memcpy(&args->fds, dfds->fds + off, sizeof(int) * nr_fds);
ret = compel_rpc_call(PARASITE_CMD_DRAIN_FDS, ctl);
if (ret) {
pr_err("Parasite failed to drain descriptors\n");
goto err;
}
sk = compel_rpc_sock(ctl);
ret = recv_fds(sk, lfds, nr_fds, opts, sizeof(struct fd_opts));
if (ret)
pr_err("Can't retrieve FDs from socket\n");
ret |= compel_rpc_sync(PARASITE_CMD_DRAIN_FDS, ctl);
err:
return ret;
}
int parasite_get_proc_fd_seized(struct parasite_ctl *ctl)
{
int ret = -1, fd, sk;
ret = compel_rpc_call(PARASITE_CMD_GET_PROC_FD, ctl);
if (ret) {
pr_err("Parasite failed to get proc fd\n");
return ret;
}
sk = compel_rpc_sock(ctl);
fd = recv_fd(sk);
if (fd < 0)
pr_err("Can't retrieve FD from socket\n");
if (compel_rpc_sync(PARASITE_CMD_GET_PROC_FD, ctl)) {
close_safe(&fd);
return -1;
}
return fd;
}
/* This is officially the 50000'th line in the CRIU source code */
int parasite_dump_cgroup(struct parasite_ctl *ctl, struct parasite_dump_cgroup_args *cgroup)
{
int ret;
struct parasite_dump_cgroup_args *ca;
ca = compel_parasite_args(ctl, struct parasite_dump_cgroup_args);
memcpy(ca->thread_cgrp, cgroup->thread_cgrp, sizeof(ca->thread_cgrp));
ret = compel_rpc_call_sync(PARASITE_CMD_DUMP_CGROUP, ctl);
if (ret) {
pr_err("Parasite failed to dump /proc/self/cgroup\n");
return ret;
}
*cgroup = *ca;
return 0;
}
static unsigned long parasite_args_size = PARASITE_ARG_SIZE_MIN;
void parasite_ensure_args_size(unsigned long sz)
{
if (parasite_args_size < sz)
parasite_args_size = sz;
}
static int make_sigframe(void *arg, struct rt_sigframe *sf, struct rt_sigframe *rtsf, k_rtsigset_t *bs)
{
return construct_sigframe(sf, rtsf, bs, (CoreEntry *)arg);
}
static int parasite_prepare_threads(struct parasite_ctl *ctl, struct pstree_item *item)
{
struct parasite_thread_ctl **thread_ctls;
uint64_t *thread_sp;
int i;
thread_ctls = xzalloc(sizeof(*thread_ctls) * item->nr_threads);
if (!thread_ctls)
return -1;
thread_sp = xzalloc(sizeof(*thread_sp) * item->nr_threads);
if (!thread_sp)
goto free_ctls;
for (i = 0; i < item->nr_threads; i++) {
struct pid *tid = &item->threads[i];
if (item->pid->real == tid->real) {
thread_sp[i] = compel_get_leader_sp(ctl);
continue;
}
thread_ctls[i] = compel_prepare_thread(ctl, tid->real);
if (!thread_ctls[i])
goto free_sp;
thread_sp[i] = compel_get_thread_sp(thread_ctls[i]);
}
dmpi(item)->thread_ctls = thread_ctls;
dmpi(item)->thread_sp = thread_sp;
return 0;
free_sp:
xfree(thread_sp);
free_ctls:
xfree(thread_ctls);
return -1;
}
struct parasite_ctl *parasite_infect_seized(pid_t pid, struct pstree_item *item, struct vm_area_list *vma_area_list)
{
struct parasite_ctl *ctl;
struct infect_ctx *ictx;
unsigned long p;
int ret;
BUG_ON(item->threads[0].real != pid);
p = get_exec_start(vma_area_list);
if (!p) {
pr_err("No suitable VM found\n");
return NULL;
}
ctl = compel_prepare_noctx(pid);
if (!ctl)
return NULL;
ret = parasite_prepare_threads(ctl, item);
if (ret)
return NULL;
ictx = compel_infect_ctx(ctl);
ictx->open_proc = do_open_proc;
ictx->child_handler = sigchld_handler;
ictx->orig_handler.sa_handler = SIG_DFL;
ictx->orig_handler.sa_flags = SA_SIGINFO | SA_RESTART;
sigemptyset(&ictx->orig_handler.sa_mask);
sigaddset(&ictx->orig_handler.sa_mask, SIGCHLD);
ictx->sock = dmpi(item)->netns->net.seqsk;
ictx->save_regs = save_task_regs;
ictx->make_sigframe = make_sigframe;
ictx->regs_arg = item->core[0];
ictx->task_size = kdat.task_size;
ictx->syscall_ip = p;
pr_debug("Parasite syscall_ip at %#lx\n", p);
if (fault_injected(FI_NO_MEMFD))
ictx->flags |= INFECT_NO_MEMFD;
if (fault_injected(FI_PARASITE_CONNECT))
ictx->flags |= INFECT_FAIL_CONNECT;
if (fault_injected(FI_NO_BREAKPOINTS))
ictx->flags |= INFECT_NO_BREAKPOINTS;
if (kdat.compat_cr)
ictx->flags |= INFECT_COMPATIBLE;
if (kdat.x86_has_ptrace_fpu_xsave_bug)
ictx->flags |= INFECT_X86_PTRACE_MXCSR_BUG;
if (fault_injected(FI_CORRUPT_EXTREGS))
ictx->flags |= INFECT_CORRUPT_EXTREGS;
ictx->log_fd = log_get_fd();
parasite_setup_c_header(ctl);
parasite_ensure_args_size(dump_pages_args_size(vma_area_list));
parasite_ensure_args_size(aio_rings_args_size(vma_area_list));
if (compel_infect(ctl, item->nr_threads, parasite_args_size) < 0) {
if (compel_cure(ctl))
pr_warn("Can't cure failed infection\n");
return NULL;
}
parasite_args_size = PARASITE_ARG_SIZE_MIN; /* reset for next task */
#ifdef CONFIG_MIPS
memcpy(&item->core[0]->tc->blk_sigset, (unsigned long *)compel_task_sigmask(ctl),
sizeof(item->core[0]->tc->blk_sigset));
memcpy(&item->core[0]->tc->blk_sigset_extended, (unsigned long *)compel_task_sigmask(ctl) + 1,
sizeof(item->core[0]->tc->blk_sigset));
#else
memcpy(&item->core[0]->tc->blk_sigset, compel_task_sigmask(ctl), sizeof(k_rtsigset_t));
#endif
dmpi(item)->parasite_ctl = ctl;
return ctl;
}
| 16,870 | 24.875767 | 119 |
c
|
criu
|
criu-master/criu/path.c
|
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include "int.h"
#include "mount.h"
#include "path.h"
#include "log.h"
#include "util.h"
#include "common/bug.h"
char *cut_root_for_bind(char *target_root, char *source_root)
{
int tok = 0;
char *path = NULL;
/*
	 * Cut the common part of the roots.
	 * For non-root binds the source is always "/" (checked),
	 * so this results only in removal of that slash.
*/
while (target_root[tok] == source_root[tok]) {
tok++;
if (source_root[tok] == '\0') {
path = target_root + tok;
goto out;
}
if (target_root[tok] == '\0') {
path = source_root + tok;
goto out;
}
}
return NULL;
out:
BUG_ON(path == NULL);
if (path[0] == '/')
path++;
return path;
}
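/*
 * Illustrative sketch (not part of the original source): how
 * cut_root_for_bind() behaves on sample, hypothetical inputs.
 */
static inline void __example_cut_root_for_bind(void)
{
	/* The common prefix "/a" is cut and the leading slash dropped: "b/c" */
	pr_debug("%s\n", cut_root_for_bind("/a/b/c", "/a"));
	/* With source root "/" only the slash is removed: "a/b" */
	pr_debug("%s\n", cut_root_for_bind("/a/b", "/"));
}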
char *mnt_get_sibling_path(struct mount_info *m, struct mount_info *p, char *buf, int len)
{
struct mount_info *pa = m->parent;
char *rpath, fsrpath[PATH_MAX];
if (pa == NULL)
return NULL;
rpath = get_relative_path(m->ns_mountpoint, pa->ns_mountpoint);
if (!rpath) {
pr_warn("child - parent mountpoint mismatch %s - %s\n", m->ns_mountpoint, pa->ns_mountpoint);
return NULL;
}
if (snprintf(fsrpath, sizeof(fsrpath), "%s/%s", pa->root, rpath) >= sizeof(fsrpath)) {
		pr_warn("snprintf truncation \"%s / %s\"\n", pa->root, rpath);
return NULL;
}
rpath = get_relative_path(fsrpath, p->root);
if (!rpath)
return NULL;
	if (snprintf(buf, len, "%s/%s", p->ns_mountpoint, rpath) >= len) {
		pr_warn("snprintf truncation \"%s / %s\"\n", p->ns_mountpoint, rpath);
return NULL;
}
return buf;
}
| 1,560 | 20.680556 | 95 |
c
|
criu
|
criu-master/criu/pidfd-store.c
|
#include <sys/socket.h>
#include <unistd.h>
#include <limits.h>
#include <errno.h>
#include <stdio.h>
#include <poll.h>
#include "compel/plugins/std/syscall-codes.h"
#include "cr_options.h"
#include "common/scm.h"
#include "common/list.h"
#include "kerndat.h"
#include "log.h"
#include "util.h"
#include "pidfd-store.h"
#include "sockets.h"
struct pidfd_entry {
pid_t pid;
int pidfd;
struct hlist_node hash; /* To lookup pidfd by pid */
};
static int pidfd_store_sk = -1;
#define PIDFD_HASH_SIZE 32
static struct hlist_head pidfd_hash[PIDFD_HASH_SIZE];
/*
* Steal (sk) from remote RPC client (pid) and prepare it to
* be used as the pidfd storage socket.
*/
int init_pidfd_store_sk(pid_t pid, int sk)
{
int pidfd;
int sock_type;
socklen_t len;
struct sockaddr_un addr;
unsigned int addrlen;
/* In kernel a bufsize has type int and a value is doubled. */
uint32_t buf[2] = { INT_MAX / 2, INT_MAX / 2 };
if (!kdat.has_pidfd_open) {
pr_err("pidfd_open syscall is not supported\n");
return -1;
}
if (!kdat.has_pidfd_getfd) {
pr_err("pidfd_getfd syscall is not supported\n");
return -1;
}
/* Steal pidfd store socket from RPC client */
pidfd = syscall(SYS_pidfd_open, pid, 0);
if (pidfd == -1) {
pr_perror("Can't get pidfd of (pid: %d)", pid);
goto err;
}
close_safe(&pidfd_store_sk);
pidfd_store_sk = syscall(SYS_pidfd_getfd, pidfd, sk, 0);
if (pidfd_store_sk == -1) {
pr_perror("Can't steal fd %d using pidfd_getfd", sk);
close(pidfd);
goto err;
}
close(pidfd);
/* Check that stolen socket is a connectionless unix domain socket */
len = sizeof(sock_type);
if (getsockopt(pidfd_store_sk, SOL_SOCKET, SO_TYPE, &sock_type, &len)) {
pr_perror("Can't get socket type (fd: %d)", pidfd_store_sk);
goto err;
}
if (sock_type != SOCK_DGRAM) {
pr_err("Pidfd store socket must be of type SOCK_DGRAM\n");
goto err;
}
addrlen = sizeof(addr);
if (getsockname(pidfd_store_sk, (struct sockaddr *)&addr, &addrlen)) {
pr_perror("Can't get socket bound name (fd: %d)", pidfd_store_sk);
goto err;
}
if (addr.sun_family != AF_UNIX) {
pr_err("Pidfd store socket must be AF_UNIX\n");
goto err;
}
/*
	 * An unnamed socket needs to be bound and connected to itself.
	 * This only occurs once, in the first pre-dump; after the socket
	 * is bound, addrlen will be sizeof(struct sockaddr_un).
* This is similar to how fdstore_init() works.
*/
if (addrlen == sizeof(sa_family_t)) {
if (sk_setbufs(pidfd_store_sk, buf)) {
goto err;
}
addrlen = snprintf(addr.sun_path, sizeof(addr.sun_path), "X/criu-pidfd-store-%d-%d-%" PRIx64, pid, sk,
criu_run_id);
addrlen += sizeof(addr.sun_family);
addr.sun_path[0] = 0;
if (bind(pidfd_store_sk, (struct sockaddr *)&addr, addrlen)) {
pr_perror("Unable to bind a socket");
goto err;
}
if (connect(pidfd_store_sk, (struct sockaddr *)&addr, addrlen)) {
pr_perror("Unable to connect a socket");
goto err;
}
}
return 0;
err:
close_safe(&pidfd_store_sk);
return -1;
}
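/*
 * Illustrative sketch (not part of the original source): the
 * self-connected abstract AF_UNIX datagram socket pattern used above.
 * Such a socket acts as an in-kernel queue the process can send fds
 * into and read them back from later. Names are hypothetical.
 */
static inline int __example_self_connected_dgram(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	socklen_t alen;
	int sk;
	sk = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;
	alen = snprintf(addr.sun_path, sizeof(addr.sun_path), "Xexample-store") + sizeof(addr.sun_family);
	addr.sun_path[0] = 0; /* a leading NUL selects the abstract namespace */
	if (bind(sk, (struct sockaddr *)&addr, alen) || connect(sk, (struct sockaddr *)&addr, alen)) {
		close(sk);
		return -1;
	}
	return sk; /* datagrams sent to sk can later be recv()-ed from sk */
}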
void free_pidfd_store(void)
{
int i;
struct pidfd_entry *entry;
struct hlist_node *tmp;
for (i = 0; i < PIDFD_HASH_SIZE; i++) {
hlist_for_each_entry_safe(entry, tmp, &pidfd_hash[i], hash) {
close(entry->pidfd);
xfree(entry);
}
INIT_HLIST_HEAD(&pidfd_hash[i]);
}
close_safe(&pidfd_store_sk);
}
int init_pidfd_store_hash(void)
{
int i, cnt, ret;
struct pidfd_entry *entry;
for (i = 0; i < PIDFD_HASH_SIZE; i++)
INIT_HLIST_HEAD(&pidfd_hash[i]);
/* Skip building pidfd_hash if pidfd_store_sk is not initialized */
if (pidfd_store_sk == -1)
return 0;
/*
* Drain all queued pidfd entries in pidfd_store_sk from
* the last predump into pidfd_hash.
*/
cnt = 0;
while (1) {
entry = xmalloc(sizeof(struct pidfd_entry));
if (entry == NULL)
goto err;
INIT_HLIST_NODE(&entry->hash);
ret = __recv_fds(pidfd_store_sk, &entry->pidfd, 1, &entry->pid, sizeof(pid_t), MSG_DONTWAIT);
if (ret == -EAGAIN || ret == -EWOULDBLOCK) {
/* No more fds to read */
xfree(entry);
goto check_empty;
} else if (ret) {
pr_perror("Can't read pidfd");
xfree(entry);
goto err;
}
cnt++;
hlist_add_head(&entry->hash, &pidfd_hash[entry->pid % PIDFD_HASH_SIZE]);
}
err:
free_pidfd_store();
return -1;
check_empty:
/*
	 * If no pidfds exist in the pidfd store, we would do full page
	 * dumps, which goes against the purpose of the pidfd store.
	 * This is probably due to passing a different pidfd_store socket.
*/
if (cnt == 0 && opts.img_parent) {
pr_err("No pidfds found in pidfd store\n");
pr_err("The same socket from the previous iteration should be passed\n");
return -1;
}
return 0;
}
static struct pidfd_entry *find_pidfd_entry_by_pid(pid_t pid)
{
struct pidfd_entry *entry;
struct hlist_head *chain;
chain = &pidfd_hash[pid % PIDFD_HASH_SIZE];
hlist_for_each_entry(entry, chain, hash) {
if (entry->pid == pid)
return entry;
}
return NULL;
}
/*
* 1 - task closed
* 0 - task still running
* -1 - error
*/
static int check_pidfd_entry_state(struct pidfd_entry *entry)
{
struct pollfd pollfd;
int ret, restart_cnt = 0;
const int MAX_RESTARTS = 10; /* Reasonable limit to avoid getting stuck */
/*
* When there is data to read from the pidfd, it means
* that the task associated with this pidfd is closed.
*/
pollfd.fd = entry->pidfd;
pollfd.events = POLLIN;
while (1) {
ret = poll(&pollfd, 1, 0);
if (ret == -1 && errno == EINTR && restart_cnt < MAX_RESTARTS) {
restart_cnt++;
continue; /* restart polling */
}
return ret;
}
}
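/*
 * Illustrative sketch (not part of the original source): the bare
 * pidfd liveness check the helper above builds upon -- a pidfd polls
 * as readable once its task has exited. The name is hypothetical.
 */
static inline int __example_task_exited(pid_t pid)
{
	struct pollfd pfd = { .events = POLLIN };
	int ret;
	pfd.fd = syscall(SYS_pidfd_open, pid, 0);
	if (pfd.fd < 0)
		return -1;
	ret = poll(&pfd, 1, 0); /* 1 -> exited, 0 -> still running */
	close(pfd.fd);
	return ret;
}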
int pidfd_store_add(pid_t pid)
{
int pidfd, entry_state;
struct pidfd_entry *entry;
/* Skip sending if pidfd_store_sk is not initialized */
if (pidfd_store_sk == -1)
return 0;
/*
	 * Use an existing pidfd entry or create a pidfd for the task.
	 * If an entry exists with the same pid, we must check that
	 * it is not a case of pid reuse (i.e. the task is closed).
*/
entry = find_pidfd_entry_by_pid(pid);
if (entry != NULL) {
entry_state = check_pidfd_entry_state(entry);
if (entry_state == -1) {
pr_perror("Can't get state of pidfd entry of pid %d", pid);
return -1;
} else if (entry_state == 1) {
			/* Task is closed, we need to create a new pidfd for the task. */
entry = NULL;
}
}
if (entry == NULL) {
if (!kdat.has_pidfd_open) {
pr_err("pidfd_open syscall is not supported\n");
return -1;
}
pidfd = syscall(SYS_pidfd_open, pid, 0);
if (pidfd == -1) {
pr_perror("Can't get pidfd of pid %d", pid);
return -1;
}
} else {
pidfd = entry->pidfd;
}
if (send_fds(pidfd_store_sk, NULL, 0, &pidfd, 1, &pid, sizeof(pid_t))) {
pr_perror("Can't send pidfd %d of pid %d", pidfd, pid);
if (!entry)
close(pidfd);
return -1;
}
if (!entry)
close(pidfd);
return 0;
}
/*
* 1 - pid reuse detected
* 0 - task still running
* -1 - error
*/
int pidfd_store_check_pid_reuse(pid_t pid)
{
struct pidfd_entry *entry;
int ret;
entry = find_pidfd_entry_by_pid(pid);
if (entry == NULL) {
/*
		 * This task was created between two iterations, so it
* should be marked as a pid reuse to make a full memory dump.
*/
pr_warn("Pid reuse detected for pid %d\n", pid);
return 1;
}
ret = check_pidfd_entry_state(entry);
if (ret == -1)
pr_err("Failed to get pidfd entry state for pid %d\n", pid);
else if (ret == 1)
pr_warn("Pid reuse detected for pid %d\n", pid);
return ret;
}
bool pidfd_store_ready(void)
{
return pidfd_store_sk != -1;
}
| 7,446 | 21.704268 | 104 |
c
|
criu
|
criu-master/criu/pipes.c
|
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "crtools.h"
#include "imgset.h"
#include "image.h"
#include "files.h"
#include "pipes.h"
#include "util-pie.h"
#include "autofs.h"
#include "protobuf.h"
#include "util.h"
#include "images/pipe.pb-c.h"
#include "images/pipe-data.pb-c.h"
#include "fcntl.h"
#include "namespaces.h"
static LIST_HEAD(pipes);
static void show_saved_pipe_fds(struct pipe_info *pi)
{
struct fdinfo_list_entry *fle;
pr_info(" `- ID %p %#x\n", pi, pi->pe->id);
list_for_each_entry(fle, &pi->d.fd_info_head, desc_list)
pr_info(" `- FD %d pid %d\n", fle->fe->fd, fle->pid);
}
static int pipe_data_read(struct cr_img *img, struct pipe_data_rst *r)
{
unsigned long bytes = r->pde->bytes;
if (!bytes)
return 0;
/*
* We potentially allocate more memory than required for data,
* but this is OK. Look at restore_pipe_data -- it vmsplice-s
* this into the kernel with F_GIFT flag (since some time it
* works on non-aligned data), thus just giving this page to
* pipe buffer. And since kernel allocates pipe buffers in pages
* anyway we don't increase memory consumption :)
*/
r->data = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (r->data == MAP_FAILED) {
pr_perror("Can't map mem for pipe buffers");
return -1;
}
return read_img_buf(img, r->data, bytes);
}
int do_collect_pipe_data(struct pipe_data_rst *r, ProtobufCMessage *msg, struct cr_img *img,
struct pipe_data_rst **hash)
{
int aux;
r->pde = pb_msg(msg, PipeDataEntry);
aux = pipe_data_read(img, r);
if (aux < 0)
return aux;
aux = r->pde->pipe_id & PIPE_DATA_HASH_MASK;
r->next = hash[aux];
hash[aux] = r;
pr_info("Collected pipe data for %#x (chain %u)\n", r->pde->pipe_id, aux);
return 0;
}
/* Choose who will restore a pipe. */
static int mark_pipe_master_cb(struct pprep_head *ph)
{
LIST_HEAD(head);
pr_info("Pipes:\n");
while (1) {
struct fdinfo_list_entry *fle;
struct pipe_info *pi, *pic, *p;
struct pipe_info *pr = NULL, *pw = NULL;
if (list_empty(&pipes))
break;
pi = list_first_entry(&pipes, struct pipe_info, list);
list_move(&pi->list, &head);
pr_info(" `- PIPE ID %#x\n", pi->pe->pipe_id);
show_saved_pipe_fds(pi);
fle = file_master(&pi->d);
p = pi;
if (!(pi->pe->flags & O_LARGEFILE)) {
if (pi->pe->flags & O_WRONLY) {
if (pw == NULL)
pw = pi;
} else {
if (pr == NULL)
pr = pi;
}
}
list_for_each_entry(pic, &pi->pipe_list, pipe_list) {
struct fdinfo_list_entry *f;
list_move(&pic->list, &head);
f = file_master(&pic->d);
if (fdinfo_rst_prio(f, fle)) {
p = pic;
fle = f;
}
if (!(pic->pe->flags & O_LARGEFILE)) {
if (pic->pe->flags & O_WRONLY) {
if (pw == NULL)
pw = pic;
} else {
if (pr == NULL)
pr = pic;
}
}
show_saved_pipe_fds(pic);
}
p->create = 1;
if (pr)
pr->reopen = 0;
if (pw)
pw->reopen = 0;
pr_info(" by %#x\n", p->pe->id);
}
list_splice(&head, &pipes);
return 0;
}
static MAKE_PPREP_HEAD(mark_pipe_master);
static struct pipe_data_rst *pd_hash_pipes[PIPE_DATA_HASH_SIZE];
int restore_pipe_data(int img_type, int pfd, u32 id, struct pipe_data_rst **hash)
{
int ret;
struct pipe_data_rst *pd;
struct iovec iov;
for (pd = hash[id & PIPE_DATA_HASH_MASK]; pd != NULL; pd = pd->next)
if (pd->pde->pipe_id == id)
break;
if (!pd) { /* no data for this pipe */
pr_info("No data for pipe %#x\n", id);
return 0;
}
if (pd->pde->has_size) {
pr_info("Restoring size %#x for %#x\n", pd->pde->size, pd->pde->pipe_id);
ret = fcntl(pfd, F_SETPIPE_SZ, pd->pde->size);
if (ret < 0) {
pr_perror("Can't restore pipe size");
return -1;
}
}
if (!pd->pde->bytes)
return 0;
if (!pd->data) {
pr_err("Double data restore occurred on %#x\n", id);
return -1;
}
iov.iov_base = pd->data;
iov.iov_len = pd->pde->bytes;
while (iov.iov_len > 0) {
ret = vmsplice(pfd, &iov, 1, SPLICE_F_GIFT | SPLICE_F_NONBLOCK);
if (ret < 0) {
pr_perror("%#x: Error splicing data", id);
return -1;
}
if (ret == 0 || ret > iov.iov_len /* sanity */) {
pr_err("%#x: Wanted to restore %zu bytes, but got %d\n", id, iov.iov_len, ret);
return -1;
}
iov.iov_base += ret;
iov.iov_len -= ret;
}
/*
* 3 reasons for killing the buffer from our address space:
*
* 1. We gifted the pages to the kernel to optimize memory usage, thus
* accidental memory corruption can change the pipe buffer.
* 2. This will make the vmas restoration a bit faster due to less self
* mappings to be unmapped.
* 3. We can catch bugs with double pipe data restore.
*/
munmap(pd->data, pd->pde->bytes);
pd->data = NULL;
return 0;
}
static int userns_reopen(void *_arg, int fd, pid_t pid)
{
char path[PSFDS];
int ret, flags = *(int *)_arg;
sprintf(path, "/proc/self/fd/%d", fd);
ret = open(path, flags);
if (ret < 0)
pr_perror("Unable to reopen the pipe %s", path);
close(fd);
return ret;
}
static int reopen_pipe(int fd, int flags)
{
int ret;
char path[PSFDS];
sprintf(path, "/proc/self/fd/%d", fd);
ret = open(path, flags);
if (ret < 0) {
if (errno == EACCES) {
			/* It may be an external pipe from another userns */
ret = userns_call(userns_reopen, UNS_FDOUT, &flags, sizeof(flags), fd);
} else
pr_perror("Unable to reopen the pipe %s", path);
}
close(fd);
return ret;
}
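/*
 * Illustrative note (not part of the original source): reopening via
 * /proc/self/fd/N is the standard way to obtain the same pipe end with
 * different access flags, since fcntl(F_SETFL) cannot change the
 * access mode of an existing descriptor.
 */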
static int recv_pipe_fd(struct pipe_info *pi, int *new_fd)
{
int tmp, fd, ret;
ret = recv_desc_from_peer(&pi->d, &tmp);
if (ret != 0) {
if (ret != 1)
pr_err("Can't get fd %d\n", tmp);
return ret;
}
if (pi->reopen)
fd = reopen_pipe(tmp, pi->pe->flags);
else
fd = tmp;
if (fd >= 0) {
if (rst_file_params(fd, pi->pe->fown, pi->pe->flags)) {
close(fd);
return -1;
}
*new_fd = fd;
}
return fd < 0 ? -1 : 0;
}
static char *pipe_d_name(struct file_desc *d, char *buf, size_t s)
{
struct pipe_info *pi;
pi = container_of(d, struct pipe_info, d);
if (snprintf(buf, s, "pipe:[%u]", pi->pe->pipe_id) >= s) {
pr_err("Not enough room for pipe %u identifier string\n", pi->pe->pipe_id);
return NULL;
}
return buf;
}
int open_pipe(struct file_desc *d, int *new_fd)
{
struct pipe_info *pi, *p;
int ret, tmp;
int pfd[2];
pi = container_of(d, struct pipe_info, d);
pr_info("\t\tCreating pipe pipe_id=%#x id=%#x\n", pi->pe->pipe_id, pi->pe->id);
if (inherited_fd(d, &tmp)) {
if (tmp < 0)
return tmp;
pi->reopen = 1;
goto reopen;
}
if (!pi->create)
return recv_pipe_fd(pi, new_fd);
if (pipe(pfd) < 0) {
pr_perror("Can't create pipe");
return -1;
}
ret = restore_pipe_data(CR_FD_PIPES_DATA, pfd[1], pi->pe->pipe_id, pd_hash_pipes);
if (ret)
return -1;
list_for_each_entry(p, &pi->pipe_list, pipe_list) {
int fd = pfd[p->pe->flags & O_WRONLY];
if (send_desc_to_peer(fd, &p->d)) {
pr_perror("Can't send file descriptor");
return -1;
}
}
close(pfd[!(pi->pe->flags & O_WRONLY)]);
tmp = pfd[pi->pe->flags & O_WRONLY];
reopen:
if (pi->reopen)
tmp = reopen_pipe(tmp, pi->pe->flags);
if (tmp >= 0)
if (rst_file_params(tmp, pi->pe->fown, pi->pe->flags))
return -1;
if (tmp < 0)
return -1;
*new_fd = tmp;
return 0;
}
static struct file_desc_ops pipe_desc_ops = {
.type = FD_TYPES__PIPE,
.open = open_pipe,
.name = pipe_d_name,
};
int collect_one_pipe_ops(void *o, ProtobufCMessage *base, struct file_desc_ops *ops)
{
struct pipe_info *pi = o, *tmp;
pi->pe = pb_msg(base, PipeEntry);
pi->create = 0;
pi->reopen = 1;
pr_info("Collected pipe entry ID %#x PIPE ID %#x\n", pi->pe->id, pi->pe->pipe_id);
if (file_desc_add(&pi->d, pi->pe->id, ops))
return -1;
INIT_LIST_HEAD(&pi->pipe_list);
if (!inherited_fd(&pi->d, NULL)) {
list_for_each_entry(tmp, &pipes, list)
if (pi->pe->pipe_id == tmp->pe->pipe_id)
break;
if (&tmp->list != &pipes)
list_add(&pi->pipe_list, &tmp->pipe_list);
}
add_post_prepare_cb_once(&mark_pipe_master);
list_add_tail(&pi->list, &pipes);
return 0;
}
static int collect_one_pipe(void *o, ProtobufCMessage *base, struct cr_img *i)
{
return collect_one_pipe_ops(o, base, &pipe_desc_ops);
}
struct collect_image_info pipe_cinfo = {
.fd_type = CR_FD_PIPES,
.pb_type = PB_PIPE,
.priv_size = sizeof(struct pipe_info),
.collect = collect_one_pipe,
};
static int collect_pipe_data(void *obj, ProtobufCMessage *msg, struct cr_img *img)
{
return do_collect_pipe_data(obj, msg, img, pd_hash_pipes);
}
struct collect_image_info pipe_data_cinfo = {
.fd_type = CR_FD_PIPES_DATA,
.pb_type = PB_PIPE_DATA,
.priv_size = sizeof(struct pipe_data_rst),
.collect = collect_pipe_data,
};
int dump_one_pipe_data(struct pipe_data_dump *pd, int lfd, const struct fd_parms *p)
{
struct cr_img *img;
int pipe_size, i, bytes;
int steal_pipe[2];
int ret = -1;
PipeDataEntry pde = PIPE_DATA_ENTRY__INIT;
if (p->flags & O_WRONLY)
return 0;
/* Maybe we've dumped it already */
for (i = 0; i < pd->nr; i++) {
if (pd->ids[i] == pipe_id(p))
return 0;
}
pr_info("Dumping data from pipe %#x fd %d\n", pipe_id(p), lfd);
if (pd->nr >= NR_PIPES_WITH_DATA) {
pr_err("OOM storing pipe\n");
return -1;
}
img = img_from_set(glob_imgset, pd->img_type);
pd->ids[pd->nr++] = pipe_id(p);
pipe_size = fcntl(lfd, F_GETPIPE_SZ);
if (pipe_size < 0) {
pr_err("Can't obtain piped data size\n");
goto err;
}
if (pipe(steal_pipe) < 0) {
pr_perror("Can't create pipe for stealing data");
goto err;
}
/* steal_pipe has to be able to fit all data from a target pipe */
if (fcntl(steal_pipe[1], F_SETPIPE_SZ, pipe_size) < 0) {
pr_perror("Unable to set a pipe size");
goto err;
}
bytes = tee(lfd, steal_pipe[1], pipe_size, SPLICE_F_NONBLOCK);
if (bytes < 0) {
if (errno != EAGAIN) {
pr_perror("Can't pick pipe data");
goto err_close;
}
bytes = 0;
}
pde.pipe_id = pipe_id(p);
pde.bytes = bytes;
pde.has_size = true;
pde.size = pipe_size;
if (pb_write_one(img, &pde, PB_PIPE_DATA))
goto err_close;
while (bytes > 0) {
int wrote;
wrote = splice(steal_pipe[0], NULL, img_raw_fd(img), NULL, bytes, 0);
if (wrote < 0) {
pr_perror("Can't push pipe data");
goto err_close;
} else if (wrote == 0)
break;
bytes -= wrote;
}
ret = 0;
err_close:
close(steal_pipe[0]);
close(steal_pipe[1]);
err:
return ret;
}
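/*
 * Illustrative sketch (not part of the original source): tee(2) is
 * what lets the dumper above peek at pipe contents without draining
 * them -- it duplicates up to @len bytes from one pipe into another
 * without consuming the source. Minimal form with hypothetical fds:
 */
static inline ssize_t __example_peek_pipe(int src_fd, int dst_fd, size_t len)
{
	/* SPLICE_F_NONBLOCK: fail with EAGAIN instead of blocking */
	return tee(src_fd, dst_fd, len, SPLICE_F_NONBLOCK);
}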
static struct pipe_data_dump pd_pipes = {
.img_type = CR_FD_PIPES_DATA,
};
static int dump_one_pipe(int lfd, u32 id, const struct fd_parms *p)
{
FileEntry fe = FILE_ENTRY__INIT;
PipeEntry pe = PIPE_ENTRY__INIT;
pr_info("Dumping pipe %d with id %#x pipe_id %#x\n", lfd, id, pipe_id(p));
if ((p->flags & O_DIRECT) && !is_autofs_pipe(pipe_id(p))) {
pr_err("The packetized mode for pipes is not supported yet\n");
return -1;
}
pe.id = id;
pe.pipe_id = pipe_id(p);
pe.flags = p->flags & ~O_DIRECT;
pe.fown = (FownEntry *)&p->fown;
fe.type = FD_TYPES__PIPE;
fe.id = pe.id;
fe.pipe = &pe;
if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
return -1;
return dump_one_pipe_data(&pd_pipes, lfd, p);
}
const struct fdtype_ops pipe_dump_ops = {
.type = FD_TYPES__PIPE,
.dump = dump_one_pipe,
};
| 11,194 | 20.822612 | 92 |
c
|
criu
|
criu-master/criu/plugin.c
|
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <stdio.h>
#include <errno.h>
#include <dlfcn.h>
#include "cr_options.h"
#include "common/compiler.h"
#include "xmalloc.h"
#include "plugin.h"
#include "servicefd.h"
#include "common/list.h"
#include "log.h"
cr_plugin_ctl_t cr_plugin_ctl = {
.head.next = &cr_plugin_ctl.head,
.head.prev = &cr_plugin_ctl.head,
};
/*
 * If we meet an old version of a plugin, self-generate a plugin descriptor for it.
*/
static cr_plugin_desc_t *cr_gen_plugin_desc(void *h, char *path)
{
cr_plugin_desc_t *d;
d = xzalloc(sizeof(*d));
if (!d)
return NULL;
d->name = xstrdup(path);
d->max_hooks = CR_PLUGIN_HOOK__MAX;
d->version = CRIU_PLUGIN_VERSION_OLD;
	pr_warn("Generating dynamic descriptor for plugin `%s'. "
		"This won't work in the next version of the program. "
		"Please update your plugin.\n",
path);
#define __assign_hook(__hook, __name) \
do { \
void *name; \
name = dlsym(h, __name); \
if (name) \
d->hooks[CR_PLUGIN_HOOK__##__hook] = name; \
} while (0)
__assign_hook(DUMP_UNIX_SK, "cr_plugin_dump_unix_sk");
__assign_hook(RESTORE_UNIX_SK, "cr_plugin_restore_unix_sk");
__assign_hook(DUMP_EXT_FILE, "cr_plugin_dump_file");
__assign_hook(RESTORE_EXT_FILE, "cr_plugin_restore_file");
__assign_hook(DUMP_EXT_MOUNT, "cr_plugin_dump_ext_mount");
__assign_hook(RESTORE_EXT_MOUNT, "cr_plugin_restore_ext_mount");
__assign_hook(DUMP_EXT_LINK, "cr_plugin_dump_ext_link");
__assign_hook(HANDLE_DEVICE_VMA, "cr_plugin_handle_device_vma");
__assign_hook(UPDATE_VMA_MAP, "cr_plugin_update_vma_map");
__assign_hook(RESUME_DEVICES_LATE, "cr_plugin_resume_devices_late");
#undef __assign_hook
d->init = dlsym(h, "cr_plugin_init");
d->exit = dlsym(h, "cr_plugin_fini");
return d;
}
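/*
 * Illustrative note (not part of the original source): new-style
 * plugins export their descriptor as a symbol named CR_PLUGIN_DESC
 * (looked up via dlsym() in cr_lib_load() below); the shim above only
 * adapts plugins that predate this convention and export raw hook
 * functions instead.
 */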
static void show_plugin_desc(cr_plugin_desc_t *d)
{
size_t i;
pr_debug("Plugin \"%s\" (version %u hooks %u)\n", d->name, d->version, d->max_hooks);
for (i = 0; i < d->max_hooks; i++) {
if (d->hooks[i])
pr_debug("\t%4zu -> %p\n", i, d->hooks[i]);
}
}
static int verify_plugin(cr_plugin_desc_t *d)
{
if (d->version > CRIU_PLUGIN_VERSION) {
pr_debug("Plugin %s has version %x while max %x supported\n", d->name, d->version, CRIU_PLUGIN_VERSION);
return -1;
}
if (d->max_hooks > CR_PLUGIN_HOOK__MAX) {
pr_debug("Plugin %s has %u assigned while max %u supported\n", d->name, d->max_hooks,
CR_PLUGIN_HOOK__MAX);
return -1;
}
return 0;
}
int criu_get_image_dir(void)
{
return get_service_fd(IMG_FD_OFF);
}
static int cr_lib_load(int stage, char *path)
{
cr_plugin_desc_t *d;
plugin_desc_t *this;
size_t i;
void *h;
bool allocated = false;
h = dlopen(path, RTLD_LAZY);
if (h == NULL) {
pr_err("Unable to load %s: %s\n", path, dlerror());
return -1;
}
/*
	 * Load the plugin descriptor. If the plugin is too old, create
	 * a dynamic one instead. This won't be a common operation, and
	 * plugins are not supposed to change their own format frequently.
*/
d = dlsym(h, "CR_PLUGIN_DESC");
if (!d) {
d = cr_gen_plugin_desc(h, path);
if (!d) {
pr_err("Can't load plugin %s\n", path);
goto error_close;
}
allocated = true;
}
this = xzalloc(sizeof(*this));
if (!this)
goto error_close;
if (verify_plugin(d)) {
pr_err("Corrupted plugin %s\n", path);
goto error_free;
}
this->d = d;
this->dlhandle = h;
INIT_LIST_HEAD(&this->list);
for (i = 0; i < d->max_hooks; i++)
INIT_LIST_HEAD(&this->link[i]);
list_add_tail(&this->list, &cr_plugin_ctl.head);
show_plugin_desc(d);
if (d->init && d->init(stage)) {
pr_err("Failed in init(%d) of \"%s\"\n", stage, d->name);
list_del(&this->list);
goto error_free;
}
/*
* Chain hooks into appropriate places for
* fast handler access.
*/
for (i = 0; i < d->max_hooks; i++) {
if (!d->hooks[i])
continue;
list_add_tail(&this->link[i], &cr_plugin_ctl.hook_chain[i]);
}
return 0;
error_free:
xfree(this);
error_close:
dlclose(h);
if (allocated)
xfree(d);
return -1;
}
void cr_plugin_fini(int stage, int ret)
{
plugin_desc_t *this, *tmp;
list_for_each_entry_safe(this, tmp, &cr_plugin_ctl.head, list) {
void *h = this->dlhandle;
size_t i;
list_del(&this->list);
if (this->d->exit)
this->d->exit(stage, ret);
for (i = 0; i < this->d->max_hooks; i++) {
if (!list_empty(&this->link[i]))
list_del(&this->link[i]);
}
if (this->d->version == CRIU_PLUGIN_VERSION_OLD)
xfree(this->d);
dlclose(h);
}
}
int cr_plugin_init(int stage)
{
int exit_code = -1;
char *path;
size_t i;
DIR *d;
INIT_LIST_HEAD(&cr_plugin_ctl.head);
for (i = 0; i < ARRAY_SIZE(cr_plugin_ctl.hook_chain); i++)
INIT_LIST_HEAD(&cr_plugin_ctl.hook_chain[i]);
if (opts.libdir == NULL) {
path = getenv("CRIU_LIBS_DIR");
if (path)
SET_CHAR_OPTS(libdir, path);
else {
if (access(CR_PLUGIN_DEFAULT, F_OK))
return 0;
SET_CHAR_OPTS(libdir, CR_PLUGIN_DEFAULT);
}
}
d = opendir(opts.libdir);
if (d == NULL) {
pr_perror("Unable to open directory %s", opts.libdir);
return -1;
}
while (1) {
char path[PATH_MAX];
struct dirent *de;
int len;
errno = 0;
de = readdir(d);
if (de == NULL) {
if (errno == 0)
break;
pr_perror("Unable to read the libraries directory");
goto err;
}
len = strlen(de->d_name);
if (len < 3 || strncmp(de->d_name + len - 3, ".so", 3))
continue;
if (snprintf(path, sizeof(path), "%s/%s", opts.libdir, de->d_name) >= sizeof(path)) {
pr_err("Unable to build plugin path\n");
goto err;
}
if (cr_lib_load(stage, path))
goto err;
}
exit_code = 0;
err:
closedir(d);
if (exit_code)
cr_plugin_fini(stage, exit_code);
return exit_code;
}
| 5,866 | 21.056391 | 106 |
c
|
criu
|
criu-master/criu/protobuf-desc.c
|
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <arpa/inet.h>
#include <ctype.h>
#include "common/compiler.h"
#include "log.h"
#include "protobuf-desc.h"
#include "images/inventory.pb-c.h"
#include "images/stats.pb-c.h"
#include "images/regfile.pb-c.h"
#include "images/ext-file.pb-c.h"
#include "images/ns.pb-c.h"
#include "images/eventfd.pb-c.h"
#include "images/eventpoll.pb-c.h"
#include "images/signalfd.pb-c.h"
#include "images/fsnotify.pb-c.h"
#include "images/core.pb-c.h"
#include "images/mm.pb-c.h"
#include "images/pipe.pb-c.h"
#include "images/fifo.pb-c.h"
#include "images/fdinfo.pb-c.h"
#include "images/pipe-data.pb-c.h"
#include "images/pstree.pb-c.h"
#include "images/sa.pb-c.h"
#include "images/sk-unix.pb-c.h"
#include "images/sk-inet.pb-c.h"
#include "images/packet-sock.pb-c.h"
#include "images/sk-packet.pb-c.h"
#include "images/creds.pb-c.h"
#include "images/timer.pb-c.h"
#include "images/utsns.pb-c.h"
#include "images/timens.pb-c.h"
#include "images/pidns.pb-c.h"
#include "images/ipc-var.pb-c.h"
#include "images/ipc-shm.pb-c.h"
#include "images/ipc-msg.pb-c.h"
#include "images/ipc-sem.pb-c.h"
#include "images/fs.pb-c.h"
#include "images/remap-file-path.pb-c.h"
#include "images/ghost-file.pb-c.h"
#include "images/mnt.pb-c.h"
#include "images/netdev.pb-c.h"
#include "images/tcp-stream.pb-c.h"
#include "images/tty.pb-c.h"
#include "images/file-lock.pb-c.h"
#include "images/rlimit.pb-c.h"
#include "images/pagemap.pb-c.h"
#include "images/siginfo.pb-c.h"
#include "images/sk-netlink.pb-c.h"
#include "images/vma.pb-c.h"
#include "images/tun.pb-c.h"
#include "images/cgroup.pb-c.h"
#include "images/timerfd.pb-c.h"
#include "images/cpuinfo.pb-c.h"
#include "images/userns.pb-c.h"
#include "images/seccomp.pb-c.h"
#include "images/binfmt-misc.pb-c.h"
#include "images/autofs.pb-c.h"
#include "images/img-streamer.pb-c.h"
#include "images/bpfmap-file.pb-c.h"
#include "images/bpfmap-data.pb-c.h"
#include "images/apparmor.pb-c.h"
struct cr_pb_message_desc cr_pb_descs[PB_MAX];
#define CR_PB_DESC(__type, __vtype, __ftype) CR_PB_MDESC_INIT(cr_pb_descs[PB_##__type], __vtype##Entry, __ftype##_entry)
#define PB_PACK_TYPECHECK(__o, __fn) \
({ \
if (0) \
__fn##__pack(__o, NULL); \
(pb_pack_t) & __fn##__pack; \
})
#define PB_GPS_TYPECHECK(__o, __fn) \
({ \
if (0) \
__fn##__get_packed_size(__o); \
(pb_getpksize_t) & __fn##__get_packed_size; \
})
#define PB_UNPACK_TYPECHECK(__op, __fn) \
({ \
if (0) \
*__op = __fn##__unpack(NULL, 0, NULL); \
(pb_unpack_t) & __fn##__unpack; \
})
#define PB_FREE_TYPECHECK(__o, __fn) \
({ \
if (0) \
__fn##__free_unpacked(__o, NULL); \
(pb_free_t) & __fn##__free_unpacked; \
})
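/*
 * Illustrative sketch (not part of the original source): the generic
 * form of the "if (0)" idiom used above. The dead call makes the
 * compiler check __fn against the expected prototype before the cast
 * to a generic function pointer, which on its own would hide any
 * mismatch. The macro name is hypothetical.
 */
#define EXAMPLE_HANDLER_TYPECHECK(__o, __fn) \
	({ \
		if (0) \
			__fn(__o); \
		(void (*)(void *)) & __fn; \
	})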
/*
* This should be explicitly "called" to do type-checking
*/
#define CR_PB_MDESC_INIT(__var, __type, __name) \
do { \
__var.getpksize = PB_GPS_TYPECHECK((__type *)NULL, __name); \
__var.pack = PB_PACK_TYPECHECK((__type *)NULL, __name); \
__var.unpack = PB_UNPACK_TYPECHECK((__type **)NULL, __name); \
__var.free = PB_FREE_TYPECHECK((__type *)NULL, __name); \
__var.pb_desc = &__name##__descriptor; \
} while (0)
void cr_pb_init(void)
{
CR_PB_DESC(IDS, TaskKobjIds, task_kobj_ids);
CR_PB_DESC(SIGACT, Sa, sa);
CR_PB_DESC(SK_QUEUES, SkPacket, sk_packet);
CR_PB_MDESC_INIT(cr_pb_descs[PB_IPCNS_MSG], IpcMsg, ipc_msg);
CR_PB_DESC(IPCNS_MSG_ENT, IpcMsg, ipc_msg);
CR_PB_DESC(REMAP_FPATH, RemapFilePath, remap_file_path);
CR_PB_DESC(NETDEV, NetDevice, net_device);
CR_PB_MDESC_INIT(cr_pb_descs[PB_PAGEMAP_HEAD], PagemapHead, pagemap_head);
#include "protobuf-desc-gen.h"
}
| 4,202 | 32.094488 | 120 |
c
|
criu
|
criu-master/criu/protobuf.c
|
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <ctype.h>
#include <google/protobuf-c/protobuf-c.h>
#include "image.h"
#include "servicefd.h"
#include "common/compiler.h"
#include "log.h"
#include "rst-malloc.h"
#include "string.h"
#include "sockets.h"
#include "cr_options.h"
#include "bfd.h"
#include "protobuf.h"
#include "util.h"
#define image_name(img, buf) __image_name(img, buf, sizeof(buf))
static char *__image_name(struct cr_img *img, char *image_path, size_t image_path_size)
{
int fd = img->_x.fd;
if (lazy_image(img))
return img->path;
else if (empty_image(img))
return "(empty-image)";
else if (fd >= 0 && read_fd_link(fd, image_path, image_path_size) > 0)
return image_path;
return NULL;
}
/*
 * Reads a PB record (header + packed object) from image @img and
 * unpacks it with the type's unpack callback into the pointer @pobj.
 *
 * Returns:
 * 1 on success
 * -1 on error (or when EOF is met while @eof is false)
 * 0 on EOF when @eof is true
 *
 * Don't forget to free the memory allocated for the unpacked object
 * in the calling code if needed.
*/
int do_pb_read_one(struct cr_img *img, void **pobj, int type, bool eof)
{
char img_name_buf[PATH_MAX];
u8 local[PB_PKOBJ_LOCAL_SIZE];
void *buf = (void *)&local;
u32 size;
int ret;
if (!cr_pb_descs[type].pb_desc) {
pr_err("Wrong object requested %d on %s\n", type, image_name(img, img_name_buf));
return -1;
}
*pobj = NULL;
if (unlikely(empty_image(img)))
ret = 0;
else
ret = bread(&img->_x, &size, sizeof(size));
if (ret == 0) {
if (eof) {
return 0;
} else {
pr_err("Unexpected EOF on %s\n", image_name(img, img_name_buf));
return -1;
}
} else if (ret < sizeof(size)) {
pr_perror("Read %d bytes while %d expected on %s", ret, (int)sizeof(size),
image_name(img, img_name_buf));
return -1;
}
if (size > sizeof(local)) {
ret = -1;
buf = xmalloc(size);
if (!buf)
goto err;
}
ret = bread(&img->_x, buf, size);
if (ret < 0) {
pr_perror("Can't read %d bytes from file %s", size, image_name(img, img_name_buf));
goto err;
} else if (ret != size) {
pr_perror("Read %d bytes while %d expected from %s", ret, size, image_name(img, img_name_buf));
ret = -1;
goto err;
}
*pobj = cr_pb_descs[type].unpack(NULL, size, buf);
if (!*pobj) {
ret = -1;
pr_err("Failed unpacking object %p from %s\n", pobj, image_name(img, img_name_buf));
goto err;
}
ret = 1;
err:
if (buf != (void *)&local)
xfree(buf);
return ret;
}
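/*
 * Illustrative sketch (not part of the original source): a typical
 * caller loop over do_pb_read_one() with @eof set, reading records
 * until a clean EOF. The helper name is hypothetical.
 */
static inline int __example_read_all(struct cr_img *img, int type)
{
	void *msg;
	int ret;
	while ((ret = do_pb_read_one(img, &msg, type, true)) > 0) {
		/* ... use the unpacked message ... */
		cr_pb_descs[type].free(msg, NULL);
	}
	return ret; /* 0 on clean EOF, negative on error */
}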
/*
 * Writes a PB record (header + the packed object pointed to by @obj)
 * to image @img, using @getpksize to get the packed size and @pack
 * to do the packing.
*
* 0 on success
* -1 on error
*/
int pb_write_one(struct cr_img *img, void *obj, int type)
{
u8 local[PB_PKOBJ_LOCAL_SIZE];
void *buf = (void *)&local;
u32 size, packed;
int ret = -1;
struct iovec iov[2];
if (!cr_pb_descs[type].pb_desc) {
pr_err("Wrong object requested %d\n", type);
return -1;
}
if (lazy_image(img) && open_image_lazy(img))
return -1;
size = cr_pb_descs[type].getpksize(obj);
if (size > (u32)sizeof(local)) {
buf = xmalloc(size);
if (!buf)
goto err;
}
packed = cr_pb_descs[type].pack(obj, buf);
if (packed != size) {
pr_err("Failed packing PB object %p\n", obj);
goto err;
}
iov[0].iov_base = &size;
iov[0].iov_len = sizeof(size);
iov[1].iov_base = buf;
iov[1].iov_len = size;
ret = bwritev(&img->_x, iov, 2);
if (ret != size + sizeof(size)) {
pr_perror("Can't write %d bytes", (int)(size + sizeof(size)));
goto err;
}
ret = 0;
err:
if (buf != (void *)&local)
xfree(buf);
return ret;
}
int collect_entry(ProtobufCMessage *msg, struct collect_image_info *cinfo)
{
void *obj;
void *(*o_alloc)(size_t size) = malloc;
void (*o_free)(void *ptr) = free;
if (cinfo->flags & COLLECT_SHARED) {
o_alloc = shmalloc;
o_free = shfree_last;
}
if (cinfo->priv_size) {
obj = o_alloc(cinfo->priv_size);
if (!obj)
return -1;
} else
obj = NULL;
cinfo->flags |= COLLECT_HAPPENED;
if (cinfo->collect(obj, msg, NULL) < 0) {
o_free(obj);
cr_pb_descs[cinfo->pb_type].free(msg, NULL);
return -1;
}
if (!cinfo->priv_size && !(cinfo->flags & COLLECT_NOFREE))
cr_pb_descs[cinfo->pb_type].free(msg, NULL);
return 0;
}
int collect_image(struct collect_image_info *cinfo)
{
int ret;
struct cr_img *img;
void *(*o_alloc)(size_t size) = malloc;
void (*o_free)(void *ptr) = free;
pr_info("Collecting %d/%d (flags %x)\n", cinfo->fd_type, cinfo->pb_type, cinfo->flags);
img = open_image(cinfo->fd_type, O_RSTR);
if (!img)
return -1;
if (cinfo->flags & COLLECT_SHARED) {
o_alloc = shmalloc;
o_free = shfree_last;
}
while (1) {
void *obj;
ProtobufCMessage *msg;
if (cinfo->priv_size) {
ret = -1;
obj = o_alloc(cinfo->priv_size);
if (!obj)
break;
} else
obj = NULL;
ret = pb_read_one_eof(img, &msg, cinfo->pb_type);
if (ret <= 0) {
o_free(obj);
break;
}
cinfo->flags |= COLLECT_HAPPENED;
ret = cinfo->collect(obj, msg, img);
if (ret < 0) {
o_free(obj);
cr_pb_descs[cinfo->pb_type].free(msg, NULL);
break;
}
if (!cinfo->priv_size && !(cinfo->flags & COLLECT_NOFREE))
cr_pb_descs[cinfo->pb_type].free(msg, NULL);
}
close_image(img);
pr_debug(" `- ... done\n");
return ret;
}
| 5,308 | 20.151394 | 97 |
c
|
criu
|
criu-master/criu/rbtree.c
|
/*
 * RBtree implementation adapted from the Linux kernel sources.
*/
#include <sys/types.h>
#include "rbtree.h"
static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
{
struct rb_node *right = node->rb_right;
struct rb_node *parent = rb_parent(node);
node->rb_right = right->rb_left;
if (node->rb_right)
rb_set_parent(right->rb_left, node);
right->rb_left = node;
rb_set_parent(right, parent);
if (parent) {
if (node == parent->rb_left)
parent->rb_left = right;
else
parent->rb_right = right;
} else
root->rb_node = right;
rb_set_parent(node, right);
}
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
{
struct rb_node *left = node->rb_left;
struct rb_node *parent = rb_parent(node);
node->rb_left = left->rb_right;
if (node->rb_left)
rb_set_parent(left->rb_right, node);
left->rb_right = node;
rb_set_parent(left, parent);
if (parent) {
if (node == parent->rb_right)
parent->rb_right = left;
else
parent->rb_left = left;
} else
root->rb_node = left;
rb_set_parent(node, left);
}
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
struct rb_node *parent, *gparent;
while ((parent = rb_parent(node)) && rb_is_red(parent)) {
gparent = rb_parent(parent);
if (parent == gparent->rb_left) {
{
register struct rb_node *uncle = gparent->rb_right;
if (uncle && rb_is_red(uncle)) {
rb_set_black(uncle);
rb_set_black(parent);
rb_set_red(gparent);
node = gparent;
continue;
}
}
if (parent->rb_right == node) {
register struct rb_node *tmp;
__rb_rotate_left(parent, root);
tmp = parent;
parent = node;
node = tmp;
}
rb_set_black(parent);
rb_set_red(gparent);
__rb_rotate_right(gparent, root);
} else {
{
register struct rb_node *uncle = gparent->rb_left;
if (uncle && rb_is_red(uncle)) {
rb_set_black(uncle);
rb_set_black(parent);
rb_set_red(gparent);
node = gparent;
continue;
}
}
if (parent->rb_left == node) {
register struct rb_node *tmp;
__rb_rotate_right(parent, root);
tmp = parent;
parent = node;
node = tmp;
}
rb_set_black(parent);
rb_set_red(gparent);
__rb_rotate_left(gparent, root);
}
}
rb_set_black(root->rb_node);
}
static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, struct rb_root *root)
{
struct rb_node *other;
while ((!node || rb_is_black(node)) && node != root->rb_node) {
if (parent->rb_left == node) {
other = parent->rb_right;
if (rb_is_red(other)) {
rb_set_black(other);
rb_set_red(parent);
__rb_rotate_left(parent, root);
other = parent->rb_right;
}
if ((!other->rb_left || rb_is_black(other->rb_left)) &&
(!other->rb_right || rb_is_black(other->rb_right))) {
rb_set_red(other);
node = parent;
parent = rb_parent(node);
} else {
if (!other->rb_right || rb_is_black(other->rb_right)) {
rb_set_black(other->rb_left);
rb_set_red(other);
__rb_rotate_right(other, root);
other = parent->rb_right;
}
rb_set_color(other, rb_color(parent));
rb_set_black(parent);
rb_set_black(other->rb_right);
__rb_rotate_left(parent, root);
node = root->rb_node;
break;
}
} else {
other = parent->rb_left;
if (rb_is_red(other)) {
rb_set_black(other);
rb_set_red(parent);
__rb_rotate_right(parent, root);
other = parent->rb_left;
}
if ((!other->rb_left || rb_is_black(other->rb_left)) &&
(!other->rb_right || rb_is_black(other->rb_right))) {
rb_set_red(other);
node = parent;
parent = rb_parent(node);
} else {
if (!other->rb_left || rb_is_black(other->rb_left)) {
rb_set_black(other->rb_right);
rb_set_red(other);
__rb_rotate_left(other, root);
other = parent->rb_left;
}
rb_set_color(other, rb_color(parent));
rb_set_black(parent);
rb_set_black(other->rb_left);
__rb_rotate_right(parent, root);
node = root->rb_node;
break;
}
}
}
if (node)
rb_set_black(node);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *child, *parent;
int color;
if (!node->rb_left)
child = node->rb_right;
else if (!node->rb_right)
child = node->rb_left;
else {
struct rb_node *old = node, *left;
node = node->rb_right;
while ((left = node->rb_left))
node = left;
if (rb_parent(old)) {
if (rb_parent(old)->rb_left == old)
rb_parent(old)->rb_left = node;
else
rb_parent(old)->rb_right = node;
} else
root->rb_node = node;
child = node->rb_right;
parent = rb_parent(node);
color = rb_color(node);
if (parent == old) {
parent = node;
} else {
if (child)
rb_set_parent(child, parent);
parent->rb_left = child;
node->rb_right = old->rb_right;
rb_set_parent(old->rb_right, node);
}
node->rb_parent_color = old->rb_parent_color;
node->rb_left = old->rb_left;
rb_set_parent(old->rb_left, node);
goto color;
}
parent = rb_parent(node);
color = rb_color(node);
if (child)
rb_set_parent(child, parent);
if (parent) {
if (parent->rb_left == node)
parent->rb_left = child;
else
parent->rb_right = child;
} else
root->rb_node = child;
color:
if (color == RB_BLACK)
__rb_erase_color(child, parent, root);
}
/*
* This function returns the first node (in sort order) of the tree.
*/
struct rb_node *rb_first(const struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n)
return NULL;
while (n->rb_left)
n = n->rb_left;
return n;
}
struct rb_node *rb_last(const struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n)
return NULL;
while (n->rb_right)
n = n->rb_right;
return n;
}
struct rb_node *rb_next(const struct rb_node *node)
{
struct rb_node *parent;
if (rb_parent(node) == node)
return NULL;
/*
* If we have a right-hand child, go down and
* then left as far as we can.
*/
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
node = node->rb_left;
return (struct rb_node *)node;
}
/*
* No right-hand children. Everything down and left is
* smaller than us, so any 'next' node must be in the general
* direction of our parent. Go up the tree; any time the
* ancestor is a right-hand child of its parent, keep going
* up. First time it's a left-hand child of its parent, said
* parent is our 'next' node.
*/
while ((parent = rb_parent(node)) && node == parent->rb_right)
node = parent;
return parent;
}
struct rb_node *rb_prev(const struct rb_node *node)
{
struct rb_node *parent;
if (rb_parent(node) == node)
return NULL;
/*
* If we have a left-hand child, go down and
* then right as far as we can.
*/
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
node = node->rb_right;
return (struct rb_node *)node;
}
/*
* No left-hand children. Go up till we find
* an ancestor which is a right-hand child of its parent.
*/
while ((parent = rb_parent(node)) && node == parent->rb_left)
node = parent;
return parent;
}
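/*
 * Illustrative sketch (not part of the original source): in-order
 * traversal using the helpers above. Callers normally map each node
 * back to its container with rb_entry(); the container type here is
 * hypothetical.
 */
static inline void __example_rb_walk(struct rb_root *root)
{
	struct rb_node *node;
	for (node = rb_first(root); node; node = rb_next(node))
		; /* rb_entry(node, struct foo, member) yields the object */
}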
void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root)
{
struct rb_node *parent = rb_parent(victim);
/* Set the surrounding nodes to point to the replacement */
if (parent) {
if (victim == parent->rb_left)
parent->rb_left = new;
else
parent->rb_right = new;
} else
root->rb_node = new;
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
}
| 7,667 | 20.6 | 96 |
c
|
criu
|
criu-master/criu/rst-malloc.c
|
#include <stdio.h>
#include <stdbool.h>
#include <sys/mman.h>
#include "page.h"
#include "rst-malloc.h"
#include "log.h"
#include "common/bug.h"
struct rst_mem_type_s {
bool remapable;
bool enabled;
unsigned long free_bytes;
void *free_mem;
int (*grow)(struct rst_mem_type_s *, unsigned long size);
unsigned long last;
void *buf;
unsigned long size;
};
static inline unsigned long rst_mem_grow(unsigned long need_size)
{
int rst_mem_batch = 2 * page_size();
need_size = round_up(need_size, page_size());
if (likely(need_size < rst_mem_batch))
need_size = rst_mem_batch;
else
pr_debug("Growing rst memory %lu pages\n", need_size / page_size());
return need_size;
}
static int grow_shared(struct rst_mem_type_s *t, unsigned long size)
{
void *aux;
size = rst_mem_grow(size);
/*
* This buffer will not get remapped into
* restorer, thus we can just forget the
* previous chunk location and allocate a
* new one
*/
aux = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
if (aux == MAP_FAILED)
return -1;
t->free_mem = aux;
t->free_bytes = size;
t->last = 0;
return 0;
}
static int grow_remap(struct rst_mem_type_s *t, int flag, unsigned long size)
{
void *aux;
size = rst_mem_grow(size);
if (!t->buf)
/*
* Can't call mremap with NULL address :(
*/
aux = mmap(NULL, size, PROT_READ | PROT_WRITE, flag | MAP_ANONYMOUS, 0, 0);
else {
if (flag & MAP_SHARED) {
/*
* Anon shared memory cannot grow with
* mremap, anon-shmem file size doesn't
* change and memory access generates
			 * SIGBUS. We would have to truncate the
			 * backing object, but for now we don't need that.
*/
pr_err("Can't grow RM_SHREMAP memory\n");
return -1;
}
/*
* We'll have to remap all objects into restorer
* address space and get their new addresses. Since
* we allocate many objects as one linear array, it's
* simpler just to grow the buffer and let callers
* find out new array addresses, rather than allocate
		 * a completely new one and force callers to use objects'
* cpos-s.
*/
aux = mremap(t->buf, t->size, t->size + size, MREMAP_MAYMOVE);
}
if (aux == MAP_FAILED)
return -1;
t->free_mem += (aux - t->buf);
t->free_bytes += size;
t->size += size;
t->buf = aux;
return 0;
}
static int grow_shremap(struct rst_mem_type_s *t, unsigned long size)
{
return grow_remap(t, MAP_SHARED, size);
}
static int grow_private(struct rst_mem_type_s *t, unsigned long size)
{
return grow_remap(t, MAP_PRIVATE, size);
}
static struct rst_mem_type_s rst_mems[RST_MEM_TYPES] = {
[RM_SHARED] = {
.grow = grow_shared,
.remapable = false,
.enabled = true,
},
[RM_SHREMAP] = {
.grow = grow_shremap,
.remapable = true,
.enabled = true,
},
[RM_PRIVATE] = {
.grow = grow_private,
.remapable = true,
.enabled = false,
},
};
void rst_mem_switch_to_private(void)
{
rst_mems[RM_SHARED].enabled = false;
rst_mems[RM_SHREMAP].enabled = false;
rst_mems[RM_PRIVATE].enabled = true;
}
void rst_mem_align(int type)
{
struct rst_mem_type_s *t = &rst_mems[type];
void *ptr;
ptr = (void *)round_up((unsigned long)t->free_mem, sizeof(void *));
t->free_bytes -= (ptr - t->free_mem);
t->free_mem = ptr;
}
unsigned long rst_mem_align_cpos(int type)
{
struct rst_mem_type_s *t = &rst_mems[type];
BUG_ON(!t->remapable || !t->enabled);
rst_mem_align(type);
return t->free_mem - t->buf;
}
void *rst_mem_remap_ptr(unsigned long pos, int type)
{
struct rst_mem_type_s *t = &rst_mems[type];
BUG_ON(!t->remapable);
return t->buf + pos;
}
void *rst_mem_alloc(unsigned long size, int type)
{
struct rst_mem_type_s *t = &rst_mems[type];
void *ret;
BUG_ON(!t->enabled);
if ((t->free_bytes < size) && t->grow(t, size)) {
pr_perror("Can't grow rst mem");
return NULL;
}
ret = t->free_mem;
t->free_mem += size;
t->free_bytes -= size;
t->last = size;
return ret;
}
void rst_mem_free_last(int type)
{
struct rst_mem_type_s *t = &rst_mems[type];
BUG_ON(!t->enabled);
t->free_mem -= t->last;
t->free_bytes += t->last;
t->last = 0; /* next free_last would be no-op */
}
unsigned long rst_mem_lock(void)
{
/*
* Don't allow further allocations from rst_mem since we're
* going to get the bootstrap area and remap all the stuff
* into it. The SHREMAP and SHARED should be already locked
* in the rst_mem_switch_to_private().
*/
rst_mems[RM_PRIVATE].enabled = false;
return rst_mems[RM_PRIVATE].size + rst_mems[RM_SHREMAP].size;
}
static int rst_mem_remap_one(struct rst_mem_type_s *t, void *to)
{
void *aux;
BUG_ON(!t->remapable || t->enabled);
if (!t->buf)
/*
* No allocations happened from this buffer.
* It's safe just to do nothing.
*/
return 0;
pr_debug("\tcall mremap(%p, %lu, %lu, MAYMOVE | FIXED, %p)\n", t->buf, t->size, t->size, to);
aux = mremap(t->buf, t->size, t->size, MREMAP_MAYMOVE | MREMAP_FIXED, to);
if (aux == MAP_FAILED) {
pr_perror("Can't mremap rst mem");
return -1;
}
t->buf = aux;
return 0;
}
int rst_mem_remap(void *to)
{
int ret;
ret = rst_mem_remap_one(&rst_mems[RM_PRIVATE], to);
if (!ret) {
to += rst_mems[RM_PRIVATE].size;
ret = rst_mem_remap_one(&rst_mems[RM_SHREMAP], to);
}
return ret;
}
void *shmalloc(size_t bytes)
{
rst_mem_align(RM_SHARED);
return rst_mem_alloc(bytes, RM_SHARED);
}
/* Only last chunk can be released */
void shfree_last(void *ptr)
{
rst_mem_free_last(RM_SHARED);
}
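/*
 * Illustrative sketch (not part of the original source): the intended
 * calling convention for remapable memory. Callers remember the cpos,
 * not the raw pointer, because the buffer may move when it grows or
 * when rst_mem_remap() relocates it. The helper name is hypothetical.
 */
static inline void *__example_rst_mem_use(unsigned long size)
{
	unsigned long cpos = rst_mem_align_cpos(RM_PRIVATE);
	if (!rst_mem_alloc(size, RM_PRIVATE))
		return NULL;
	/* ... later, possibly after the buffer has moved ... */
	return rst_mem_remap_ptr(cpos, RM_PRIVATE);
}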
| 5,407 | 20.207843 | 94 |
c
|
criu
|
criu-master/criu/seccomp.c
|
#include <linux/filter.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <ptrace.h>
#include "common/config.h"
#include "imgset.h"
#include "kcmp.h"
#include "pstree.h"
#include <compel/ptrace.h>
#include "proc_parse.h"
#include "restorer.h"
#include "seccomp.h"
#include "servicefd.h"
#include "util.h"
#include "rst-malloc.h"
#include "protobuf.h"
#include "images/seccomp.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "seccomp: "
static struct rb_root seccomp_tid_rb_root = RB_ROOT;
static struct seccomp_entry *seccomp_tid_entry_root;
static SeccompEntry *seccomp_img_entry;
struct seccomp_entry *seccomp_lookup(pid_t tid_real, bool create, bool mandatory)
{
struct seccomp_entry *entry = NULL;
struct rb_node *node = seccomp_tid_rb_root.rb_node;
struct rb_node **new = &seccomp_tid_rb_root.rb_node;
struct rb_node *parent = NULL;
while (node) {
struct seccomp_entry *this = rb_entry(node, struct seccomp_entry, node);
parent = *new;
if (tid_real < this->tid_real)
node = node->rb_left, new = &((*new)->rb_left);
else if (tid_real > this->tid_real)
node = node->rb_right, new = &((*new)->rb_right);
else
return this;
}
if (create) {
entry = xzalloc(sizeof(*entry));
if (!entry)
return NULL;
rb_init_node(&entry->node);
entry->tid_real = tid_real;
entry->next = seccomp_tid_entry_root, seccomp_tid_entry_root = entry;
rb_link_and_balance(&seccomp_tid_rb_root, &entry->node, parent, new);
} else {
if (mandatory)
pr_err("Can't find entry on tid_real %d\n", tid_real);
}
return entry;
}
int seccomp_collect_entry(pid_t tid_real, unsigned int mode)
{
struct seccomp_entry *entry;
entry = seccomp_lookup(tid_real, true, false);
if (!entry) {
pr_err("Can't create entry on tid_real %d\n", tid_real);
return -1;
}
entry->mode = mode;
pr_debug("Collected tid_real %d mode %#x\n", tid_real, mode);
return 0;
}
static void seccomp_free_chain(struct seccomp_entry *entry)
{
struct seccomp_filter_chain *chain, *prev;
for (chain = entry->chain; chain; chain = prev) {
prev = chain->prev;
xfree(chain->filter.filter.data);
xfree(chain);
}
entry->nr_chains = 0;
entry->chain = NULL;
}
void seccomp_free_entries(void)
{
struct seccomp_entry *entry, *next;
for (entry = seccomp_tid_entry_root; entry; entry = next) {
next = entry->next;
seccomp_free_chain(entry);
xfree(entry);
}
seccomp_tid_rb_root = RB_ROOT;
seccomp_tid_entry_root = NULL;
}
int seccomp_dump_thread(pid_t tid_real, ThreadCoreEntry *thread_core)
{
struct seccomp_entry *entry = seccomp_find_entry(tid_real);
if (!entry) {
pr_err("Can't dump thread core on tid_real %d\n", tid_real);
return -1;
}
if (entry->mode != SECCOMP_MODE_DISABLED) {
thread_core->has_seccomp_mode = true;
thread_core->seccomp_mode = entry->mode;
if (entry->mode == SECCOMP_MODE_FILTER) {
thread_core->has_seccomp_filter = true;
thread_core->seccomp_filter = entry->img_filter_pos;
}
}
return 0;
}
static int collect_filter(struct seccomp_entry *entry)
{
seccomp_metadata_t meta_buf, *meta = &meta_buf;
struct seccomp_filter_chain *chain, *prev;
struct sock_filter buf[BPF_MAXINSNS];
size_t i;
int len;
if (entry->mode != SECCOMP_MODE_FILTER)
return 0;
for (i = 0; true; i++) {
len = ptrace(PTRACE_SECCOMP_GET_FILTER, entry->tid_real, i, buf);
if (len < 0) {
if (errno == ENOENT) {
break;
} else {
pr_perror("Can't fetch filter on tid_real %d i %zu", entry->tid_real, i);
return -1;
}
}
if (meta) {
meta->filter_off = i;
if (ptrace(PTRACE_SECCOMP_GET_METADATA, entry->tid_real, sizeof(*meta), meta) < 0) {
if (errno == EIO) {
/* Old kernel, no METADATA support */
meta = NULL;
} else {
pr_perror("Can't fetch seccomp metadata on tid_real %d pos %zu",
entry->tid_real, i);
return -1;
}
}
}
chain = xzalloc(sizeof(*chain));
if (!chain)
return -1;
seccomp_filter__init(&chain->filter);
chain->filter.has_flags = true;
chain->filter.flags = 0;
chain->filter.filter.len = len * sizeof(struct sock_filter);
chain->filter.filter.data = xmalloc(chain->filter.filter.len);
if (!chain->filter.filter.data) {
xfree(chain);
return -1;
}
memcpy(chain->filter.filter.data, buf, chain->filter.filter.len);
if (meta)
chain->filter.flags |= meta->flags;
prev = entry->chain, entry->chain = chain, chain->prev = prev;
entry->nr_chains++;
}
return 0;
}
/*
 * When a filter is set up with SECCOMP_FILTER_FLAG_TSYNC, all threads
 * share the same filter chain. Still, without kernel support we don't
 * know whether the chains were indeed propagated by the flag above or
 * the application installed identical chains manually.
 *
 * Thus we do a trick: if all threads share identical chains, we just
 * drop all of them except the leader's and assign
 * SECCOMP_FILTER_FLAG_TSYNC there. The rationale is simple: if the
 * application uses tsync, it can always install new non-tsync filters
 * afterwards; conversely, if we don't provide tsync on restore,
 * further calls with tsync would fail later.
 *
 * A proper fix needs some support from the kernel side (presumably a
 * kcmp mode).
*/
static void try_use_tsync(struct seccomp_entry *leader, struct pstree_item *item)
{
struct seccomp_filter_chain *chain_a, *chain_b;
struct seccomp_entry *entry;
size_t i, j;
if (leader->mode != SECCOMP_MODE_FILTER)
return;
for (i = 0; i < item->nr_threads; i++) {
entry = seccomp_find_entry(item->threads[i].real);
BUG_ON(!entry);
if (entry == leader)
continue;
if (entry->mode != leader->mode || entry->nr_chains != leader->nr_chains)
return;
chain_a = leader->chain;
chain_b = entry->chain;
for (j = 0; j < leader->nr_chains; j++) {
BUG_ON((!chain_a || !chain_b));
if (chain_a->filter.filter.len != chain_b->filter.filter.len)
return;
if (memcmp(chain_a->filter.filter.data, chain_b->filter.filter.data,
chain_a->filter.filter.len))
return;
chain_a = chain_a->prev;
chain_b = chain_b->prev;
}
}
/* OK, so threads can be restored with tsync */
pr_debug("Use SECCOMP_FILTER_FLAG_TSYNC for tid_real %d\n", leader->tid_real);
for (chain_a = leader->chain; chain_a; chain_a = chain_a->prev)
chain_a->filter.flags |= SECCOMP_FILTER_FLAG_TSYNC;
for (i = 0; i < item->nr_threads; i++) {
entry = seccomp_find_entry(item->threads[i].real);
BUG_ON(!entry);
if (entry == leader)
continue;
		pr_debug("\t Disable filter on tid_real %d, will be propagated\n", entry->tid_real);
entry->mode = SECCOMP_MODE_DISABLED;
seccomp_free_chain(entry);
}
}
static int collect_filters(struct pstree_item *item)
{
struct seccomp_entry *leader, *entry;
size_t i;
if (item->pid->state == TASK_DEAD)
return 0;
leader = seccomp_find_entry(item->pid->real);
if (!leader) {
pr_err("Can't collect filter on leader tid_real %d\n", item->pid->real);
return -1;
}
for (i = 0; i < item->nr_threads; i++) {
entry = seccomp_find_entry(item->threads[i].real);
if (!entry) {
pr_err("Can't collect filter on tid_real %d\n", item->pid->real);
return -1;
}
if (collect_filter(entry))
return -1;
}
try_use_tsync(leader, item);
return 0;
}
static int dump_seccomp_filters(void)
{
SeccompEntry se = SECCOMP_ENTRY__INIT;
struct seccomp_filter_chain *chain;
struct seccomp_entry *entry;
size_t img_filter_pos = 0, nr_chains = 0;
struct rb_node *node;
int ret;
for (node = rb_first(&seccomp_tid_rb_root); node; node = rb_next(node)) {
entry = rb_entry(node, struct seccomp_entry, node);
nr_chains += entry->nr_chains;
}
se.n_seccomp_filters = nr_chains;
if (nr_chains) {
se.seccomp_filters = xmalloc(sizeof(*se.seccomp_filters) * nr_chains);
if (!se.seccomp_filters)
return -1;
}
for (node = rb_first(&seccomp_tid_rb_root); node; node = rb_next(node)) {
entry = rb_entry(node, struct seccomp_entry, node);
if (!entry->nr_chains)
continue;
for (chain = entry->chain; chain; chain = chain->prev) {
if (img_filter_pos >= nr_chains) {
pr_err("Unexpected position %zu > %zu\n", img_filter_pos, nr_chains);
xfree(se.seccomp_filters);
return -1;
}
se.seccomp_filters[img_filter_pos] = &chain->filter;
if (chain != entry->chain) {
chain->filter.has_prev = true;
chain->filter.prev = img_filter_pos - 1;
}
img_filter_pos++;
}
entry->img_filter_pos = img_filter_pos - 1;
}
ret = pb_write_one(img_from_set(glob_imgset, CR_FD_SECCOMP), &se, PB_SECCOMP);
xfree(se.seccomp_filters);
for (node = rb_first(&seccomp_tid_rb_root); node; node = rb_next(node)) {
entry = rb_entry(node, struct seccomp_entry, node);
seccomp_free_chain(entry);
}
return ret;
}
int seccomp_collect_dump_filters(void)
{
if (preorder_pstree_traversal(root_item, collect_filters) < 0)
return -1;
if (dump_seccomp_filters())
return -1;
return 0;
}
/* The seccomp_img_entry will be shared between all children */
int seccomp_read_image(void)
{
struct cr_img *img;
int ret;
img = open_image(CR_FD_SECCOMP, O_RSTR);
if (!img)
return -1;
ret = pb_read_one_eof(img, &seccomp_img_entry, PB_SECCOMP);
close_image(img);
if (ret <= 0)
return 0; /* there were no filters */
BUG_ON(!seccomp_img_entry);
return 0;
}
/* seccomp_img_entry will be freed per child after forking */
static void free_seccomp_filters(void)
{
if (seccomp_img_entry) {
seccomp_entry__free_unpacked(seccomp_img_entry, NULL);
seccomp_img_entry = NULL;
}
}
void seccomp_rst_reloc(struct thread_restore_args *args)
{
size_t j, off;
if (!args->seccomp_filters_n)
return;
args->seccomp_filters = rst_mem_remap_ptr(args->seccomp_filters_pos, RM_PRIVATE);
args->seccomp_filters_data =
(void *)args->seccomp_filters + args->seccomp_filters_n * sizeof(struct thread_seccomp_filter);
for (j = off = 0; j < args->seccomp_filters_n; j++) {
struct thread_seccomp_filter *f = &args->seccomp_filters[j];
f->sock_fprog.filter = args->seccomp_filters_data + off;
off += f->sock_fprog.len * sizeof(struct sock_filter);
}
}
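/*
 * Illustrative layout note (not part of the original source): the
 * private rst memory blob fixed up above is laid out as
 *
 *   [ thread_seccomp_filter[0 .. n-1] ][ raw sock_filter programs ]
 *
 * so after a remap only the sock_fprog.filter pointers need rebasing.
 */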
int seccomp_prepare_threads(struct pstree_item *item, struct task_restore_args *ta)
{
struct thread_restore_args *args_array = (struct thread_restore_args *)(&ta[1]);
size_t i, j, nr_filters, filters_size, rst_size, off;
for (i = 0; i < item->nr_threads; i++) {
ThreadCoreEntry *thread_core = item->core[i]->thread_core;
struct thread_restore_args *args = &args_array[i];
SeccompFilter *sf;
args->seccomp_mode = SECCOMP_MODE_DISABLED;
args->seccomp_filters_pos = 0;
args->seccomp_filters_n = 0;
args->seccomp_filters = NULL;
args->seccomp_filters_data = NULL;
if (thread_core->has_seccomp_mode)
args->seccomp_mode = thread_core->seccomp_mode;
if (args->seccomp_mode != SECCOMP_MODE_FILTER)
continue;
if (thread_core->seccomp_filter >= seccomp_img_entry->n_seccomp_filters) {
pr_err("Corrupted filter index on tid %d (%u > %zu)\n", item->threads[i].ns[0].virt,
thread_core->seccomp_filter, seccomp_img_entry->n_seccomp_filters);
return -1;
}
sf = seccomp_img_entry->seccomp_filters[thread_core->seccomp_filter];
if (sf->filter.len % (sizeof(struct sock_filter))) {
pr_err("Corrupted filter len on tid %d (index %u)\n", item->threads[i].ns[0].virt,
thread_core->seccomp_filter);
return -1;
}
filters_size = sf->filter.len;
nr_filters = 1;
while (sf->has_prev) {
if (sf->prev >= seccomp_img_entry->n_seccomp_filters) {
pr_err("Corrupted filter index on tid %d (%u > %zu)\n", item->threads[i].ns[0].virt,
sf->prev, seccomp_img_entry->n_seccomp_filters);
return -1;
}
sf = seccomp_img_entry->seccomp_filters[sf->prev];
if (sf->filter.len % (sizeof(struct sock_filter))) {
pr_err("Corrupted filter len on tid %d (index %u)\n", item->threads[i].ns[0].virt,
sf->prev);
return -1;
}
filters_size += sf->filter.len;
nr_filters++;
}
args->seccomp_filters_n = nr_filters;
rst_size = filters_size + nr_filters * sizeof(struct thread_seccomp_filter);
args->seccomp_filters_pos = rst_mem_align_cpos(RM_PRIVATE);
args->seccomp_filters = rst_mem_alloc(rst_size, RM_PRIVATE);
if (!args->seccomp_filters) {
pr_err("Can't allocate %zu bytes for filters on tid %d\n", rst_size,
item->threads[i].ns[0].virt);
return -ENOMEM;
}
args->seccomp_filters_data =
(void *)args->seccomp_filters + nr_filters * sizeof(struct thread_seccomp_filter);
sf = seccomp_img_entry->seccomp_filters[thread_core->seccomp_filter];
for (j = off = 0; j < nr_filters; j++) {
struct thread_seccomp_filter *f = &args->seccomp_filters[j];
f->sock_fprog.len = sf->filter.len / sizeof(struct sock_filter);
f->sock_fprog.filter = args->seccomp_filters_data + off;
f->flags = sf->flags;
memcpy(f->sock_fprog.filter, sf->filter.data, sf->filter.len);
off += sf->filter.len;
sf = seccomp_img_entry->seccomp_filters[sf->prev];
}
}
free_seccomp_filters();
return 0;
}
| 12,964 | 25.034137 | 97 |
c
|
criu
|
criu-master/criu/seize.c
|
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <time.h>
#include "int.h"
#include "common/compiler.h"
#include "cr_options.h"
#include "cr-errno.h"
#include "pstree.h"
#include "criu-log.h"
#include <compel/ptrace.h>
#include "proc_parse.h"
#include "seccomp.h"
#include "seize.h"
#include "stats.h"
#include "string.h"
#include "xmalloc.h"
#include "util.h"
char *task_comm_info(pid_t pid, char *comm, size_t size)
{
bool is_read = false;
if (!pr_quelled(LOG_INFO)) {
int saved_errno = errno;
char path[32];
int fd;
snprintf(path, sizeof(path), "/proc/%d/comm", pid);
fd = open(path, O_RDONLY);
if (fd >= 0) {
ssize_t n = read(fd, comm, size);
if (n > 0) {
is_read = true;
/* Replace '\n' printed by kernel with '\0' */
comm[n - 1] = '\0';
} else {
pr_warn("Failed to read %s: %s\n", path, strerror(errno));
}
close(fd);
} else {
pr_warn("Failed to open %s: %s\n", path, strerror(errno));
}
errno = saved_errno;
}
if (!is_read)
comm[0] = '\0';
return comm;
}
/*
* NOTE: Don't call this concurrently, it uses a local static buffer!
*/
char *__task_comm_info(pid_t pid)
{
static char comm[32];
return task_comm_info(pid, comm, sizeof(comm));
}
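/*
 * Illustrative sketch, not part of CRIU: with a caller-provided buffer the
 * reentrant task_comm_info() is safe where __task_comm_info() is not. The
 * helper name below is hypothetical.
 */
static inline void example_log_comm(pid_t pid)
{
char comm[32];
pr_info("task %d runs %s\n", pid, task_comm_info(pid, comm, sizeof(comm)));
}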
#define NR_ATTEMPTS 5
static const char frozen[] = "FROZEN";
static const char freezing[] = "FREEZING";
static const char thawed[] = "THAWED";
enum freezer_state { FREEZER_ERROR = -1, THAWED, FROZEN, FREEZING };
/* Track if we are running on cgroup v2 system. */
static bool cgroup_v2 = false;
static enum freezer_state get_freezer_v1_state(int fd)
{
char state[32];
int ret;
BUILD_BUG_ON((sizeof(state) < sizeof(frozen)) || (sizeof(state) < sizeof(freezing)) ||
(sizeof(state) < sizeof(thawed)));
lseek(fd, 0, SEEK_SET);
ret = read(fd, state, sizeof(state) - 1);
if (ret <= 0) {
pr_perror("Unable to get a current state");
goto err;
}
if (state[ret - 1] == '\n')
state[ret - 1] = 0;
else
state[ret] = 0;
pr_debug("freezer.state=%s\n", state);
if (strcmp(state, frozen) == 0)
return FROZEN;
else if (strcmp(state, freezing) == 0)
return FREEZING;
else if (strcmp(state, thawed) == 0)
return THAWED;
pr_err("Unknown freezer state: %s\n", state);
err:
return FREEZER_ERROR;
}
static enum freezer_state get_freezer_v2_state(int fd)
{
int exit_code = FREEZER_ERROR;
char path[PATH_MAX];
FILE *event;
char state;
int ret;
/*
* cgroupv2 freezer uses cgroup.freeze to control the state. The file
* can return 0 or 1. 1 means the cgroup is frozen; 0 means it is not
* frozen. Writing 1 to an unfrozen cgroup can freeze it. Freezing can
* take some time and if the cgroup has finished freezing can be
* seen in cgroup.events: frozen 0|1.
*/
ret = lseek(fd, 0, SEEK_SET);
if (ret < 0) {
pr_perror("Unable to seek freezer FD");
goto out;
}
ret = read(fd, &state, 1);
if (ret <= 0) {
pr_perror("Unable to read from freezer FD");
goto out;
}
pr_debug("cgroup.freeze=%c\n", state);
if (state == '0') {
exit_code = THAWED;
goto out;
}
snprintf(path, sizeof(path), "%s/cgroup.events", opts.freeze_cgroup);
event = fopen(path, "r");
if (event == NULL) {
pr_perror("Unable to open %s", path);
goto out;
}
while (fgets(path, sizeof(path), event)) {
if (strncmp(path, "frozen", 6) != 0) {
continue;
} else if (strncmp(path, "frozen 0", 8) == 0) {
exit_code = FREEZING;
goto close;
} else if (strncmp(path, "frozen 1", 8) == 0) {
exit_code = FROZEN;
goto close;
}
}
pr_err("Unknown freezer state: %c\n", state);
close:
fclose(event);
out:
return exit_code;
}
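/*
 * Illustrative sketch, not part of CRIU: the cgroup v2 protocol described
 * above reduces to writing "1" into cgroup.freeze and then polling
 * cgroup.events for a "frozen 1" line. The helper name and the fixed poll
 * step are hypothetical; a real caller must also bound the loop (see
 * freeze_processes() below).
 */
static inline int example_freeze_v2(const char *cg_dir)
{
char path[PATH_MAX];
int fd;
snprintf(path, sizeof(path), "%s/cgroup.freeze", cg_dir);
fd = open(path, O_WRONLY);
if (fd < 0)
return -1;
if (write(fd, "1", 1) != 1) {
close(fd);
return -1;
}
close(fd);
snprintf(path, sizeof(path), "%s/cgroup.events", cg_dir);
for (;;) {
const struct timespec step = { .tv_nsec = 100 * 1000000 };
char line[64];
bool frozen = false;
FILE *ev = fopen(path, "r");
if (!ev)
return -1;
while (fgets(line, sizeof(line), ev))
if (strncmp(line, "frozen 1", 8) == 0)
frozen = true;
fclose(ev);
if (frozen)
return 0;
nanosleep(&step, NULL);
}
}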
static enum freezer_state get_freezer_state(int fd)
{
if (cgroup_v2)
return get_freezer_v2_state(fd);
return get_freezer_v1_state(fd);
}
static enum freezer_state origin_freezer_state = FREEZER_ERROR;
const char *get_real_freezer_state(void)
{
return origin_freezer_state == THAWED ? thawed : frozen;
}
static int freezer_write_state(int fd, enum freezer_state new_state)
{
char state[32] = { 0 };
int ret;
if (new_state == THAWED) {
if (cgroup_v2)
state[0] = '0';
else if (__strlcpy(state, thawed, sizeof(state)) >= sizeof(state))
return -1;
} else if (new_state == FROZEN) {
if (cgroup_v2)
state[0] = '1';
else if (__strlcpy(state, frozen, sizeof(state)) >= sizeof(state))
return -1;
} else {
return -1;
}
ret = lseek(fd, 0, SEEK_SET);
if (ret < 0) {
pr_perror("Unable to seek freezer FD");
return -1;
}
if (write(fd, state, sizeof(state)) != sizeof(state)) {
pr_perror("Unable to %s tasks", (new_state == THAWED) ? "thaw" : "freeze");
return -1;
}
return 0;
}
static int freezer_open(void)
{
const char freezer_v1[] = "freezer.state";
const char freezer_v2[] = "cgroup.freeze";
char path[PATH_MAX];
int fd;
snprintf(path, sizeof(path), "%s/%s", opts.freeze_cgroup, cgroup_v2 ? freezer_v2 : freezer_v1);
fd = open(path, O_RDWR);
if (fd < 0) {
pr_perror("Unable to open %s", path);
return -1;
}
return fd;
}
static int freezer_restore_state(void)
{
int fd;
int ret;
if (!opts.freeze_cgroup || origin_freezer_state != FROZEN)
return 0;
fd = freezer_open();
if (fd < 0)
return -1;
ret = freezer_write_state(fd, FROZEN);
close(fd);
return ret;
}
static FILE *freezer_open_thread_list(char *root_path)
{
char path[PATH_MAX];
FILE *f;
snprintf(path, sizeof(path), "%s/%s", root_path, cgroup_v2 ? "cgroup.threads" : "tasks");
f = fopen(path, "r");
if (f == NULL) {
pr_perror("Unable to open %s", path);
return NULL;
}
return f;
}
/* A number of tasks in a freezer cgroup which are not going to be dumped */
static int processes_to_wait;
static pid_t *processes_to_wait_pids;
static int seize_cgroup_tree(char *root_path, enum freezer_state state)
{
DIR *dir;
struct dirent *de;
char path[PATH_MAX];
FILE *f;
/*
* New tasks can appear while the freezer state isn't
* frozen, so we need to catch all new tasks.
*/
f = freezer_open_thread_list(root_path);
if (f == NULL)
return -1;
while (fgets(path, sizeof(path), f)) {
pid_t pid;
int ret;
pid = atoi(path);
/* Here we are going to skip tasks which are already traced. */
ret = ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
if (ret == 0)
continue;
if (errno != ESRCH) {
pr_perror("Unexpected error for pid %d (comm %s)", pid, __task_comm_info(pid));
fclose(f);
return -1;
}
if (!compel_interrupt_task(pid)) {
pr_debug("SEIZE %d (comm %s): success\n", pid, __task_comm_info(pid));
processes_to_wait++;
} else if (state == FROZEN) {
char buf[] = "/proc/XXXXXXXXXX/exe";
struct stat st;
/* skip kernel threads */
snprintf(buf, sizeof(buf), "/proc/%d/exe", pid);
if (stat(buf, &st) == -1 && errno == ENOENT)
continue;
/*
* fails when it meets a zombie or an exiting process:
* there is a small race in the kernel -- the process
* may start exiting while we are trying to freeze it,
* before it completes the exit procedure. The caller
* should simply wait a bit and try freezing again.
*/
pr_err("zombie %d (comm %s) found while seizing\n", pid, __task_comm_info(pid));
fclose(f);
return -EAGAIN;
}
}
fclose(f);
dir = opendir(root_path);
if (!dir) {
pr_perror("Unable to open %s", root_path);
return -1;
}
while ((de = readdir(dir))) {
struct stat st;
int ret;
if (dir_dots(de))
continue;
sprintf(path, "%s/%s", root_path, de->d_name);
if (fstatat(dirfd(dir), de->d_name, &st, 0) < 0) {
pr_perror("stat of %s failed", path);
closedir(dir);
return -1;
}
if (!S_ISDIR(st.st_mode))
continue;
ret = seize_cgroup_tree(path, state);
if (ret < 0) {
closedir(dir);
return ret;
}
}
closedir(dir);
return 0;
}
/*
* A freezer cgroup can contain tasks which will not be dumped
* and we need to wait for them, because they are interrupted by ptrace.
*/
static int freezer_wait_processes(void)
{
int i;
processes_to_wait_pids = xmalloc(sizeof(pid_t) * processes_to_wait);
if (processes_to_wait_pids == NULL)
return -1;
for (i = 0; i < processes_to_wait; i++) {
int status;
pid_t pid;
/*
* Here we are going to skip tasks which are already traced.
* Ptraced tasks look like children to us, so if
* a task isn't ptraced yet, waitpid() will return an error.
*/
pid = waitpid(-1, &status, 0);
if (pid < 0) {
pr_perror("Unable to wait processes");
xfree(processes_to_wait_pids);
processes_to_wait_pids = NULL;
return -1;
}
pr_warn("Unexpected process %d in the freezer cgroup (status 0x%x)\n", pid, status);
processes_to_wait_pids[i] = pid;
}
return 0;
}
static int freezer_detach(void)
{
int i;
if (!opts.freeze_cgroup)
return 0;
for (i = 0; i < processes_to_wait && processes_to_wait_pids; i++) {
pid_t pid = processes_to_wait_pids[i];
int status, save_errno;
if (ptrace(PTRACE_DETACH, pid, NULL, NULL) == 0)
continue;
save_errno = errno;
/* A process may be killed by SIGKILL */
if (wait4(pid, &status, __WALL, NULL) == pid) {
pr_warn("The %d process returned 0x%x\n", pid, status);
continue;
}
errno = save_errno;
pr_perror("Unable to detach from %d", pid);
}
return 0;
}
static int log_unfrozen_stacks(char *root)
{
DIR *dir;
struct dirent *de;
char path[PATH_MAX];
FILE *f;
f = freezer_open_thread_list(root);
if (f == NULL)
return -1;
while (fgets(path, sizeof(path), f)) {
pid_t pid;
int ret, stack;
char stackbuf[2048];
pid = atoi(path);
stack = open_proc(pid, "stack");
if (stack < 0) {
pr_err("`- couldn't log %d's stack\n", pid);
fclose(f);
return -1;
}
ret = read(stack, stackbuf, sizeof(stackbuf) - 1);
close(stack);
if (ret < 0) {
pr_perror("couldn't read %d's stack", pid);
fclose(f);
return -1;
}
stackbuf[ret] = '\0';
pr_debug("Task %d has stack:\n%s", pid, stackbuf);
}
fclose(f);
dir = opendir(root);
if (!dir) {
pr_perror("Unable to open %s", root);
return -1;
}
while ((de = readdir(dir))) {
struct stat st;
if (dir_dots(de))
continue;
sprintf(path, "%s/%s", root, de->d_name);
if (fstatat(dirfd(dir), de->d_name, &st, 0) < 0) {
pr_perror("stat of %s failed", path);
closedir(dir);
return -1;
}
if (!S_ISDIR(st.st_mode))
continue;
if (log_unfrozen_stacks(path) < 0) {
closedir(dir);
return -1;
}
}
closedir(dir);
return 0;
}
static int freeze_processes(void)
{
int fd, exit_code = -1;
enum freezer_state state = THAWED;
static const unsigned long step_ms = 100;
unsigned long nr_attempts = (opts.timeout * 1000000) / step_ms;
unsigned long i = 0;
const struct timespec req = {
.tv_nsec = step_ms * 1000000,
.tv_sec = 0,
};
if (unlikely(!nr_attempts)) {
/*
* If the timeout is turned off, let's
* wait for at least 10 seconds.
*/
nr_attempts = (10 * 1000000) / step_ms;
}
pr_debug("freezing processes: %lu attempts with %lu ms steps\n", nr_attempts, step_ms);
fd = freezer_open();
if (fd < 0)
return -1;
state = get_freezer_state(fd);
if (state == FREEZER_ERROR) {
close(fd);
return -1;
}
origin_freezer_state = state == FREEZING ? FROZEN : state;
if (state == THAWED) {
if (freezer_write_state(fd, FROZEN)) {
close(fd);
return -1;
}
/*
* Wait for the freezer to complete before
* processing tasks. They might be exiting
* before freezing completes, so we should
* not read @tasks pids while the freezer
* is in a transition stage.
*/
for (; i <= nr_attempts; i++) {
state = get_freezer_state(fd);
if (state == FREEZER_ERROR) {
close(fd);
return -1;
}
if (state == FROZEN)
break;
if (alarm_timeouted())
goto err;
nanosleep(&req, NULL);
}
if (i > nr_attempts) {
pr_err("Unable to freeze cgroup %s\n", opts.freeze_cgroup);
if (!pr_quelled(LOG_DEBUG))
log_unfrozen_stacks(opts.freeze_cgroup);
goto err;
}
pr_debug("freezing processes: %lu attempts done\n", i);
}
/*
* Pay attention to the @i variable -- it continues counting from the loop above.
*/
for (; i <= nr_attempts; i++) {
exit_code = seize_cgroup_tree(opts.freeze_cgroup, state);
if (exit_code == -EAGAIN) {
if (alarm_timeouted())
goto err;
nanosleep(&req, NULL);
} else
break;
}
err:
if (exit_code == 0 || origin_freezer_state == THAWED) {
if (freezer_write_state(fd, THAWED))
exit_code = -1;
}
if (close(fd)) {
pr_perror("Unable to thaw tasks");
return -1;
}
return exit_code;
}
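/*
 * Illustrative sketch, not part of CRIU: alarm_timeouted() above relies on
 * a SIGALRM handler that merely sets a flag which long-running loops poll.
 * A minimal version of that pattern, with hypothetical names and assuming
 * <signal.h> is pulled in by the headers above:
 */
static volatile sig_atomic_t example_timeouted;
static void example_on_alarm(int sig)
{
(void)sig;
example_timeouted = 1;
}
static inline void example_arm_timeout(unsigned int seconds)
{
struct sigaction sa = { .sa_handler = example_on_alarm };
sigaction(SIGALRM, &sa, NULL);
alarm(seconds);
/* loops then test example_timeouted instead of blocking forever */
}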
static inline bool child_collected(struct pstree_item *i, pid_t pid)
{
struct pstree_item *c;
list_for_each_entry(c, &i->children, sibling)
if (c->pid->real == pid)
return true;
return false;
}
static int collect_task(struct pstree_item *item);
static int collect_children(struct pstree_item *item)
{
pid_t *ch;
int ret, i, nr_children, nr_inprogress;
ret = parse_children(item->pid->real, &ch, &nr_children);
if (ret < 0)
return ret;
nr_inprogress = 0;
for (i = 0; i < nr_children; i++) {
struct pstree_item *c;
struct proc_status_creds creds;
pid_t pid = ch[i];
/* Is it already frozen? */
if (child_collected(item, pid))
continue;
nr_inprogress++;
if (alarm_timeouted()) {
ret = -1;
goto free;
}
c = alloc_pstree_item();
if (c == NULL) {
ret = -1;
goto free;
}
if (!opts.freeze_cgroup)
/* fails when meets a zombie */
__ignore_value(compel_interrupt_task(pid));
ret = compel_wait_task(pid, item->pid->real, parse_pid_status, NULL, &creds.s, NULL);
if (ret < 0) {
/*
* There is a race window between parse_children() and seize(),
* so the task could die during this time.
* Don't worry, we will try again on the next attempt. The number
* of attempts is restricted, so we will exit if something is
* really wrong.
*/
ret = 0;
xfree(c);
continue;
}
if (ret == TASK_ZOMBIE)
ret = TASK_DEAD;
else
processes_to_wait--;
if (ret == TASK_STOPPED)
c->pid->stop_signo = compel_parse_stop_signo(pid);
pr_info("Seized task %d, state %d\n", pid, ret);
c->pid->real = pid;
c->parent = item;
c->pid->state = ret;
list_add_tail(&c->sibling, &item->children);
ret = seccomp_collect_entry(pid, creds.s.seccomp_mode);
if (ret < 0)
goto free;
/* Here is a recursive call (Depth-first search) */
ret = collect_task(c);
if (ret < 0)
goto free;
}
free:
xfree(ch);
return ret < 0 ? ret : nr_inprogress;
}
static void unseize_task_and_threads(const struct pstree_item *item, int st)
{
int i;
if (item->pid->state == TASK_DEAD)
return;
/*
* The st is the state we want to switch tasks into,
* the item->pid->state is the state the task was in when we seized it.
*/
compel_resume_task_sig(item->pid->real, item->pid->state, st, item->pid->stop_signo);
if (st == TASK_DEAD)
return;
for (i = 1; i < item->nr_threads; i++)
if (ptrace(PTRACE_DETACH, item->threads[i].real, NULL, NULL))
pr_perror("Unable to detach from %d", item->threads[i].real);
}
static void pstree_wait(struct pstree_item *root_item)
{
struct pstree_item *item = root_item;
int pid, status, i;
for_each_pstree_item(item) {
if (item->pid->state == TASK_DEAD)
continue;
for (i = 0; i < item->nr_threads; i++) {
pid = wait4(-1, &status, __WALL, NULL);
if (pid < 0) {
pr_perror("wait4 failed");
break;
} else {
if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGKILL) {
pr_err("Unexpected exit code %d of %d: %s\n", status, pid, strsignal(WTERMSIG(status)));
BUG();
}
}
}
}
pid = wait4(-1, &status, __WALL, NULL);
if (pid > 0) {
pr_err("Unexpected child %d\n", pid);
BUG();
}
}
void pstree_switch_state(struct pstree_item *root_item, int st)
{
struct pstree_item *item = root_item;
if (!root_item)
return;
if (st != TASK_DEAD)
freezer_restore_state();
/*
* We need to detach from all processes before waiting for the init
* process, because one of these processes may collect processes from a
* target pid namespace. The pid namespace is destroyed only when all
* processes have been killed and collected.
*/
freezer_detach();
pr_info("Unfreezing tasks into %d\n", st);
for_each_pstree_item(item)
unseize_task_and_threads(item, st);
if (st == TASK_DEAD)
pstree_wait(root_item);
}
static pid_t item_ppid(const struct pstree_item *item)
{
item = item->parent;
return item ? item->pid->real : -1;
}
static inline bool thread_collected(struct pstree_item *i, pid_t tid)
{
int t;
if (i->pid->real == tid) /* thread leader is collected as task */
return true;
for (t = 0; t < i->nr_threads; t++)
if (tid == i->threads[t].real)
return true;
return false;
}
static int collect_threads(struct pstree_item *item)
{
struct seccomp_entry *task_seccomp_entry;
struct pid *threads = NULL;
struct pid *tmp = NULL;
int nr_threads = 0, i = 0, ret, nr_inprogress, nr_stopped = 0;
task_seccomp_entry = seccomp_find_entry(item->pid->real);
if (!task_seccomp_entry)
goto err;
ret = parse_threads(item->pid->real, &threads, &nr_threads);
if (ret < 0)
goto err;
if ((item->pid->state == TASK_DEAD) && (nr_threads > 1)) {
pr_err("Zombies with threads are not supported\n");
goto err;
}
/* The number of threads can't be less than the number already frozen */
tmp = xrealloc(item->threads, nr_threads * sizeof(struct pid));
if (tmp == NULL)
goto err;
item->threads = tmp;
if (item->nr_threads == 0) {
item->threads[0].real = item->pid->real;
item->nr_threads = 1;
item->threads[0].item = NULL;
}
nr_inprogress = 0;
for (i = 0; i < nr_threads; i++) {
pid_t pid = threads[i].real;
struct proc_status_creds t_creds = {};
if (thread_collected(item, pid))
continue;
nr_inprogress++;
pr_info("\tSeizing %d's %d thread\n", item->pid->real, pid);
if (!opts.freeze_cgroup && compel_interrupt_task(pid))
continue;
ret = compel_wait_task(pid, item_ppid(item), parse_pid_status, NULL, &t_creds.s, NULL);
if (ret < 0) {
/*
* There is a race window between parse_threads() and seize(),
* so the task could die during this time.
* Don't worry, we will try again on the next attempt. The number
* of attempts is restricted, so we will exit if something is
* really wrong.
*/
continue;
}
if (ret == TASK_ZOMBIE)
ret = TASK_DEAD;
else
processes_to_wait--;
BUG_ON(item->nr_threads + 1 > nr_threads);
item->threads[item->nr_threads].real = pid;
item->threads[item->nr_threads].ns[0].virt = t_creds.s.vpid;
item->threads[item->nr_threads].item = NULL;
item->nr_threads++;
if (ret == TASK_DEAD) {
pr_err("Zombie thread not supported\n");
goto err;
}
if (seccomp_collect_entry(pid, t_creds.s.seccomp_mode))
goto err;
if (ret == TASK_STOPPED) {
nr_stopped++;
}
}
if (nr_stopped && nr_stopped != nr_inprogress) {
pr_err("Individually stopped threads not supported\n");
goto err;
}
xfree(threads);
return nr_inprogress;
err:
xfree(threads);
return -1;
}
static int collect_loop(struct pstree_item *item, int (*collect)(struct pstree_item *))
{
int attempts = NR_ATTEMPTS, nr_inprogress = 1;
if (opts.freeze_cgroup)
attempts = 1;
/*
* While we scan the proc and seize the children/threads
* new ones can appear (with clone(CLONE_PARENT) or with
* pthread_create). Thus, after one go, we need to repeat
* the scan-and-freeze again collecting new arrivals. As
* new guys may appear again, we do NR_ATTEMPTS passes and
* fail to seize the item if new tasks/threads still
* appear.
*/
while (nr_inprogress > 0 && attempts >= 0) {
attempts--;
nr_inprogress = collect(item);
}
pr_info("Collected (%d attempts, %d in_progress)\n", attempts, nr_inprogress);
/*
* We may fail to collect items or run out of attempts.
* In the former case nr_inprogress will be negative, in
* the latter -- positive. Thus it's enough just to check
* for "no more new stuff" and say "we're OK" if so.
*/
return (nr_inprogress == 0) ? 0 : -1;
}
static int collect_task(struct pstree_item *item)
{
int ret;
ret = collect_loop(item, collect_threads);
if (ret < 0)
goto err_close;
/* Depth-first search (DFS) is used for traversing a process tree. */
ret = collect_loop(item, collect_children);
if (ret < 0)
goto err_close;
if ((item->pid->state == TASK_DEAD) && !list_empty(&item->children)) {
pr_err("Zombie with children?! O_o Run, run, run!\n");
goto err_close;
}
if (pstree_alloc_cores(item))
goto err_close;
pr_info("Collected %d in %d state\n", item->pid->real, item->pid->state);
return 0;
err_close:
close_pid_proc();
return -1;
}
static int cgroup_version(void)
{
char path[PATH_MAX];
snprintf(path, sizeof(path), "%s/freezer.state", opts.freeze_cgroup);
if (access(path, F_OK) == 0) {
cgroup_v2 = false;
return 0;
}
snprintf(path, sizeof(path), "%s/cgroup.freeze", opts.freeze_cgroup);
if (access(path, F_OK) == 0) {
cgroup_v2 = true;
return 0;
}
pr_err("Neither a cgroupv1 (freezer.state) nor a cgroupv2 (cgroup.freeze) control file was found.\n");
return -1;
}
int collect_pstree(void)
{
pid_t pid = root_item->pid->real;
int ret = -1;
struct proc_status_creds creds;
timing_start(TIME_FREEZING);
/*
* wait4() may hang for some reason. Enable a timer and fire SIGALRM
* if the timeout is reached. The SIGALRM handler will do the necessary
* cleanups and terminate the current process.
*/
alarm(opts.timeout);
if (opts.freeze_cgroup && cgroup_version())
goto err;
pr_debug("Detected cgroup V%d freezer\n", cgroup_v2 ? 2 : 1);
if (opts.freeze_cgroup && freeze_processes())
goto err;
if (!opts.freeze_cgroup && compel_interrupt_task(pid)) {
set_cr_errno(ESRCH);
goto err;
}
ret = compel_wait_task(pid, -1, parse_pid_status, NULL, &creds.s, NULL);
if (ret < 0)
goto err;
if (ret == TASK_ZOMBIE)
ret = TASK_DEAD;
else
processes_to_wait--;
if (ret == TASK_STOPPED)
root_item->pid->stop_signo = compel_parse_stop_signo(pid);
pr_info("Seized task %d, state %d\n", pid, ret);
root_item->pid->state = ret;
ret = seccomp_collect_entry(pid, creds.s.seccomp_mode);
if (ret < 0)
goto err;
ret = collect_task(root_item);
if (ret < 0)
goto err;
if (opts.freeze_cgroup && freezer_wait_processes()) {
ret = -1;
goto err;
}
ret = 0;
timing_stop(TIME_FREEZING);
timing_start(TIME_FROZEN);
err:
/* Freezing stage finished in time - disable timer. */
alarm(0);
return ret;
}
| 22,410 | 20.779397 | 96 |
c
|
criu
|
criu-master/criu/servicefd.c
|
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sched.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include "common/compiler.h"
#include "common/list.h"
#include "util.h"
#include "bitops.h"
#include "pstree.h"
#include "files.h"
#include "rst_info.h"
#include "servicefd.h"
#undef LOG_PREFIX
#define LOG_PREFIX "sfd: "
/* Max potentially possible fd to be opened by the criu process */
int service_fd_rlim_cur;
/* Base of current process service fds set */
static int service_fd_base;
/* Id of current process in shared fdt */
static int service_fd_id = 0;
static DECLARE_BITMAP(sfd_map, SERVICE_FD_MAX);
static int sfd_arr[SERVICE_FD_MAX];
/*
* Variable for marking areas of code where service fd modifications
* are prohibited. It's used to keep ordinary files from reusing
* their numbers. See install_service_fd() and close_service_fd().
*/
bool sfds_protected = false;
const char *sfd_type_name(enum sfd_type type)
{
static const char *names[] = {
[SERVICE_FD_MIN] = __stringify_1(SERVICE_FD_MIN),
[LOG_FD_OFF] = __stringify_1(LOG_FD_OFF),
[IMG_FD_OFF] = __stringify_1(IMG_FD_OFF),
[PROC_FD_OFF] = __stringify_1(PROC_FD_OFF),
[PROC_PID_FD_OFF] = __stringify_1(PROC_PID_FD_OFF),
[PROC_SELF_FD_OFF] = __stringify_1(PROC_SELF_FD_OFF),
[CR_PROC_FD_OFF] = __stringify_1(CR_PROC_FD_OFF),
[ROOT_FD_OFF] = __stringify_1(ROOT_FD_OFF),
[CGROUP_YARD] = __stringify_1(CGROUP_YARD),
[USERNSD_SK] = __stringify_1(USERNSD_SK),
[NS_FD_OFF] = __stringify_1(NS_FD_OFF),
[TRANSPORT_FD_OFF] = __stringify_1(TRANSPORT_FD_OFF),
[RPC_SK_OFF] = __stringify_1(RPC_SK_OFF),
[FDSTORE_SK_OFF] = __stringify_1(FDSTORE_SK_OFF),
[SERVICE_FD_MAX] = __stringify_1(SERVICE_FD_MAX),
};
if (type < ARRAY_SIZE(names))
return names[type];
return "UNKNOWN";
}
int init_service_fd(void)
{
struct rlimit64 rlimit;
/*
* The service fd engine implies that the file descriptors used won't
* be borrowed by the rest of the code, and the default 1024 limit is
* not enough for highly loaded tests/containers. Thus use the kdat
* engine to fetch the current system-level limit on the number of
* files allowed to be opened, and lift our own limits.
*
* Note we have to do it before the service fds get initialized, and we
* don't exit with errors here because in the worst scenario, where an
* fd clash happens, we simply exit with an explicit error during the
* real action stage.
*/
rlimit_unlimit_nofile();
/*
* Service FDs are those that most likely won't
* conflict with any 'real-life' ones
*/
if (syscall(__NR_prlimit64, getpid(), RLIMIT_NOFILE, NULL, &rlimit)) {
pr_perror("Can't get rlimit");
return -1;
}
service_fd_rlim_cur = (int)rlimit.rlim_cur;
return 0;
}
static int __get_service_fd(enum sfd_type type, int service_fd_id)
{
return service_fd_base - type - SERVICE_FD_MAX * service_fd_id;
}
int get_service_fd(enum sfd_type type)
{
BUG_ON((int)type <= SERVICE_FD_MIN || (int)type >= SERVICE_FD_MAX);
if (!test_bit(type, sfd_map))
return -1;
if (service_fd_base == 0)
return sfd_arr[type];
return __get_service_fd(type, service_fd_id);
}
bool is_any_service_fd(int fd)
{
int sfd_min_fd = __get_service_fd(SERVICE_FD_MAX, service_fd_id);
int sfd_max_fd = __get_service_fd(SERVICE_FD_MIN, service_fd_id);
if (fd > sfd_min_fd && fd < sfd_max_fd) {
int type = SERVICE_FD_MAX - (fd - sfd_min_fd);
if (type > SERVICE_FD_MIN && type < SERVICE_FD_MAX)
return !!test_bit(type, sfd_map);
}
return false;
}
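/*
 * Illustrative sketch, not part of CRIU: the fd <-> type mapping above is
 * a simple affine transform, so it can be inverted exactly. This
 * hypothetical helper is the inverse used implicitly by is_any_service_fd().
 */
static inline int example_fd_to_type(int fd, int base, int id)
{
/* inverse of: fd = base - type - SERVICE_FD_MAX * id */
return base - fd - SERVICE_FD_MAX * id;
}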
bool is_service_fd(int fd, enum sfd_type type)
{
return fd == get_service_fd(type);
}
int service_fd_min_fd(struct pstree_item *item)
{
struct fdt *fdt = rsti(item)->fdt;
int id = 0;
if (fdt)
id = fdt->nr - 1;
return service_fd_rlim_cur - (SERVICE_FD_MAX - 1) - SERVICE_FD_MAX * id;
}
static void sfds_protection_bug(enum sfd_type type)
{
pr_err("Service fd %s is being modified in protected context\n", sfd_type_name(type));
print_stack_trace(current ? vpid(current) : 0);
BUG();
}
int install_service_fd(enum sfd_type type, int fd)
{
int sfd = __get_service_fd(type, service_fd_id);
int tmp;
BUG_ON((int)type <= SERVICE_FD_MIN || (int)type >= SERVICE_FD_MAX);
if (sfds_protected && !test_bit(type, sfd_map))
sfds_protection_bug(type);
if (service_fd_base == 0) {
if (test_bit(type, sfd_map))
close(sfd_arr[type]);
sfd_arr[type] = fd;
set_bit(type, sfd_map);
return fd;
}
if (!test_bit(type, sfd_map))
tmp = fcntl(fd, F_DUPFD, sfd);
else
tmp = dup3(fd, sfd, O_CLOEXEC);
if (tmp < 0) {
pr_perror("%s dup %d -> %d failed", sfd_type_name(type), fd, sfd);
close(fd);
return -1;
} else if (tmp != sfd) {
pr_err("%s busy target %d -> %d\n", sfd_type_name(type), fd, sfd);
close(tmp);
close(fd);
return -1;
}
set_bit(type, sfd_map);
close(fd);
return sfd;
}
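/*
 * Illustrative sketch, not part of CRIU: the two duplication paths above
 * differ in intent. fcntl(F_DUPFD) takes the lowest free fd at or above
 * the hint, while dup3() lands exactly on the target, closing whatever
 * was there. A hypothetical one-liner showing both:
 */
static inline int example_place_fd(int fd, int target, bool exact)
{
return exact ? dup3(fd, target, O_CLOEXEC) : fcntl(fd, F_DUPFD, target);
}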
int close_service_fd(enum sfd_type type)
{
int fd;
if (sfds_protected)
sfds_protection_bug(type);
fd = get_service_fd(type);
if (fd < 0)
return 0;
if (close_safe(&fd))
return -1;
clear_bit(type, sfd_map);
return 0;
}
void __close_service_fd(enum sfd_type type)
{
int fd;
fd = __get_service_fd(type, service_fd_id);
close(fd);
clear_bit(type, sfd_map);
}
static int move_service_fd(struct pstree_item *me, int type, int new_id, int new_base)
{
int old = get_service_fd(type);
int new = new_base - type - SERVICE_FD_MAX * new_id;
int ret;
if (old < 0)
return 0;
if (!test_bit(type, sfd_map))
ret = fcntl(old, F_DUPFD, new);
else
ret = dup2(old, new);
if (ret == -1) {
pr_perror("%s unable to clone %d->%d", sfd_type_name(type), old, new);
return -1;
} else if (ret != new) {
pr_err("%s busy target %d -> %d\n", sfd_type_name(type), old, new);
return -1;
} else if (!(rsti(me)->clone_flags & CLONE_FILES))
close(old);
return 0;
}
static int choose_service_fd_base(struct pstree_item *me)
{
int nr, real_nr, fdt_nr = 1, id = rsti(me)->service_fd_id;
if (rsti(me)->fdt) {
/* The base is set by owner of fdt (id 0) */
if (id != 0)
return service_fd_base;
fdt_nr = rsti(me)->fdt->nr;
}
/* Now find process's max used fd number */
if (!list_empty(&rsti(me)->fds))
nr = list_entry(rsti(me)->fds.prev, struct fdinfo_list_entry, ps_list)->fe->fd;
else
nr = -1;
nr = max(nr, inh_fd_max);
/*
* Service fds go after the max fd, near the right border of the alignment:
*
* ...|max_fd|max_fd+1|...|sfd first|...|sfd last (aligned)|
*
* So they take the maximum numbers of the area allocated by the kernel.
* See the kernel's alloc_fdtable() for details.
*/
nr += (SERVICE_FD_MAX - SERVICE_FD_MIN) * fdt_nr;
nr += 16; /* Safety pad */
real_nr = nr;
nr /= (1024 / sizeof(void *));
if (nr)
nr = 1 << (32 - __builtin_clz(nr));
else
nr = 1;
nr *= (1024 / sizeof(void *));
if (nr > service_fd_rlim_cur) {
/* Right border is bigger than rlim. OK, then just the aligned value is enough */
nr = round_down(service_fd_rlim_cur, (1024 / sizeof(void *)));
if (nr < real_nr) {
pr_err("Can't choose service_fd_base: %d %d\n", nr, real_nr);
return -1;
}
}
return nr;
}
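/*
 * Illustrative sketch, not part of CRIU: the alignment dance above rounds
 * the fd count up to the next power of two with __builtin_clz(), mirroring
 * how the kernel sizes fdtables. A hypothetical standalone version of that
 * rounding step:
 */
static inline unsigned int example_next_pow2(unsigned int n)
{
/* for n > 0: the smallest power of two strictly greater than n */
return n ? 1u << (32 - __builtin_clz(n)) : 1;
}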
int clone_service_fd(struct pstree_item *me)
{
int id, new_base, i, ret = -1;
new_base = choose_service_fd_base(me);
id = rsti(me)->service_fd_id;
if (new_base == -1)
return -1;
if (get_service_fd(LOG_FD_OFF) == new_base - LOG_FD_OFF - SERVICE_FD_MAX * id)
return 0;
/* Dup sfds in memmove() style: they may overlap */
if (get_service_fd(LOG_FD_OFF) < new_base - LOG_FD_OFF - SERVICE_FD_MAX * id)
for (i = SERVICE_FD_MIN + 1; i < SERVICE_FD_MAX; i++)
move_service_fd(me, i, id, new_base);
else
for (i = SERVICE_FD_MAX - 1; i > SERVICE_FD_MIN; i--)
move_service_fd(me, i, id, new_base);
service_fd_base = new_base;
service_fd_id = id;
ret = 0;
return ret;
}
| 7,734 | 23.400631 | 87 |
c
|
criu
|
criu-master/criu/shmem.c
|
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <fcntl.h>
#include <stdbool.h>
#include "common/config.h"
#include "common/list.h"
#include "pid.h"
#include "shmem.h"
#include "image.h"
#include "cr_options.h"
#include "kerndat.h"
#include "stats.h"
#include "page-pipe.h"
#include "page-xfer.h"
#include "rst-malloc.h"
#include "vma.h"
#include "mem.h"
#include <compel/plugins/std/syscall-codes.h>
#include "bitops.h"
#include "log.h"
#include "types.h"
#include "page.h"
#include "util.h"
#include "memfd.h"
#include "protobuf.h"
#include "images/pagemap.pb-c.h"
#include "namespaces.h"
#ifndef SEEK_DATA
#define SEEK_DATA 3
#define SEEK_HOLE 4
#endif
/*
* Hash table and routines for keeping shmid -> shmem_info mappings
*/
/*
* The hash is filled with shared objects before we fork
* any tasks. Thus the heads are private (COW-ed) and the
* entries are all in shmem.
*/
#define SHMEM_HASH_SIZE 32
static struct hlist_head shmems_hash[SHMEM_HASH_SIZE];
#define for_each_shmem(_i, _si) \
for (_i = 0; _i < SHMEM_HASH_SIZE; _i++) \
hlist_for_each_entry(_si, &shmems_hash[_i], h)
struct shmem_info {
struct hlist_node h;
unsigned long shmid;
/*
* Owner PID. This guy creates the anon shmem on restore, and
* from it the shmem is read on dump
*/
int pid;
unsigned long size;
union {
struct { /* For restore */
/*
* Descriptor by which this shmem is opened
* by the creator
*/
int fd;
/*
* 0. lock is initialized to zero
* 1. the master opens a descriptor and sets lock to 1
* 2. slaves open their descriptors and increment lock
* 3. the master waits for all slaves on lock. After that
* it can close the descriptor.
* (a sketch of this handshake follows the struct)
*/
futex_t lock;
/*
* Here is a problem: we don't know which process will restore
* a region. Each time we find a process with a smaller pid,
* we reset self_count, so we can't have only one counter.
*/
int count; /* the number of regions */
int self_count; /* the number of regions, which belongs to "pid" */
};
/* For sysvipc restore */
struct {
struct list_head att; /* list of shmem_sysv_att-s */
int want_write;
};
struct { /* For dump */
unsigned long start;
unsigned long end;
unsigned long *pstate_map;
};
};
};
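/*
 * Illustrative sketch, not part of CRIU: the lock handshake documented in
 * the struct above, modelled with a plain shared counter and busy-waiting
 * standing in for the real futex_t helpers. Names are hypothetical.
 */
static inline void example_shmem_master(volatile int *lock, int nr_slaves, int fd)
{
__sync_fetch_and_add(lock, 1); /* step 1: the descriptor is ready */
while (*lock < nr_slaves + 1) /* step 3: wait for all slaves */
;
close(fd);
}
static inline void example_shmem_slave(volatile int *lock)
{
while (*lock == 0) /* wait for the master's descriptor */
;
/* open /proc/<master-pid>/fd/<fd> here, then: */
__sync_fetch_and_add(lock, 1); /* step 2 */
}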
struct shmem_sysv_att {
struct list_head l;
VmaEntry *first;
unsigned long prev_end;
};
/* This is the "pid that will restore shmem" value for sysv */
#define SYSVIPC_SHMEM_PID (-1)
static inline struct hlist_head *shmem_chain(unsigned long shmid)
{
return &shmems_hash[shmid % SHMEM_HASH_SIZE];
}
static void shmem_hash_add(struct shmem_info *si)
{
struct hlist_head *chain;
chain = shmem_chain(si->shmid);
hlist_add_head(&si->h, chain);
}
static struct shmem_info *shmem_find(unsigned long shmid)
{
struct hlist_head *chain;
struct shmem_info *si;
chain = shmem_chain(shmid);
hlist_for_each_entry(si, chain, h)
if (si->shmid == shmid)
return si;
return NULL;
}
#define PST_DONT_DUMP 0
#define PST_DUMP 1
#define PST_ZERO 2
#define PST_DIRTY 3
#define PST_BITS 2
#define PST_BIT0_IX(pfn) ((pfn)*PST_BITS)
#define PST_BIT1_IX(pfn) (PST_BIT0_IX(pfn) + 1)
/*
* Disable pagemap-based shmem change tracking by default
* because it has bugs in its implementation:
* a process can map a shmem page, change it and unmap it.
* We won't observe any changes in such pagemaps during dump.
*/
static bool is_shmem_tracking_en(void)
{
static bool is_inited = false;
static bool is_enabled = false;
if (!is_inited) {
is_enabled = (bool)getenv("CRIU_TRACK_SHMEM");
is_inited = true;
if (is_enabled)
pr_msg("Turn anon shmem tracking on via env\n");
}
return is_enabled;
}
static unsigned int get_pstate(unsigned long *pstate_map, unsigned long pfn)
{
unsigned int bit0 = test_bit(PST_BIT0_IX(pfn), pstate_map) ? 1 : 0;
unsigned int bit1 = test_bit(PST_BIT1_IX(pfn), pstate_map) ? 1 : 0;
return (bit1 << 1) | bit0;
}
static void set_pstate(unsigned long *pstate_map, unsigned long pfn, unsigned int pstate)
{
if (pstate & 1)
set_bit(PST_BIT0_IX(pfn), pstate_map);
if (pstate & 2)
set_bit(PST_BIT1_IX(pfn), pstate_map);
}
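/*
 * Illustrative sketch, not part of CRIU: with PST_BITS == 2 the map packs
 * one of four page states per pfn. A hypothetical self-check of the
 * encode/decode roundtrip above (valid on a freshly zeroed map, since
 * set_pstate() only sets bits):
 */
static inline int example_pstate_roundtrip(unsigned long *zeroed_map, unsigned long pfn)
{
set_pstate(zeroed_map, pfn, PST_DIRTY);
return get_pstate(zeroed_map, pfn) == PST_DIRTY ? 0 : -1;
}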
static int expand_shmem(struct shmem_info *si, unsigned long new_size)
{
unsigned long nr_pages, nr_map_items, map_size;
unsigned long nr_new_map_items, new_map_size, old_size;
old_size = si->size;
si->size = new_size;
if (!is_shmem_tracking_en())
return 0;
nr_pages = DIV_ROUND_UP(old_size, PAGE_SIZE);
nr_map_items = BITS_TO_LONGS(nr_pages * PST_BITS);
map_size = nr_map_items * sizeof(*si->pstate_map);
nr_pages = DIV_ROUND_UP(new_size, PAGE_SIZE);
nr_new_map_items = BITS_TO_LONGS(nr_pages * PST_BITS);
new_map_size = nr_new_map_items * sizeof(*si->pstate_map);
BUG_ON(new_map_size < map_size);
if (xrealloc_safe(&si->pstate_map, new_map_size))
return -1;
memzero(si->pstate_map + nr_map_items, new_map_size - map_size);
return 0;
}
static void update_shmem_pmaps(struct shmem_info *si, u64 *map, VmaEntry *vma)
{
unsigned long shmem_pfn, vma_pfn, vma_pgcnt;
if (!is_shmem_tracking_en())
return;
vma_pgcnt = DIV_ROUND_UP(si->size - vma->pgoff, PAGE_SIZE);
for (vma_pfn = 0; vma_pfn < vma_pgcnt; ++vma_pfn) {
if (!should_dump_page(vma, map[vma_pfn]))
continue;
shmem_pfn = vma_pfn + DIV_ROUND_UP(vma->pgoff, PAGE_SIZE);
if (map[vma_pfn] & PME_SOFT_DIRTY)
set_pstate(si->pstate_map, shmem_pfn, PST_DIRTY);
else if (page_is_zero(map[vma_pfn]))
set_pstate(si->pstate_map, shmem_pfn, PST_ZERO);
else
set_pstate(si->pstate_map, shmem_pfn, PST_DUMP);
}
}
int collect_sysv_shmem(unsigned long shmid, unsigned long size)
{
struct shmem_info *si;
/*
* Tasks will not modify this object, so don't
* shmalloc() it as we do for anon shared mem
*/
si = xmalloc(sizeof(*si));
if (!si)
return -1;
si->shmid = shmid;
si->pid = SYSVIPC_SHMEM_PID;
si->size = size;
si->want_write = 0;
INIT_LIST_HEAD(&si->att);
shmem_hash_add(si);
pr_info("Collected SysV shmem %lx, size %ld\n", si->shmid, si->size);
return 0;
}
int fixup_sysv_shmems(void)
{
int i;
struct shmem_info *si;
struct shmem_sysv_att *att;
for_each_shmem(i, si)
{
/* It can be anon shmem */
if (si->pid != SYSVIPC_SHMEM_PID)
continue;
list_for_each_entry(att, &si->att, l) {
/*
* Same thing is checked in open_shmem_sysv() for
* intermediate holes.
*/
if (att->first->start + round_up(si->size, page_size()) != att->prev_end) {
pr_err("Sysv shmem %lx with tail hole not supported\n", si->shmid);
return -1;
}
/*
* See comment in open_shmem_sysv() about this PROT_EXEC
*/
if (si->want_write)
att->first->prot |= PROT_EXEC;
}
}
return 0;
}
static int open_shmem_sysv(int pid, struct vma_area *vma)
{
VmaEntry *vme = vma->e;
struct shmem_info *si;
struct shmem_sysv_att *att;
uint64_t ret_fd;
si = shmem_find(vme->shmid);
if (!si) {
pr_err("Can't find sysv shmem for %" PRIx64 "\n", vme->shmid);
return -1;
}
if (si->pid != SYSVIPC_SHMEM_PID) {
pr_err("SysV shmem vma 0x%" PRIx64 " points to anon vma %lx\n", vme->start, si->shmid);
return -1;
}
/*
* We can have a chain of VMAs belonging to the same
* sysv shmem segment all with different access rights
* (ro and rw). But a single shmat() system call attaches
* the whole segment regardless of the actual mapping
* size. This can be achieved by attaching a segment
* and then write-protecting its parts.
*
* So, to restore this thing we note the very first
* area of the segment and make it restore the whole
* thing. All the subsequent ones will carry the sign
* telling the restorer to omit shmat and only do the
* ro protection. Yes, it may happen that some sysv
* shmem vma-s sit in the list (and restorer's array)
* for no use.
*
* Holes in between are not handled now, as well as
* the hole at the end (see fixup_sysv_shmems).
*
* One corner case. At shmat() time we need to know
* whether to create the segment rw or ro, but the
* first vma can have different protection. So the
* segment ro-ness is marked with PROT_EXEC bit in
* the first vma. Unfortunately, we only know this
* after we scan all the vmas, so this bit is set
* at the end in fixup_sysv_shmems().
*/
if (vme->pgoff == 0) {
att = xmalloc(sizeof(*att));
if (!att)
return -1;
att->first = vme;
list_add(&att->l, &si->att);
ret_fd = si->shmid;
} else {
att = list_first_entry(&si->att, struct shmem_sysv_att, l);
if (att->prev_end != vme->start) {
pr_err("Sysv shmem %lx with a hole not supported\n", si->shmid);
return -1;
}
if (vme->pgoff != att->prev_end - att->first->start) {
pr_err("Sysv shmem %lx with misordered attach chunks\n", si->shmid);
return -1;
}
/*
* Value that doesn't (shouldn't) match with any real
* sysv shmem ID (thus it cannot be 0, as shmem id can)
* and still is not negative to prevent prepare_vmas() from
* treating it as error.
*/
ret_fd = SYSV_SHMEM_SKIP_FD;
}
pr_info("Note 0x%" PRIx64 "-0x%" PRIx64 " as %lx sysvshmem\n", vme->start, vme->end, si->shmid);
att->prev_end = vme->end;
if (!vme->has_fdflags || vme->fdflags == O_RDWR)
/*
* We can't look at vma->prot & PROT_WRITE as all this stuff
* can be read-protected. If !has_fdflags these are old images
* and ... we have no choice other than to map it with
* maximum access :(
*/
si->want_write = 1;
vme->fd = ret_fd;
return 0;
}
static int open_shmem(int pid, struct vma_area *vma);
int collect_shmem(int pid, struct vma_area *vma)
{
VmaEntry *vi = vma->e;
unsigned long size = vi->pgoff + vi->end - vi->start;
struct shmem_info *si;
if (vma_entry_is(vi, VMA_AREA_SYSVIPC)) {
vma->vm_open = open_shmem_sysv;
return 0;
}
vma->vm_open = open_shmem;
si = shmem_find(vi->shmid);
if (si) {
if (si->pid == SYSVIPC_SHMEM_PID) {
pr_err("Shmem %" PRIx64 " already collected as SYSVIPC\n", vi->shmid);
return -1;
}
if (si->size < size)
si->size = size;
si->count++;
/*
* Only the shared mapping with the lowest
* pid will be created for real; other processes
* will wait until the kernel propagates this mapping
* into /proc
*/
if (!pid_rst_prio(pid, si->pid)) {
if (si->pid == pid)
si->self_count++;
return 0;
}
si->pid = pid;
si->self_count = 1;
return 0;
}
si = shmalloc(sizeof(struct shmem_info));
if (!si)
return -1;
pr_info("Add new shmem 0x%" PRIx64 " (%#016" PRIx64 "-%#016" PRIx64 ")\n", vi->shmid, vi->start, vi->end);
si->shmid = vi->shmid;
si->pid = pid;
si->size = size;
si->fd = -1;
si->count = 1;
si->self_count = 1;
futex_init(&si->lock);
shmem_hash_add(si);
return 0;
}
static int shmem_wait_and_open(struct shmem_info *si, VmaEntry *vi)
{
char path[128];
int ret;
pr_info("Waiting for the %lx shmem to appear\n", si->shmid);
futex_wait_while(&si->lock, 0);
snprintf(path, sizeof(path), "/proc/%d/fd/%d", si->pid, si->fd);
pr_info("Opening shmem [%s]\n", path);
ret = open_proc_rw(si->pid, "fd/%d", si->fd);
futex_inc_and_wake(&si->lock);
if (ret < 0)
return -1;
vi->fd = ret;
return 0;
}
static int do_restore_shmem_content(void *addr, unsigned long size, unsigned long shmid)
{
int ret = 0;
struct page_read pr;
ret = open_page_read(shmid, &pr, PR_SHMEM);
if (ret <= 0)
return -1;
while (1) {
unsigned long vaddr;
unsigned nr_pages;
ret = pr.advance(&pr);
if (ret <= 0)
break;
vaddr = (unsigned long)decode_pointer(pr.pe->vaddr);
nr_pages = pr.pe->nr_pages;
if (vaddr + nr_pages * PAGE_SIZE > size)
break;
pr.read_pages(&pr, vaddr, nr_pages, addr + vaddr, 0);
}
pr.close(&pr);
return ret;
}
int restore_shmem_content(void *addr, struct shmem_info *si)
{
return do_restore_shmem_content(addr, si->size, si->shmid);
}
int restore_sysv_shmem_content(void *addr, unsigned long size, unsigned long shmid)
{
return do_restore_shmem_content(addr, round_up(size, PAGE_SIZE), shmid);
}
int restore_memfd_shmem_content(int fd, unsigned long shmid, unsigned long size)
{
void *addr = NULL;
int ret = 1;
if (size == 0)
return 0;
if (ftruncate(fd, size) < 0) {
pr_perror("Can't resize shmem 0x%lx size=%ld", shmid, size);
goto out;
}
addr = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
pr_perror("Can't mmap shmem 0x%lx size=%ld", shmid, size);
goto out;
}
/*
* do_restore_shmem_content needs size to be page aligned.
*/
if (do_restore_shmem_content(addr, round_up(size, PAGE_SIZE), shmid) < 0) {
pr_err("Can't restore shmem content\n");
goto out;
}
ret = 0;
out:
if (addr)
munmap(addr, size);
return ret;
}
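/*
 * Illustrative sketch, not part of CRIU: the memfd path above in miniature.
 * An anonymous shmem segment is just a sized memfd plus a shared mapping;
 * only public APIs are used (memfd_create, ftruncate, mmap). The helper
 * name is hypothetical; on success it returns the fd and sets *addr.
 */
static inline int example_memfd_segment(size_t size, void **addr)
{
int fd = memfd_create("example-shmem", 0);
if (fd < 0)
return -1;
if (ftruncate(fd, size) < 0) {
close(fd);
return -1;
}
*addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (*addr == MAP_FAILED) {
close(fd);
return -1;
}
return fd;
}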
struct open_map_file_args {
unsigned long addr, size;
};
static int open_map_file(void *args, int fd, pid_t pid)
{
struct open_map_file_args *vma = args;
return open_proc_rw(pid, "map_files/%lx-%lx", vma->addr, vma->addr + vma->size);
}
static int open_shmem(int pid, struct vma_area *vma)
{
VmaEntry *vi = vma->e;
struct shmem_info *si;
void *addr = MAP_FAILED;
int f = -1;
int flags, is_hugetlb, memfd_flag = 0;
si = shmem_find(vi->shmid);
pr_info("Search for %#016" PRIx64 " shmem 0x%" PRIx64 " %p/%d\n", vi->start, vi->shmid, si, si ? si->pid : -1);
if (!si) {
pr_err("Can't find my shmem %#016" PRIx64 "\n", vi->start);
return -1;
}
BUG_ON(si->pid == SYSVIPC_SHMEM_PID);
if (si->pid != pid)
return shmem_wait_and_open(si, vi);
if (si->fd != -1) {
f = dup(si->fd);
if (f < 0) {
pr_perror("Can't dup shmem fd");
return -1;
}
goto out;
}
is_hugetlb = vi->flags & MAP_HUGETLB;
flags = MAP_SHARED;
if (is_hugetlb) {
int size_flag = vi->flags & MAP_HUGETLB_SIZE_MASK;
flags |= MAP_HUGETLB | size_flag;
memfd_flag |= MFD_HUGETLB | size_flag;
}
if (kdat.has_memfd && (!is_hugetlb || kdat.has_memfd_hugetlb)) {
f = memfd_create("", memfd_flag);
if (f < 0) {
pr_perror("Unable to create memfd");
goto err;
}
if (ftruncate(f, si->size)) {
pr_perror("Unable to truncate memfd");
goto err;
}
flags |= MAP_FILE;
} else
flags |= MAP_ANONYMOUS;
/*
* The following hack solves several problems:
* vi->pgoff may be non-zero in the target process.
* This mapping may be mapped more than once.
* The restorer doesn't have snprintf.
* So here is a good place to restore the content.
*/
addr = mmap(NULL, si->size, PROT_WRITE | PROT_READ, flags, f, 0);
if (addr == MAP_FAILED) {
pr_perror("Can't mmap shmid=0x%" PRIx64 " size=%ld", vi->shmid, si->size);
goto err;
}
if (restore_shmem_content(addr, si) < 0) {
pr_err("Can't restore shmem content\n");
goto err;
}
if (f == -1) {
struct open_map_file_args args = {
.addr = (unsigned long)addr,
.size = si->size,
};
f = userns_call(open_map_file, UNS_FDOUT, &args, sizeof(args), -1);
if (f < 0)
goto err;
}
munmap(addr, si->size);
si->fd = f;
/* Signal the slaves that they can open an fd for this shmem */
futex_inc_and_wake(&si->lock);
/*
* All other regions in this process will duplicate
* the file descriptor, so we don't wait for them.
*/
futex_wait_until(&si->lock, si->count - si->self_count + 1);
out:
vi->fd = f;
return 0;
err:
if (addr != MAP_FAILED)
munmap(addr, si->size);
close_safe(&f);
return -1;
}
int add_shmem_area(pid_t pid, VmaEntry *vma, u64 *map)
{
struct shmem_info *si;
unsigned long size = vma->pgoff + (vma->end - vma->start);
if (vma_entry_is(vma, VMA_AREA_SYSVIPC))
pid = SYSVIPC_SHMEM_PID;
si = shmem_find(vma->shmid);
if (si) {
if (si->size < size) {
if (expand_shmem(si, size))
return -1;
}
update_shmem_pmaps(si, map, vma);
return 0;
}
si = xzalloc(sizeof(*si));
if (!si)
return -1;
si->pid = pid;
si->start = vma->start;
si->end = vma->end;
si->shmid = vma->shmid;
shmem_hash_add(si);
if (expand_shmem(si, size))
return -1;
update_shmem_pmaps(si, map, vma);
return 0;
}
static int dump_pages(struct page_pipe *pp, struct page_xfer *xfer)
{
struct page_pipe_buf *ppb;
list_for_each_entry(ppb, &pp->bufs, l)
if (vmsplice(ppb->p[1], ppb->iov, ppb->nr_segs, SPLICE_F_GIFT | SPLICE_F_NONBLOCK) !=
ppb->pages_in * PAGE_SIZE) {
pr_perror("Can't get shmem into page-pipe");
return -1;
}
return page_xfer_dump_pages(xfer, pp);
}
static int next_data_segment(int fd, unsigned long pfn, unsigned long *next_data_pfn, unsigned long *next_hole_pfn)
{
off_t off;
off = lseek(fd, pfn * PAGE_SIZE, SEEK_DATA);
if (off == (off_t)-1) {
if (errno == ENXIO) {
*next_data_pfn = ~0UL;
*next_hole_pfn = ~0UL;
return 0;
}
pr_perror("Unable to lseek(SEEK_DATA)");
return -1;
}
*next_data_pfn = off / PAGE_SIZE;
off = lseek(fd, off, SEEK_HOLE);
if (off == (off_t)-1) {
pr_perror("Unable to lseek(SEEK_HOLE)");
return -1;
}
*next_hole_pfn = off / PAGE_SIZE;
return 0;
}
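/*
 * Illustrative sketch, not part of CRIU: SEEK_DATA/SEEK_HOLE, as used
 * above, let a dumper enumerate only the populated extents of a sparse
 * file. A hypothetical walker over all data extents:
 */
static inline int example_walk_extents(int fd)
{
off_t data = 0, hole;
for (;;) {
data = lseek(fd, data, SEEK_DATA);
if (data == (off_t)-1)
return errno == ENXIO ? 0 : -1; /* ENXIO: no data past offset */
hole = lseek(fd, data, SEEK_HOLE);
if (hole == (off_t)-1)
return -1;
pr_debug("data extent: %#lx-%#lx\n", (long)data, (long)hole);
data = hole;
}
}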
static int do_dump_one_shmem(int fd, void *addr, struct shmem_info *si)
{
struct page_pipe *pp;
struct page_xfer xfer;
int err, ret = -1;
unsigned long pfn, nrpages, next_data_pfn = 0, next_hole_pfn = 0;
unsigned long pages[2] = {};
nrpages = (si->size + PAGE_SIZE - 1) / PAGE_SIZE;
pp = create_page_pipe((nrpages + 1) / 2, NULL, PP_CHUNK_MODE);
if (!pp)
goto err;
err = open_page_xfer(&xfer, CR_FD_SHMEM_PAGEMAP, si->shmid);
if (err)
goto err_pp;
xfer.offset = (unsigned long)addr;
for (pfn = 0; pfn < nrpages; pfn++) {
unsigned int pgstate = PST_DIRTY;
bool use_mc = true;
unsigned long pgaddr;
int st = -1;
if (fd >= 0 && pfn >= next_hole_pfn && next_data_segment(fd, pfn, &next_data_pfn, &next_hole_pfn))
goto err_xfer;
if (si->pstate_map && is_shmem_tracking_en()) {
pgstate = get_pstate(si->pstate_map, pfn);
use_mc = pgstate == PST_DONT_DUMP;
}
if (use_mc) {
if (pfn < next_data_pfn)
pgstate = PST_ZERO;
else
pgstate = PST_DIRTY;
}
pgaddr = (unsigned long)addr + pfn * PAGE_SIZE;
again:
if (pgstate == PST_ZERO)
ret = 0;
else if (xfer.parent && page_in_parent(pgstate == PST_DIRTY)) {
ret = page_pipe_add_hole(pp, pgaddr, PP_HOLE_PARENT);
st = 0;
} else {
ret = page_pipe_add_page(pp, pgaddr, 0);
st = 1;
}
if (ret == -EAGAIN) {
ret = dump_pages(pp, &xfer);
if (ret)
goto err_xfer;
page_pipe_reinit(pp);
goto again;
} else if (ret)
goto err_xfer;
if (st >= 0)
pages[st]++;
}
cnt_add(CNT_SHPAGES_SCANNED, nrpages);
cnt_add(CNT_SHPAGES_SKIPPED_PARENT, pages[0]);
cnt_add(CNT_SHPAGES_WRITTEN, pages[1]);
ret = dump_pages(pp, &xfer);
err_xfer:
xfer.close(&xfer);
err_pp:
destroy_page_pipe(pp);
err:
return ret;
}
static int dump_one_shmem(struct shmem_info *si)
{
int fd, ret = -1;
void *addr;
unsigned long cur, remaining;
pr_info("Dumping shared memory %ld\n", si->shmid);
fd = __open_proc(si->pid, EPERM, O_RDONLY, "map_files/%lx-%lx", si->start, si->end);
if (fd >= 0) {
addr = mmap(NULL, si->size, PROT_READ, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
pr_perror("Can't map shmem 0x%lx (0x%lx-0x%lx)", si->shmid, si->start, si->end);
goto errc;
}
} else {
if (errno != EPERM || !opts.unprivileged) {
goto err;
}
pr_debug("Could not access map_files/ link, falling back to /proc/$pid/mem\n");
fd = open_proc(si->pid, "mem");
if (fd < 0) {
goto err;
}
addr = mmap(NULL, si->size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (addr == MAP_FAILED) {
pr_perror("Can't map empty space for shmem 0x%lx (0x%lx-0x%lx)", si->shmid, si->start, si->end);
goto errc;
}
if (lseek(fd, si->start, SEEK_SET) < 0) {
pr_perror("Can't seek virtual memory");
goto errc;
}
cur = 0;
remaining = si->size;
do {
ret = read(fd, addr + cur, remaining);
if (ret <= 0) {
pr_perror("Can't read virtual memory");
goto errc;
}
remaining -= ret;
cur += ret;
} while (remaining > 0);
close(fd);
fd = -1;
}
ret = do_dump_one_shmem(fd, addr, si);
munmap(addr, si->size);
errc:
if (fd >= 0)
close(fd);
err:
return ret;
}
int dump_one_memfd_shmem(int fd, unsigned long shmid, unsigned long size)
{
int ret = -1;
void *addr;
struct shmem_info si;
if (size == 0)
return 0;
memset(&si, 0, sizeof(si));
si.shmid = shmid;
si.size = size;
addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
if (addr == MAP_FAILED) {
pr_perror("Can't mmap shmem 0x%lx", shmid);
goto err;
}
ret = do_dump_one_shmem(fd, addr, &si);
munmap(addr, size);
err:
return ret;
}
int dump_one_sysv_shmem(void *addr, unsigned long size, unsigned long shmid)
{
int fd, ret;
struct shmem_info *si, det;
si = shmem_find(shmid);
if (!si) {
pr_info("Detached shmem...\n");
det.pid = SYSVIPC_SHMEM_PID;
det.shmid = shmid;
det.size = round_up(size, PAGE_SIZE);
det.pstate_map = NULL;
si = &det;
}
fd = open_proc(PROC_SELF, "map_files/%lx-%lx", (unsigned long)addr, (unsigned long)addr + si->size);
if (fd < 0)
return -1;
ret = do_dump_one_shmem(fd, addr, si);
close(fd);
return ret;
}
int cr_dump_shmem(void)
{
int ret = 0, i;
struct shmem_info *si;
for_each_shmem(i, si)
{
if (si->pid == SYSVIPC_SHMEM_PID)
continue;
ret = dump_one_shmem(si);
if (ret)
goto out;
}
out:
return ret;
}
| 21,205 | 21.631804 | 115 |
c
|
criu
|
criu-master/criu/sigframe.c
|
#include <unistd.h>
#include <string.h>
#include "log.h"
#include "restore.h"
#include "images/core.pb-c.h"
#ifndef setup_sas
static inline void setup_sas(struct rt_sigframe *sigframe, ThreadSasEntry *sas)
{
if (sas) {
#define UC RT_SIGFRAME_UC(sigframe)
UC->uc_stack.ss_sp = (void *)decode_pointer((sas)->ss_sp);
UC->uc_stack.ss_flags = (int)(sas)->ss_flags;
UC->uc_stack.ss_size = (size_t)(sas)->ss_size;
#undef UC
}
}
#endif
int construct_sigframe(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe, k_rtsigset_t *blkset,
CoreEntry *core)
{
/*
* Copy basic register set in the first place: this will set
* rt_sigframe type: native/compat.
*/
if (restore_gpregs(sigframe, CORE_THREAD_ARCH_INFO(core)->gpregs))
return -1;
if (blkset)
rt_sigframe_copy_sigset(sigframe, blkset);
else
rt_sigframe_erase_sigset(sigframe);
if (restore_fpu(sigframe, core))
return -1;
if (RT_SIGFRAME_HAS_FPU(sigframe))
if (sigreturn_prep_fpu_frame(sigframe, rsigframe))
return -1;
setup_sas(sigframe, core->thread_core->sas);
return 0;
}
| 1,078 | 21.957447 | 105 |
c
|
criu
|
criu-master/criu/signalfd.c
|
#include <unistd.h>
#include <signal.h>
#include <sys/signalfd.h>
#include "common/compiler.h"
#include "signalfd.h"
#include "fdinfo.h"
#include "imgset.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "files.h"
#include "protobuf.h"
#include "images/signalfd.pb-c.h"
struct signalfd_info {
SignalfdEntry *sfe;
struct file_desc d;
};
int is_signalfd_link(char *link)
{
return is_anon_link_type(link, "[signalfd]");
}
static int dump_one_signalfd(int lfd, u32 id, const struct fd_parms *p)
{
SignalfdEntry sfd = SIGNALFD_ENTRY__INIT;
FileEntry fe = FILE_ENTRY__INIT;
if (parse_fdinfo(lfd, FD_TYPES__SIGNALFD, &sfd))
return -1;
sfd.id = id;
sfd.flags = p->flags;
sfd.fown = (FownEntry *)&p->fown;
fe.type = FD_TYPES__SIGNALFD;
fe.id = sfd.id;
fe.sgfd = &sfd;
return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
const struct fdtype_ops signalfd_dump_ops = {
.type = FD_TYPES__SIGNALFD,
.dump = dump_one_signalfd,
};
static void sigset_fill(sigset_t *to, unsigned long long from)
{
int sig;
pr_info("\tCalculating sigmask for %llx\n", from);
sigemptyset(to);
for (sig = 1; sig < NSIG; sig++)
if (from & (1ULL << (sig - 1))) {
pr_debug("\t\tAdd %d signal to mask\n", sig);
sigaddset(to, sig);
}
}
static int signalfd_open(struct file_desc *d, int *new_fd)
{
struct signalfd_info *info;
int tmp;
sigset_t mask;
info = container_of(d, struct signalfd_info, d);
pr_info("Restoring signalfd %#x\n", info->sfe->id);
sigset_fill(&mask, info->sfe->sigmask);
tmp = signalfd(-1, &mask, 0);
if (tmp < 0) {
pr_perror("Can't create signalfd %#08x", info->sfe->id);
return -1;
}
if (rst_file_params(tmp, info->sfe->fown, info->sfe->flags)) {
pr_perror("Can't restore params on signalfd %#08x", info->sfe->id);
goto err_close;
}
*new_fd = tmp;
return 0;
err_close:
close(tmp);
return -1;
}
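/*
 * Illustrative sketch, not part of CRIU: minimal standalone signalfd
 * usage. To receive a signal as data it must also be blocked, which the
 * restore path above leaves to the task's own sigmask. Names are
 * hypothetical.
 */
static inline int example_signalfd_reader(void)
{
struct signalfd_siginfo si;
sigset_t mask;
int fd;
sigemptyset(&mask);
sigaddset(&mask, SIGUSR1);
if (sigprocmask(SIG_BLOCK, &mask, NULL))
return -1;
fd = signalfd(-1, &mask, 0);
if (fd < 0)
return -1;
if (read(fd, &si, sizeof(si)) != sizeof(si)) {
close(fd);
return -1;
}
pr_info("got signal %u\n", si.ssi_signo);
close(fd);
return 0;
}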
static struct file_desc_ops signalfd_desc_ops = {
.type = FD_TYPES__SIGNALFD,
.open = signalfd_open,
};
static int collect_one_sigfd(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct signalfd_info *info = o;
info->sfe = pb_msg(msg, SignalfdEntry);
return file_desc_add(&info->d, info->sfe->id, &signalfd_desc_ops);
}
struct collect_image_info signalfd_cinfo = {
.fd_type = CR_FD_SIGNALFD,
.pb_type = PB_SIGNALFD,
.priv_size = sizeof(struct signalfd_info),
.collect = collect_one_sigfd,
};
| 2,397 | 20.410714 | 78 |
c
|
criu
|
criu-master/criu/sk-netlink.c
|
#include <unistd.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <libnl3/netlink/msg.h>
#include "imgset.h"
#include "files.h"
#include "sockets.h"
#include "util.h"
#include "protobuf.h"
#include "images/sk-netlink.pb-c.h"
#include "netlink_diag.h"
#include "libnetlink.h"
#include "namespaces.h"
#undef LOG_PREFIX
#define LOG_PREFIX "netlink: "
struct netlink_sk_desc {
struct socket_desc sd;
u32 portid;
u32 *groups;
u32 gsize;
u32 dst_portid;
u32 dst_group;
u8 state;
u8 protocol;
};
int netlink_receive_one(struct nlmsghdr *hdr, struct ns_id *ns, void *arg)
{
struct nlattr *tb[NETLINK_DIAG_MAX + 1];
struct netlink_diag_msg *m;
struct netlink_sk_desc *sd;
unsigned long *groups;
m = NLMSG_DATA(hdr);
pr_debug("Collect netlink sock 0x%x\n", m->ndiag_ino);
sd = xmalloc(sizeof(*sd));
if (!sd)
return -1;
sd->protocol = m->ndiag_protocol;
sd->portid = m->ndiag_portid;
sd->dst_portid = m->ndiag_dst_portid;
sd->dst_group = m->ndiag_dst_group;
sd->state = m->ndiag_state;
nlmsg_parse(hdr, sizeof(struct netlink_diag_msg), tb, NETLINK_DIAG_MAX, NULL);
if (tb[NETLINK_DIAG_GROUPS]) {
sd->gsize = nla_len(tb[NETLINK_DIAG_GROUPS]);
groups = nla_data(tb[NETLINK_DIAG_GROUPS]);
sd->groups = xmalloc(sd->gsize);
if (!sd->groups) {
xfree(sd);
return -1;
}
memcpy(sd->groups, groups, sd->gsize);
} else {
sd->groups = NULL;
sd->gsize = 0;
}
return sk_collect_one(m->ndiag_ino, PF_NETLINK, &sd->sd, ns);
}
static bool can_dump_netlink_sk(int lfd)
{
int ret;
ret = fd_has_data(lfd);
if (ret == 1)
pr_err("The socket has data to read\n");
return ret == 0;
}
static int dump_one_netlink_fd(int lfd, u32 id, const struct fd_parms *p)
{
struct netlink_sk_desc *sk;
FileEntry fe = FILE_ENTRY__INIT;
NetlinkSkEntry ne = NETLINK_SK_ENTRY__INIT;
SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;
sk = (struct netlink_sk_desc *)lookup_socket(p->stat.st_ino, PF_NETLINK, 0);
if (IS_ERR(sk))
goto err;
ne.id = id;
ne.ino = p->stat.st_ino;
if (!can_dump_netlink_sk(lfd))
goto err;
if (sk) {
BUG_ON(sk->sd.already_dumped);
ne.ns_id = sk->sd.sk_ns->id;
ne.has_ns_id = true;
ne.protocol = sk->protocol;
ne.portid = sk->portid;
ne.groups = sk->groups;
ne.n_groups = sk->gsize / sizeof(ne.groups[0]);
/*
* On 64-bit, sk->gsize is a multiple of 8 bytes (sizeof(long)),
* so remove the last 4 bytes if they are empty.
*/
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/*
* Big endian swap: Ugly hack for zdtm/static/sk-netlink
*
* For big endian systems:
*
* - sk->groups[0] are bits 32-64
* - sk->groups[1] are bits 0-32
*/
if (ne.n_groups == 2) {
uint32_t tmp = sk->groups[1];
sk->groups[1] = sk->groups[0];
sk->groups[0] = tmp;
}
#endif
if (ne.n_groups && sk->groups[ne.n_groups - 1] == 0)
ne.n_groups -= 1;
if (ne.n_groups > 1) {
pr_err("%d %x\n", sk->gsize, sk->groups[1]);
pr_err("The netlink socket 0x%x has more than 32 groups\n", ne.ino);
return -1;
}
if (sk->groups && !sk->portid) {
pr_err("The netlink socket 0x%x is bound to groups but not to portid\n", ne.ino);
return -1;
}
ne.state = sk->state;
ne.dst_portid = sk->dst_portid;
ne.dst_group = sk->dst_group;
} else { /* unconnected and unbound socket */
struct ns_id *nsid;
int val;
socklen_t aux = sizeof(val);
if (root_ns_mask & CLONE_NEWNET) {
nsid = get_socket_ns(lfd);
if (nsid == NULL)
return -1;
ne.ns_id = nsid->id;
ne.has_ns_id = true;
}
if (getsockopt(lfd, SOL_SOCKET, SO_PROTOCOL, &val, &aux) < 0) {
pr_perror("Unable to get protocol for netlink socket");
goto err;
}
ne.protocol = val;
}
ne.flags = p->flags;
ne.fown = (FownEntry *)&p->fown;
ne.opts = &skopts;
if (dump_socket_opts(lfd, &skopts))
goto err;
fe.type = FD_TYPES__NETLINKSK;
fe.id = ne.id;
fe.nlsk = ≠
if (pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE))
goto err;
return 0;
err:
return -1;
}
const struct fdtype_ops netlink_dump_ops = {
.type = FD_TYPES__NETLINKSK,
.dump = dump_one_netlink_fd,
};
struct netlink_sock_info {
NetlinkSkEntry *nse;
struct file_desc d;
};
static int open_netlink_sk(struct file_desc *d, int *new_fd)
{
struct netlink_sock_info *nsi;
NetlinkSkEntry *nse;
struct sockaddr_nl addr;
int sk = -1;
nsi = container_of(d, struct netlink_sock_info, d);
nse = nsi->nse;
pr_info("Opening netlink socket id %#x\n", nse->id);
if (set_netns(nse->ns_id))
return -1;
sk = socket(PF_NETLINK, SOCK_RAW, nse->protocol);
if (sk < 0) {
pr_perror("Can't create netlink sock");
return -1;
}
if (nse->portid) {
memset(&addr, 0, sizeof(addr));
addr.nl_family = AF_NETLINK;
if (nse->n_groups > 1) {
pr_err("Groups above 32 are not supported yet\n");
goto err;
}
if (nse->n_groups)
addr.nl_groups = nse->groups[0];
addr.nl_pid = nse->portid;
if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
/*
			 * Reassign if the original bind fails: socket addresses are
			 * typically kernel-assigned based on PID, collisions are common,
			 * and very few applications care what address they are bound to.
*/
addr.nl_pid = 0;
if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
pr_perror("Can't bind netlink socket");
goto err;
}
pr_warn("Netlink socket id %#x reassigned new port\n", nse->id);
}
}
if (nse->state == NETLINK_CONNECTED) {
addr.nl_family = AF_NETLINK;
addr.nl_groups = 1 << (nse->dst_group - 1);
addr.nl_pid = nse->dst_portid;
if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
pr_perror("Can't connect netlink socket");
goto err;
}
}
if (rst_file_params(sk, nse->fown, nse->flags))
goto err;
if (restore_socket_opts(sk, nse->opts))
goto err;
*new_fd = sk;
return 0;
err:
close(sk);
return -1;
}
static struct file_desc_ops netlink_sock_desc_ops = {
.type = FD_TYPES__NETLINKSK,
.open = open_netlink_sk,
};
static int collect_one_netlink_sk(void *o, ProtobufCMessage *base, struct cr_img *i)
{
struct netlink_sock_info *si = o;
si->nse = pb_msg(base, NetlinkSkEntry);
return file_desc_add(&si->d, si->nse->id, &netlink_sock_desc_ops);
}
struct collect_image_info netlink_sk_cinfo = {
.fd_type = CR_FD_NETLINK_SK,
.pb_type = PB_NETLINK_SK,
.priv_size = sizeof(struct netlink_sock_info),
.collect = collect_one_netlink_sk,
};
| 6,389 | 21.659574 | 84 |
c
|
criu
|
criu-master/criu/sk-packet.c
|
#include <linux/if_packet.h>
#include <sys/socket.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <libnl3/netlink/msg.h>
#include <unistd.h>
#include <string.h>
#include "imgset.h"
#include "files.h"
#include "sockets.h"
#include "libnetlink.h"
#include "sk-packet.h"
#include "packet_diag.h"
#include "vma.h"
#include <arpa/inet.h>
#include "protobuf.h"
#include "xmalloc.h"
#include "images/packet-sock.pb-c.h"
#include "images/fdinfo.pb-c.h"
#include "namespaces.h"
#undef LOG_PREFIX
#define LOG_PREFIX "packet: "
struct packet_sock_info {
PacketSockEntry *pse;
struct file_desc d;
};
struct packet_mreq_max {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[MAX_ADDR_LEN];
};
struct packet_sock_desc {
struct socket_desc sd;
unsigned int file_id;
unsigned int type;
unsigned short proto;
struct packet_diag_info nli;
int mreq_n;
struct packet_diag_mclist *mreqs;
unsigned int fanout;
struct packet_diag_ring *rx, *tx;
};
#define NO_FANOUT ((unsigned int)-1)
static int dump_mreqs(PacketSockEntry *psk, struct packet_sock_desc *sd)
{
int i;
if (!sd->mreq_n)
return 0;
pr_debug("\tdumping %d mreqs\n", sd->mreq_n);
psk->mclist = xmalloc(sd->mreq_n * sizeof(psk->mclist[0]));
if (!psk->mclist)
return -1;
for (i = 0; i < sd->mreq_n; i++) {
struct packet_diag_mclist *m = &sd->mreqs[i];
PacketMclist *im;
if (m->pdmc_count != 1) {
pr_err("Multiple MC membership not supported (but can be)\n");
goto err;
}
pr_debug("\tmr%d: idx %d type %d\n", i, m->pdmc_index, m->pdmc_type);
im = xmalloc(sizeof(*im));
if (!im)
goto err;
packet_mclist__init(im);
psk->mclist[i] = im;
psk->n_mclist++;
im->index = m->pdmc_index;
im->type = m->pdmc_type;
switch (m->pdmc_type) {
case PACKET_MR_MULTICAST:
case PACKET_MR_UNICAST:
im->addr.len = m->pdmc_alen;
im->addr.data = xmalloc(m->pdmc_alen);
if (!im->addr.data)
goto err;
memcpy(im->addr.data, m->pdmc_addr, m->pdmc_alen);
break;
case PACKET_MR_PROMISC:
case PACKET_MR_ALLMULTI:
break;
default:
pr_err("Unknown mc membership type %d\n", m->pdmc_type);
goto err;
}
}
return 0;
err:
return -1;
}
static PacketRing *dump_ring(struct packet_diag_ring *dr)
{
PacketRing *ring;
ring = xmalloc(sizeof(*ring));
if (!ring)
return NULL;
packet_ring__init(ring);
ring->block_size = dr->pdr_block_size;
ring->block_nr = dr->pdr_block_nr;
ring->frame_size = dr->pdr_frame_size;
ring->frame_nr = dr->pdr_frame_nr;
ring->retire_tmo = dr->pdr_retire_tmo;
ring->sizeof_priv = dr->pdr_sizeof_priv;
ring->features = dr->pdr_features;
return ring;
}
static int dump_rings(PacketSockEntry *psk, struct packet_sock_desc *sd)
{
if (sd->rx) {
psk->rx_ring = dump_ring(sd->rx);
if (!psk->rx_ring)
return -1;
}
if (sd->tx) {
psk->tx_ring = dump_ring(sd->tx);
if (!psk->tx_ring)
return -1;
}
return 0;
}
static int dump_one_packet_fd(int lfd, u32 id, const struct fd_parms *p)
{
FileEntry fe = FILE_ENTRY__INIT;
PacketSockEntry psk = PACKET_SOCK_ENTRY__INIT;
SkOptsEntry skopts = SK_OPTS_ENTRY__INIT;
struct packet_sock_desc *sd;
int i, ret;
sd = (struct packet_sock_desc *)lookup_socket(p->stat.st_ino, PF_PACKET, 0);
if (IS_ERR_OR_NULL(sd)) {
pr_err("Can't find packet socket %" PRIu64 "\n", p->stat.st_ino);
return -1;
}
pr_info("Dumping packet socket fd %d id %#x\n", lfd, id);
BUG_ON(sd->sd.already_dumped);
sd->sd.already_dumped = 1;
psk.id = sd->file_id = id;
psk.ns_id = sd->sd.sk_ns->id;
psk.has_ns_id = true;
psk.type = sd->type;
psk.flags = p->flags;
psk.fown = (FownEntry *)&p->fown;
psk.opts = &skopts;
if (dump_socket_opts(lfd, &skopts))
return -1;
psk.protocol = sd->proto;
psk.ifindex = sd->nli.pdi_index;
psk.version = sd->nli.pdi_version;
psk.reserve = sd->nli.pdi_reserve;
psk.timestamp = sd->nli.pdi_tstamp;
psk.copy_thresh = sd->nli.pdi_copy_thresh;
psk.aux_data = (sd->nli.pdi_flags & PDI_AUXDATA ? true : false);
psk.orig_dev = (sd->nli.pdi_flags & PDI_ORIGDEV ? true : false);
psk.vnet_hdr = (sd->nli.pdi_flags & PDI_VNETHDR ? true : false);
psk.loss = (sd->nli.pdi_flags & PDI_LOSS ? true : false);
ret = dump_mreqs(&psk, sd);
if (ret)
goto out;
if (sd->fanout != NO_FANOUT) {
psk.has_fanout = true;
psk.fanout = sd->fanout;
}
ret = dump_rings(&psk, sd);
if (ret)
goto out;
fe.type = FD_TYPES__PACKETSK;
fe.id = psk.id;
fe.psk = &psk;
ret = pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
out:
release_skopts(&skopts);
xfree(psk.rx_ring);
xfree(psk.tx_ring);
for (i = 0; i < psk.n_mclist; i++)
xfree(psk.mclist[i]->addr.data);
xfree(psk.mclist);
return ret;
}
const struct fdtype_ops packet_dump_ops = {
.type = FD_TYPES__PACKETSK,
.dump = dump_one_packet_fd,
};
int dump_socket_map(struct vma_area *vma)
{
struct packet_sock_desc *sd;
sd = (struct packet_sock_desc *)lookup_socket(vma->vm_socket_id, PF_PACKET, 0);
if (IS_ERR_OR_NULL(sd)) {
pr_err("Can't find packet socket %u to mmap\n", vma->vm_socket_id);
return -1;
}
if (!sd->file_id) {
pr_err("Mmap-ed socket %u not open\n", vma->vm_socket_id);
return -1;
}
pr_info("Dumping socket map %x -> %" PRIx64 "\n", sd->file_id, vma->e->start);
vma->e->shmid = sd->file_id;
return 0;
}
static int packet_save_mreqs(struct packet_sock_desc *sd, struct nlattr *mc)
{
sd->mreq_n = nla_len(mc) / sizeof(struct packet_diag_mclist);
pr_debug("\tGot %d mreqs\n", sd->mreq_n);
sd->mreqs = xmalloc(nla_len(mc));
if (!sd->mreqs)
return -1;
memcpy(sd->mreqs, nla_data(mc), nla_len(mc));
return 0;
}
int packet_receive_one(struct nlmsghdr *hdr, struct ns_id *ns, void *arg)
{
struct packet_diag_msg *m;
struct nlattr *tb[PACKET_DIAG_MAX + 1];
struct packet_sock_desc *sd;
m = NLMSG_DATA(hdr);
nlmsg_parse(hdr, sizeof(struct packet_diag_msg), tb, PACKET_DIAG_MAX, NULL);
pr_info("Collect packet sock %u %u\n", m->pdiag_ino, (unsigned int)m->pdiag_num);
if (!tb[PACKET_DIAG_INFO]) {
pr_err("No packet sock info in nlm\n");
return -1;
}
if (!tb[PACKET_DIAG_MCLIST]) {
pr_err("No packet sock mclist in nlm\n");
return -1;
}
sd = xmalloc(sizeof(*sd));
if (!sd)
return -1;
sd->file_id = 0;
sd->type = m->pdiag_type;
sd->proto = htons(m->pdiag_num);
sd->rx = NULL;
sd->tx = NULL;
memcpy(&sd->nli, nla_data(tb[PACKET_DIAG_INFO]), sizeof(sd->nli));
if (packet_save_mreqs(sd, tb[PACKET_DIAG_MCLIST]))
goto err;
if (tb[PACKET_DIAG_FANOUT])
sd->fanout = *(__u32 *)RTA_DATA(tb[PACKET_DIAG_FANOUT]);
else
sd->fanout = NO_FANOUT;
if (tb[PACKET_DIAG_RX_RING]) {
sd->rx = xmalloc(sizeof(*sd->rx));
if (sd->rx == NULL)
goto err;
memcpy(sd->rx, RTA_DATA(tb[PACKET_DIAG_RX_RING]), sizeof(*sd->rx));
}
if (tb[PACKET_DIAG_TX_RING]) {
sd->tx = xmalloc(sizeof(*sd->tx));
if (sd->tx == NULL)
goto err;
memcpy(sd->tx, RTA_DATA(tb[PACKET_DIAG_TX_RING]), sizeof(*sd->tx));
}
return sk_collect_one(m->pdiag_ino, PF_PACKET, &sd->sd, ns);
err:
xfree(sd->tx);
xfree(sd->rx);
xfree(sd);
return -1;
}
static int open_socket_map(int pid, struct vma_area *vm)
{
VmaEntry *vma = vm->e;
struct file_desc *fd;
struct fdinfo_list_entry *le;
pr_info("Getting packet socket fd for %d:%x\n", pid, (int)vma->shmid);
fd = find_file_desc_raw(FD_TYPES__PACKETSK, vma->shmid);
if (!fd) {
pr_err("No packet socket %x\n", (int)vma->shmid);
return -1;
}
list_for_each_entry(le, &fd->fd_info_head, desc_list)
if (le->pid == pid) {
int fd;
/*
* Restorer will close the mmap-ed fd
*/
fd = dup(le->fe->fd);
if (fd < 0) {
pr_perror("Can't dup packet sk");
return -1;
}
vma->fd = fd;
return 0;
}
pr_err("No open packet socket %x by %d\n", (int)vma->shmid, pid);
return -1;
}
int collect_socket_map(struct vma_area *vma)
{
vma->vm_open = open_socket_map;
return 0;
}
static int restore_mreqs(int sk, PacketSockEntry *pse)
{
int i;
for (i = 0; i < pse->n_mclist; i++) {
PacketMclist *ml;
struct packet_mreq_max mreq;
ml = pse->mclist[i];
pr_info("Restoring mreq type %d\n", ml->type);
if (ml->addr.len > sizeof(mreq.mr_address)) {
pr_err("To big mcaddr %zu\n", ml->addr.len);
return -1;
}
mreq.mr_ifindex = ml->index;
mreq.mr_type = ml->type;
mreq.mr_alen = ml->addr.len;
memcpy(mreq.mr_address, ml->addr.data, ml->addr.len);
if (restore_opt(sk, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq))
return -1;
}
return 0;
}
static int restore_ring(int sk, int type, PacketRing *ring)
{
struct tpacket_req3 req;
if (!ring)
return 0;
pr_debug("\tRestoring %d ring\n", type);
req.tp_block_size = ring->block_size;
req.tp_block_nr = ring->block_nr;
req.tp_frame_size = ring->frame_size;
req.tp_frame_nr = ring->frame_nr;
req.tp_retire_blk_tov = ring->retire_tmo;
req.tp_sizeof_priv = ring->sizeof_priv;
req.tp_feature_req_word = ring->features;
return restore_opt(sk, SOL_PACKET, type, &req);
}
static int restore_rings(int sk, PacketSockEntry *psk)
{
if (restore_ring(sk, PACKET_RX_RING, psk->rx_ring))
return -1;
if (restore_ring(sk, PACKET_TX_RING, psk->tx_ring))
return -1;
return 0;
}
static int open_packet_sk_spkt(PacketSockEntry *pse, int *new_fd)
{
struct sockaddr addr_spkt;
int sk;
sk = socket(PF_PACKET, pse->type, pse->protocol);
if (sk < 0) {
pr_perror("Can't create packet socket");
return -1;
}
memset(&addr_spkt, 0, sizeof(addr_spkt));
addr_spkt.sa_family = AF_PACKET;
// if the socket was bound to any device
if (pse->ifindex > 0) {
const size_t sa_data_size = sizeof(addr_spkt.sa_data);
struct ifreq req;
memset(&req, 0, sizeof(req));
req.ifr_ifindex = pse->ifindex;
if (ioctl(sk, SIOCGIFNAME, &req) < 0) {
pr_perror("Can't get interface name (ifindex %d)", pse->ifindex);
goto err;
}
memcpy(addr_spkt.sa_data, req.ifr_name, sa_data_size);
addr_spkt.sa_data[sa_data_size - 1] = 0;
if (bind(sk, &addr_spkt, sizeof(addr_spkt)) < 0) {
pr_perror("Can't bind packet socket to %s", req.ifr_name);
goto err;
}
}
if (rst_file_params(sk, pse->fown, pse->flags))
goto err;
if (restore_socket_opts(sk, pse->opts))
goto err;
*new_fd = sk;
return 0;
err:
close(sk);
return -1;
}
static int open_packet_sk(struct file_desc *d, int *new_fd)
{
struct packet_sock_info *psi;
PacketSockEntry *pse;
struct sockaddr_ll addr;
int sk, yes;
psi = container_of(d, struct packet_sock_info, d);
pse = psi->pse;
pr_info("Opening packet socket id %#x\n", pse->id);
if (set_netns(pse->ns_id))
return -1;
if (pse->type == SOCK_PACKET)
return open_packet_sk_spkt(pse, new_fd);
sk = socket(PF_PACKET, pse->type, pse->protocol);
if (sk < 0) {
pr_perror("Can't create packet sock");
goto err;
}
memset(&addr, 0, sizeof(addr));
addr.sll_family = AF_PACKET;
addr.sll_ifindex = pse->ifindex;
if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
pr_perror("Can't bind packet socket");
goto err_cl;
}
if (restore_opt(sk, SOL_PACKET, PACKET_VERSION, &pse->version))
goto err_cl;
if (restore_opt(sk, SOL_PACKET, PACKET_RESERVE, &pse->reserve))
goto err_cl;
if (restore_opt(sk, SOL_PACKET, PACKET_TIMESTAMP, &pse->timestamp))
goto err_cl;
if (restore_opt(sk, SOL_PACKET, PACKET_COPY_THRESH, &pse->copy_thresh))
goto err_cl;
if (pse->aux_data) {
yes = 1;
if (restore_opt(sk, SOL_PACKET, PACKET_AUXDATA, &yes))
goto err_cl;
}
if (pse->orig_dev) {
yes = 1;
if (restore_opt(sk, SOL_PACKET, PACKET_ORIGDEV, &yes))
goto err_cl;
}
if (pse->vnet_hdr) {
yes = 1;
if (restore_opt(sk, SOL_PACKET, PACKET_VNET_HDR, &yes))
goto err_cl;
}
if (pse->loss) {
yes = 1;
if (restore_opt(sk, SOL_PACKET, PACKET_LOSS, &yes))
goto err_cl;
}
if (restore_mreqs(sk, pse))
goto err_cl;
if (restore_rings(sk, pse))
goto err_cl;
if (pse->has_fanout) {
pr_info("Restoring fanout %x\n", pse->fanout);
if (restore_opt(sk, SOL_PACKET, PACKET_FANOUT, &pse->fanout))
goto err_cl;
}
if (rst_file_params(sk, pse->fown, pse->flags))
goto err_cl;
if (restore_socket_opts(sk, pse->opts))
goto err_cl;
*new_fd = sk;
return 0;
err_cl:
close(sk);
err:
return -1;
}
static struct file_desc_ops packet_sock_desc_ops = {
.type = FD_TYPES__PACKETSK,
.open = open_packet_sk,
};
static int collect_one_packet_sk(void *o, ProtobufCMessage *base, struct cr_img *i)
{
struct packet_sock_info *si = o;
si->pse = pb_msg(base, PacketSockEntry);
return file_desc_add(&si->d, si->pse->id, &packet_sock_desc_ops);
}
struct collect_image_info packet_sk_cinfo = {
.fd_type = CR_FD_PACKETSK,
.pb_type = PB_PACKET_SOCK,
.priv_size = sizeof(struct packet_sock_info),
.collect = collect_one_packet_sk,
};
| 12,731 | 20.913941 | 83 |
c
|
criu
|
criu-master/criu/sk-tcp.c
|
#include <netinet/tcp.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <string.h>
#include <sched.h>
#include <netinet/in.h>
#include "../soccr/soccr.h"
#include "common/config.h"
#include "cr_options.h"
#include "util.h"
#include "common/list.h"
#include "log.h"
#include "files.h"
#include "sockets.h"
#include "sk-inet.h"
#include "netfilter.h"
#include "image.h"
#include "namespaces.h"
#include "xmalloc.h"
#include "kerndat.h"
#include "restorer.h"
#include "rst-malloc.h"
#include "protobuf.h"
#include "images/tcp-stream.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "tcp: "
static LIST_HEAD(cpt_tcp_repair_sockets);
static LIST_HEAD(rst_tcp_repair_sockets);
static int lock_connection(struct inet_sk_desc *sk)
{
if (opts.network_lock_method == NETWORK_LOCK_IPTABLES)
return iptables_lock_connection(sk);
else if (opts.network_lock_method == NETWORK_LOCK_NFTABLES)
return nftables_lock_connection(sk);
return -1;
}
static int unlock_connection(struct inet_sk_desc *sk)
{
if (opts.network_lock_method == NETWORK_LOCK_IPTABLES)
return iptables_unlock_connection(sk);
else if (opts.network_lock_method == NETWORK_LOCK_NFTABLES)
/* All connections will be unlocked in network_unlock(void) */
return 0;
return -1;
}
static int tcp_repair_established(int fd, struct inet_sk_desc *sk)
{
int ret;
struct libsoccr_sk *socr;
pr_info("\tTurning repair on for socket %x\n", sk->sd.ino);
/*
* Keep the socket open in criu till the very end. In
	 * case we close this fd after dumping one task's fds and
	 * then fail, we'll have to turn repair mode off
*/
sk->rfd = dup(fd);
if (sk->rfd < 0) {
pr_perror("Can't save socket fd for repair");
goto err1;
}
if (!(root_ns_mask & CLONE_NEWNET)) {
ret = lock_connection(sk);
if (ret < 0) {
pr_err("Failed to lock TCP connection %x\n", sk->sd.ino);
goto err2;
}
}
socr = libsoccr_pause(sk->rfd);
if (!socr)
goto err3;
sk->priv = socr;
list_add_tail(&sk->rlist, &cpt_tcp_repair_sockets);
return 0;
err3:
if (!(root_ns_mask & CLONE_NEWNET))
unlock_connection(sk);
err2:
close(sk->rfd);
err1:
return -1;
}
static void tcp_unlock_one(struct inet_sk_desc *sk)
{
int ret;
list_del(&sk->rlist);
if (!(root_ns_mask & CLONE_NEWNET)) {
ret = unlock_connection(sk);
if (ret < 0)
pr_err("Failed to unlock TCP connection %x\n", sk->sd.ino);
}
libsoccr_resume(sk->priv);
sk->priv = NULL;
/*
* tcp_repair_off modifies SO_REUSEADDR so
* don't forget to restore original value.
*/
restore_opt(sk->rfd, SOL_SOCKET, SO_REUSEADDR, &sk->cpt_reuseaddr);
close(sk->rfd);
}
void cpt_unlock_tcp_connections(void)
{
struct inet_sk_desc *sk, *n;
list_for_each_entry_safe(sk, n, &cpt_tcp_repair_sockets, rlist)
tcp_unlock_one(sk);
}
static int dump_tcp_conn_state(struct inet_sk_desc *sk)
{
struct libsoccr_sk *socr = sk->priv;
int ret, aux;
struct cr_img *img;
TcpStreamEntry tse = TCP_STREAM_ENTRY__INIT;
char *buf;
struct libsoccr_sk_data data;
ret = libsoccr_save(socr, &data, sizeof(data));
if (ret < 0) {
pr_err("libsoccr_save() failed with %d\n", ret);
goto err_r;
}
if (ret != sizeof(data)) {
pr_err("This libsocr is not supported (%d vs %d)\n", ret, (int)sizeof(data));
goto err_r;
}
sk->state = data.state;
tse.inq_len = data.inq_len;
tse.inq_seq = data.inq_seq;
tse.outq_len = data.outq_len;
tse.outq_seq = data.outq_seq;
tse.unsq_len = data.unsq_len;
tse.has_unsq_len = true;
tse.mss_clamp = data.mss_clamp;
tse.opt_mask = data.opt_mask;
if (tse.opt_mask & TCPI_OPT_WSCALE) {
tse.snd_wscale = data.snd_wscale;
tse.rcv_wscale = data.rcv_wscale;
tse.has_rcv_wscale = true;
}
if (tse.opt_mask & TCPI_OPT_TIMESTAMPS) {
tse.timestamp = data.timestamp;
tse.has_timestamp = true;
}
if (data.flags & SOCCR_FLAGS_WINDOW) {
tse.has_snd_wl1 = true;
tse.has_snd_wnd = true;
tse.has_max_window = true;
tse.has_rcv_wnd = true;
tse.has_rcv_wup = true;
tse.snd_wl1 = data.snd_wl1;
tse.snd_wnd = data.snd_wnd;
tse.max_window = data.max_window;
tse.rcv_wnd = data.rcv_wnd;
tse.rcv_wup = data.rcv_wup;
}
/*
* TCP socket options
*/
if (dump_opt(sk->rfd, SOL_TCP, TCP_NODELAY, &aux))
goto err_opt;
if (aux) {
tse.has_nodelay = true;
tse.nodelay = true;
}
if (dump_opt(sk->rfd, SOL_TCP, TCP_CORK, &aux))
goto err_opt;
if (aux) {
tse.has_cork = true;
tse.cork = true;
}
/*
* Push the stuff to image
*/
img = open_image(CR_FD_TCP_STREAM, O_DUMP, sk->sd.ino);
if (!img)
goto err_img;
ret = pb_write_one(img, &tse, PB_TCP_STREAM);
if (ret < 0)
goto err_iw;
buf = libsoccr_get_queue_bytes(socr, TCP_RECV_QUEUE, SOCCR_MEM_EXCL);
if (buf) {
ret = write_img_buf(img, buf, tse.inq_len);
if (ret < 0)
goto err_iw;
xfree(buf);
}
buf = libsoccr_get_queue_bytes(socr, TCP_SEND_QUEUE, SOCCR_MEM_EXCL);
if (buf) {
ret = write_img_buf(img, buf, tse.outq_len);
if (ret < 0)
goto err_iw;
xfree(buf);
}
pr_info("Done\n");
err_iw:
close_image(img);
err_img:
err_opt:
err_r:
return ret;
}
int dump_one_tcp(int fd, struct inet_sk_desc *sk, SkOptsEntry *soe)
{
soe->has_tcp_keepcnt = true;
if (dump_opt(fd, SOL_TCP, TCP_KEEPCNT, &soe->tcp_keepcnt)) {
pr_perror("Can't read TCP_KEEPCNT");
return -1;
}
soe->has_tcp_keepidle = true;
if (dump_opt(fd, SOL_TCP, TCP_KEEPIDLE, &soe->tcp_keepidle)) {
pr_perror("Can't read TCP_KEEPIDLE");
return -1;
}
soe->has_tcp_keepintvl = true;
if (dump_opt(fd, SOL_TCP, TCP_KEEPINTVL, &soe->tcp_keepintvl)) {
pr_perror("Can't read TCP_KEEPINTVL");
return -1;
}
if (sk->dst_port == 0)
return 0;
if (opts.tcp_close) {
return 0;
}
pr_info("Dumping TCP connection\n");
if (tcp_repair_established(fd, sk))
return -1;
if (dump_tcp_conn_state(sk))
return -1;
/*
* Socket is left in repair mode, so that at the end it's just
* closed and the connection is silently terminated
*/
return 0;
}
static int read_tcp_queue(struct libsoccr_sk *sk, struct libsoccr_sk_data *data, int queue, u32 len, struct cr_img *img)
{
char *buf;
buf = xmalloc(len);
if (!buf)
return -1;
if (read_img_buf(img, buf, len) < 0)
goto err;
return libsoccr_set_queue_bytes(sk, queue, buf, SOCCR_MEM_EXCL);
err:
xfree(buf);
return -1;
}
static int read_tcp_queues(struct libsoccr_sk *sk, struct libsoccr_sk_data *data, struct cr_img *img)
{
u32 len;
len = data->inq_len;
if (len && read_tcp_queue(sk, data, TCP_RECV_QUEUE, len, img))
return -1;
len = data->outq_len;
if (len && read_tcp_queue(sk, data, TCP_SEND_QUEUE, len, img))
return -1;
return 0;
}
static int restore_tcp_conn_state(int sk, struct libsoccr_sk *socr, struct inet_sk_info *ii)
{
int aux;
struct cr_img *img;
TcpStreamEntry *tse;
struct libsoccr_sk_data data = {};
union libsoccr_addr sa_src, sa_dst;
pr_info("Restoring TCP connection id %x ino %x\n", ii->ie->id, ii->ie->ino);
img = open_image(CR_FD_TCP_STREAM, O_RSTR, ii->ie->ino);
if (!img)
goto err;
if (pb_read_one(img, &tse, PB_TCP_STREAM) < 0)
goto err_c;
if (!tse->has_unsq_len) {
pr_err("No unsq len in the image\n");
goto err_c;
}
data.state = ii->ie->state;
data.inq_len = tse->inq_len;
data.inq_seq = tse->inq_seq;
data.outq_len = tse->outq_len;
data.outq_seq = tse->outq_seq;
data.unsq_len = tse->unsq_len;
data.mss_clamp = tse->mss_clamp;
data.opt_mask = tse->opt_mask;
if (tse->opt_mask & TCPI_OPT_WSCALE) {
if (!tse->has_rcv_wscale) {
pr_err("No rcv wscale in the image\n");
goto err_c;
}
data.snd_wscale = tse->snd_wscale;
data.rcv_wscale = tse->rcv_wscale;
}
if (tse->opt_mask & TCPI_OPT_TIMESTAMPS) {
if (!tse->has_timestamp) {
pr_err("No timestamp in the image\n");
goto err_c;
}
data.timestamp = tse->timestamp;
}
if (tse->has_snd_wnd) {
data.flags |= SOCCR_FLAGS_WINDOW;
data.snd_wl1 = tse->snd_wl1;
data.snd_wnd = tse->snd_wnd;
data.max_window = tse->max_window;
data.rcv_wnd = tse->rcv_wnd;
data.rcv_wup = tse->rcv_wup;
}
if (restore_sockaddr(&sa_src, ii->ie->family, ii->ie->src_port, ii->ie->src_addr, 0) < 0)
goto err_c;
if (restore_sockaddr(&sa_dst, ii->ie->family, ii->ie->dst_port, ii->ie->dst_addr, 0) < 0)
goto err_c;
libsoccr_set_addr(socr, 1, &sa_src, 0);
libsoccr_set_addr(socr, 0, &sa_dst, 0);
/*
* O_NONBLOCK has to be set before libsoccr_restore(),
* it is required to restore syn-sent sockets.
*/
if (restore_prepare_socket(sk))
goto err_c;
if (read_tcp_queues(socr, &data, img))
goto err_c;
if (libsoccr_restore(socr, &data, sizeof(data)))
goto err_c;
if (tse->has_nodelay && tse->nodelay) {
aux = 1;
if (restore_opt(sk, SOL_TCP, TCP_NODELAY, &aux))
goto err_c;
}
if (tse->has_cork && tse->cork) {
aux = 1;
if (restore_opt(sk, SOL_TCP, TCP_CORK, &aux))
goto err_c;
}
tcp_stream_entry__free_unpacked(tse, NULL);
close_image(img);
return 0;
err_c:
tcp_stream_entry__free_unpacked(tse, NULL);
close_image(img);
err:
return -1;
}
int prepare_tcp_socks(struct task_restore_args *ta)
{
struct inet_sk_info *ii;
ta->tcp_socks = (struct rst_tcp_sock *)rst_mem_align_cpos(RM_PRIVATE);
ta->tcp_socks_n = 0;
list_for_each_entry(ii, &rst_tcp_repair_sockets, rlist) {
struct rst_tcp_sock *rs;
/*
* rst_tcp_repair_sockets contains all sockets, so we need to
		 * select the sockets that are restored in the current process.
*/
if (ii->sk_fd == -1)
continue;
rs = rst_mem_alloc(sizeof(*rs), RM_PRIVATE);
if (!rs)
return -1;
rs->sk = ii->sk_fd;
rs->reuseaddr = ii->ie->opts->reuseaddr;
ta->tcp_socks_n++;
}
return 0;
}
int restore_one_tcp(int fd, struct inet_sk_info *ii)
{
struct libsoccr_sk *sk;
pr_info("Restoring TCP connection\n");
if (opts.tcp_close) {
if (shutdown(fd, SHUT_RDWR) && errno != ENOTCONN) {
pr_perror("Unable to shutdown the socket id %x ino %x", ii->ie->id, ii->ie->ino);
}
return 0;
}
sk = libsoccr_pause(fd);
if (!sk)
return -1;
if (restore_tcp_conn_state(fd, sk, ii)) {
libsoccr_release(sk);
return -1;
}
return 0;
}
void tcp_locked_conn_add(struct inet_sk_info *ii)
{
list_add_tail(&ii->rlist, &rst_tcp_repair_sockets);
ii->sk_fd = -1;
}
static int unlock_connection_info(struct inet_sk_info *si)
{
if (opts.network_lock_method == NETWORK_LOCK_IPTABLES)
return iptables_unlock_connection_info(si);
else if (opts.network_lock_method == NETWORK_LOCK_NFTABLES)
/* All connections will be unlocked in network_unlock(void) */
return 0;
return -1;
}
void rst_unlock_tcp_connections(void)
{
struct inet_sk_info *ii;
if (opts.tcp_close)
return;
/* Network will be unlocked by network-unlock scripts */
if (root_ns_mask & CLONE_NEWNET)
return;
list_for_each_entry(ii, &rst_tcp_repair_sockets, rlist)
unlock_connection_info(ii);
}
| 10,737 | 20.305556 | 120 |
c
|
criu
|
criu-master/criu/stats.c
|
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include "int.h"
#include "atomic.h"
#include "cr_options.h"
#include "rst-malloc.h"
#include "protobuf.h"
#include "stats.h"
#include "util.h"
#include "image.h"
#include "images/stats.pb-c.h"
struct timing {
struct timeval start;
struct timeval total;
};
struct dump_stats {
struct timing timings[DUMP_TIME_NR_STATS];
unsigned long counts[DUMP_CNT_NR_STATS];
};
struct restore_stats {
struct timing timings[RESTORE_TIME_NS_STATS];
atomic_t counts[RESTORE_CNT_NR_STATS];
};
struct dump_stats *dstats;
struct restore_stats *rstats;
void cnt_add(int c, unsigned long val)
{
if (dstats != NULL) {
BUG_ON(c >= DUMP_CNT_NR_STATS);
dstats->counts[c] += val;
} else if (rstats != NULL) {
BUG_ON(c >= RESTORE_CNT_NR_STATS);
atomic_add(val, &rstats->counts[c]);
} else
BUG();
}
void cnt_sub(int c, unsigned long val)
{
if (dstats != NULL) {
BUG_ON(c >= DUMP_CNT_NR_STATS);
dstats->counts[c] -= val;
} else if (rstats != NULL) {
BUG_ON(c >= RESTORE_CNT_NR_STATS);
atomic_add(-val, &rstats->counts[c]);
} else
BUG();
}
static void timeval_accumulate(const struct timeval *from, const struct timeval *to, struct timeval *res)
{
suseconds_t usec;
res->tv_sec += to->tv_sec - from->tv_sec;
usec = to->tv_usec;
if (usec < from->tv_usec) {
usec += USEC_PER_SEC;
res->tv_sec -= 1;
}
res->tv_usec += usec - from->tv_usec;
if (res->tv_usec > USEC_PER_SEC) {
res->tv_usec -= USEC_PER_SEC;
res->tv_sec += 1;
}
}
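/*
 * Worked example (illustrative values): accumulating the interval from
 * {5s, 900000us} to {7s, 100000us}. Since 100000 < 900000 we borrow a
 * second: usec becomes 1100000, res->tv_sec gains (7 - 5) - 1 = 1 and
 * res->tv_usec gains 1100000 - 900000 = 200000, i.e. 1.2s in total.
 */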
static struct timing *get_timing(int t)
{
if (dstats != NULL) {
BUG_ON(t >= DUMP_TIME_NR_STATS);
return &dstats->timings[t];
} else if (rstats != NULL) {
/*
* FIXME -- this does _NOT_ work when called
* from different tasks.
*/
BUG_ON(t >= RESTORE_TIME_NS_STATS);
return &rstats->timings[t];
}
BUG();
return NULL;
}
void timing_start(int t)
{
struct timing *tm;
tm = get_timing(t);
gettimeofday(&tm->start, NULL);
}
void timing_stop(int t)
{
struct timing *tm;
struct timeval now;
/* stats haven't been initialized. */
if (!dstats && !rstats)
return;
tm = get_timing(t);
gettimeofday(&now, NULL);
timeval_accumulate(&tm->start, &now, &tm->total);
}
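/*
 * Usage sketch (illustrative; TIME_FREEZING is a real timing slot, but
 * the surrounding call is made up). A start/stop pair brackets the
 * measured phase, and since the result is accumulated, the same slot
 * may be timed several times:
 *
 *	timing_start(TIME_FREEZING);
 *	ret = freeze_all_tasks();
 *	timing_stop(TIME_FREEZING);
 */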
static void encode_time(int t, u_int32_t *to)
{
struct timing *tm;
tm = get_timing(t);
*to = tm->total.tv_sec * USEC_PER_SEC + tm->total.tv_usec;
}
static void display_stats(int what, StatsEntry *stats)
{
if (what == DUMP_STATS) {
pr_msg("Displaying dump stats:\n");
pr_msg("Freezing time: %d us\n", stats->dump->freezing_time);
pr_msg("Frozen time: %d us\n", stats->dump->frozen_time);
pr_msg("Memory dump time: %d us\n", stats->dump->memdump_time);
pr_msg("Memory write time: %d us\n", stats->dump->memwrite_time);
if (stats->dump->has_irmap_resolve)
pr_msg("IRMAP resolve time: %d us\n", stats->dump->irmap_resolve);
pr_msg("Memory pages scanned: %" PRIu64 " (0x%" PRIx64 ")\n", stats->dump->pages_scanned,
stats->dump->pages_scanned);
pr_msg("Memory pages skipped from parent: %" PRIu64 " (0x%" PRIx64 ")\n",
stats->dump->pages_skipped_parent, stats->dump->pages_skipped_parent);
pr_msg("Memory pages written: %" PRIu64 " (0x%" PRIx64 ")\n", stats->dump->pages_written,
stats->dump->pages_written);
pr_msg("Lazy memory pages: %" PRIu64 " (0x%" PRIx64 ")\n", stats->dump->pages_lazy,
stats->dump->pages_lazy);
} else if (what == RESTORE_STATS) {
pr_msg("Displaying restore stats:\n");
pr_msg("Pages compared: %" PRIu64 " (0x%" PRIx64 ")\n", stats->restore->pages_compared,
stats->restore->pages_compared);
pr_msg("Pages skipped COW: %" PRIu64 " (0x%" PRIx64 ")\n", stats->restore->pages_skipped_cow,
stats->restore->pages_skipped_cow);
if (stats->restore->has_pages_restored)
pr_msg("Pages restored: %" PRIu64 " (0x%" PRIx64 ")\n", stats->restore->pages_restored,
stats->restore->pages_restored);
pr_msg("Restore time: %d us\n", stats->restore->restore_time);
pr_msg("Forking time: %d us\n", stats->restore->forking_time);
} else
return;
}
void write_stats(int what)
{
StatsEntry stats = STATS_ENTRY__INIT;
DumpStatsEntry ds_entry = DUMP_STATS_ENTRY__INIT;
RestoreStatsEntry rs_entry = RESTORE_STATS_ENTRY__INIT;
char *name;
struct cr_img *img;
pr_info("Writing stats\n");
if (what == DUMP_STATS) {
stats.dump = &ds_entry;
encode_time(TIME_FREEZING, &ds_entry.freezing_time);
encode_time(TIME_FROZEN, &ds_entry.frozen_time);
encode_time(TIME_MEMDUMP, &ds_entry.memdump_time);
encode_time(TIME_MEMWRITE, &ds_entry.memwrite_time);
ds_entry.has_irmap_resolve = true;
encode_time(TIME_IRMAP_RESOLVE, &ds_entry.irmap_resolve);
ds_entry.pages_scanned = dstats->counts[CNT_PAGES_SCANNED];
ds_entry.pages_skipped_parent = dstats->counts[CNT_PAGES_SKIPPED_PARENT];
ds_entry.pages_written = dstats->counts[CNT_PAGES_WRITTEN];
ds_entry.pages_lazy = dstats->counts[CNT_PAGES_LAZY];
ds_entry.page_pipes = dstats->counts[CNT_PAGE_PIPES];
ds_entry.has_page_pipes = true;
ds_entry.page_pipe_bufs = dstats->counts[CNT_PAGE_PIPE_BUFS];
ds_entry.has_page_pipe_bufs = true;
ds_entry.shpages_scanned = dstats->counts[CNT_SHPAGES_SCANNED];
ds_entry.has_shpages_scanned = true;
ds_entry.shpages_skipped_parent = dstats->counts[CNT_SHPAGES_SKIPPED_PARENT];
ds_entry.has_shpages_skipped_parent = true;
ds_entry.shpages_written = dstats->counts[CNT_SHPAGES_WRITTEN];
ds_entry.has_shpages_written = true;
name = "dump";
} else if (what == RESTORE_STATS) {
stats.restore = &rs_entry;
rs_entry.pages_compared = atomic_read(&rstats->counts[CNT_PAGES_COMPARED]);
rs_entry.pages_skipped_cow = atomic_read(&rstats->counts[CNT_PAGES_SKIPPED_COW]);
rs_entry.has_pages_restored = true;
rs_entry.pages_restored = atomic_read(&rstats->counts[CNT_PAGES_RESTORED]);
encode_time(TIME_FORK, &rs_entry.forking_time);
encode_time(TIME_RESTORE, &rs_entry.restore_time);
name = "restore";
} else
return;
img = open_image_at(AT_FDCWD, CR_FD_STATS, O_DUMP, name);
if (img) {
pb_write_one(img, &stats, PB_STATS);
close_image(img);
}
if (opts.display_stats)
display_stats(what, &stats);
}
int init_stats(int what)
{
if (what == DUMP_STATS) {
/*
* Dumping happens via one process most of the time,
* so we are typically OK with the plain malloc, but
* when dumping namespaces we fork() a separate process
* for it and when it goes and dumps shmem segments
* it will alter the CNT_SHPAGES_ counters, so we need
* to have them in shmem.
*/
dstats = shmalloc(sizeof(*dstats));
return dstats ? 0 : -1;
}
rstats = shmalloc(sizeof(struct restore_stats));
return rstats ? 0 : -1;
}
| 6,666 | 27.613734 | 105 |
c
|
criu
|
criu-master/criu/string.c
|
/*
* Adopted from linux kernel
*/
#include <sys/types.h>
#include <string.h>
#include "string.h"
/**
* strlcpy - Copy a %NUL terminated string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
*
* Compatible with *BSD: the result is always a valid
* NUL-terminated string that fits in the buffer (unless,
* of course, the buffer size is zero). It does not pad
* out the result like strncpy() does.
*/
size_t __strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
memcpy(dest, src, len);
dest[len] = '\0';
}
return ret;
}
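/*
 * Usage sketch (illustrative helper, not part of the original source):
 * since the return value is the full length of @src, truncation can be
 * detected by comparing it against the destination size.
 */
static inline int __attribute__((unused)) example_strlcpy_truncated(char *dst, size_t size, const char *src)
{
	/* __strlcpy() returns strlen(src); >= size means the copy was cut */
	return __strlcpy(dst, src, size) >= size;
}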
/**
* strlcat - Append a length-limited, %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The size of the destination buffer.
*/
size_t __strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
size_t len = strlen(src);
size_t res = dsize + len;
/*
	 * It's assumed that @dsize is strictly
	 * less than @count. Otherwise it's
	 * a bug. But we leave that to the caller.
*/
dest += dsize;
count -= dsize;
if (len >= count)
len = count - 1;
memcpy(dest, src, len);
dest[len] = 0;
return res;
}
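/*
 * The same idiom works for concatenation (illustrative helper):
 * __strlcat() returns the length the combined string would have had,
 * so overflowing @count is again a simple comparison.
 */
static inline int __attribute__((unused)) example_strlcat_truncated(char *dst, const char *src, size_t count)
{
	return __strlcat(dst, src, count) >= count;
}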
| 1,308 | 21.964912 | 71 |
c
|
criu
|
criu-master/criu/sysctl.c
|
#include <unistd.h>
#include <fcntl.h>
#include <ctype.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sched.h>
#include "namespaces.h"
#include "sysctl.h"
#include "util.h"
/* These are the namespaces we know how to restore in various ways.
*/
#define KNOWN_NS_MASK (CLONE_NEWUTS | CLONE_NEWNET | CLONE_NEWIPC)
struct sysctl_userns_req {
int op;
unsigned int ns;
size_t nr_req;
struct sysctl_req *reqs;
};
#define __SYSCTL_OP(__ret, __fd, __req, __type, __nr, __op) \
do { \
if (__op == CTL_READ) \
__ret = sysctl_read_##__type(__fd, __req, (__type *)(__req)->arg, __nr); \
else if (__op == CTL_WRITE) \
__ret = sysctl_write_##__type(__fd, __req, (__type *)(__req)->arg, __nr); \
else \
__ret = -1; \
} while (0)
#define GEN_SYSCTL_READ_FUNC(__type, __conv) \
static int sysctl_read_##__type(int fd, struct sysctl_req *req, __type *arg, int nr) \
{ \
char buf[1024] = { 0 }; \
int i, ret = -1; \
char *p = buf; \
\
ret = read(fd, buf, sizeof(buf)); \
if (ret < 0) { \
pr_perror("Can't read %s", req->name); \
ret = -1; \
goto err; \
} \
\
for (i = 0; i < nr && p < buf + sizeof(buf); p++, i++) \
((__type *)arg)[i] = __conv(p, &p, 10); \
\
if (i != nr) { \
pr_err("Not enough params for %s (%d != %d)\n", req->name, i, nr); \
goto err; \
} \
\
ret = 0; \
\
err: \
return ret; \
}
#define GEN_SYSCTL_WRITE_FUNC(__type, __fmt) \
static int sysctl_write_##__type(int fd, struct sysctl_req *req, __type *arg, int nr) \
{ \
char buf[1024]; \
int i, ret = -1; \
int off = 0; \
\
for (i = 0; i < nr && off < sizeof(buf) - 1; i++) { \
snprintf(&buf[off], sizeof(buf) - off, __fmt, arg[i]); \
off += strlen(&buf[off]); \
} \
\
if (i != nr) { \
pr_err("Not enough space for %s (%d != %d)\n", req->name, i, nr); \
goto err; \
} \
\
/* trailing spaces in format */ \
while (off > 0 && isspace(buf[off - 1])) \
off--; \
buf[off + 0] = '\n'; \
ret = write(fd, buf, off + 1); \
if (ret < 0) { \
pr_perror("Can't write %s", req->name); \
ret = -1; \
goto err; \
} \
\
ret = 0; \
err: \
return ret; \
}
GEN_SYSCTL_READ_FUNC(u32, strtoul);
GEN_SYSCTL_READ_FUNC(u64, strtoull);
GEN_SYSCTL_READ_FUNC(s32, strtol);
GEN_SYSCTL_WRITE_FUNC(u32, "%u ");
GEN_SYSCTL_WRITE_FUNC(u64, "%" PRIu64 " ");
GEN_SYSCTL_WRITE_FUNC(s32, "%d ");
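/*
 * For reference, a paraphrased (not literal) expansion of
 * GEN_SYSCTL_READ_FUNC(u32, strtoul) above:
 *
 *	static int sysctl_read_u32(int fd, struct sysctl_req *req,
 *				   u32 *arg, int nr)
 *	{
 *		char buf[1024] = { 0 };
 *		...read() the file into buf, then strtoul() each of the
 *		nr expected values into arg[]...
 *	}
 *
 * __SYSCTL_OP() dispatches to these generated helpers based on the
 * CTL_* type of the request.
 */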
static int sysctl_write_char(int fd, struct sysctl_req *req, char *arg, int nr)
{
pr_debug("%s nr %d\n", req->name, nr);
if (dprintf(fd, "%s\n", arg) < 0)
return -1;
return 0;
}
static int sysctl_read_char(int fd, struct sysctl_req *req, char *arg, int nr)
{
int ret = -1;
pr_debug("%s nr %d\n", req->name, nr);
ret = read(fd, arg, nr - 1);
if (ret < 0) {
if (errno != EIO || !(req->flags & CTL_FLAGS_READ_EIO_SKIP))
pr_perror("Can't read %s", req->name);
goto err;
}
arg[ret] = '\0';
ret = 0;
err:
return ret;
}
static int sysctl_userns_arg_size(int type)
{
switch (CTL_TYPE(type)) {
case __CTL_U32A:
return sizeof(u32) * CTL_LEN(type);
case CTL_U32:
return sizeof(u32);
case CTL_32:
return sizeof(s32);
case __CTL_U64A:
return sizeof(u64) * CTL_LEN(type);
case CTL_U64:
return sizeof(u64);
case __CTL_STR:
return sizeof(char) * CTL_LEN(type) + 1;
default:
pr_err("unknown arg type %d\n", type);
/* Ensure overflow to cause an error */
return MAX_UNSFD_MSG_SIZE;
}
}
static int do_sysctl_op(int fd, struct sysctl_req *req, int op)
{
int ret = -1, nr = 1;
switch (CTL_TYPE(req->type)) {
case __CTL_U32A:
nr = CTL_LEN(req->type);
/* fallthrough */
case CTL_U32:
__SYSCTL_OP(ret, fd, req, u32, nr, op);
break;
case CTL_32:
__SYSCTL_OP(ret, fd, req, s32, nr, op);
break;
case __CTL_U64A:
nr = CTL_LEN(req->type);
/* fallthrough */
case CTL_U64:
__SYSCTL_OP(ret, fd, req, u64, nr, op);
break;
case __CTL_STR:
nr = CTL_LEN(req->type);
__SYSCTL_OP(ret, fd, req, char, nr, op);
break;
}
return ret;
}
static int __userns_sysctl_op(void *arg, int proc_fd, pid_t pid)
{
int fd, ret = -1, dir, i, status, *fds = NULL;
struct sysctl_userns_req *userns_req = arg;
int op = userns_req->op;
struct sysctl_req *req, **reqs = NULL;
sigset_t blockmask, oldmask;
pid_t worker;
// fix up the pointer
req = userns_req->reqs = (struct sysctl_req *)&userns_req[1];
/* For files in the IPC/UTS namespaces, restoring is more complicated
* than for net. Unprivileged users cannot even open these files, so
* they must be opened by usernsd. However, the value in the kernel is
	 * changed for the IPC/UTS namespace of the task that write()s to the
	 * open sysctl file (not of the task that opened it). So, we must set
	 * the value from inside the usernsd caller's namespace. We:
*
	 * 1. usernsd opens the sysctl files
* 2. forks a task
* 3. setns()es to the UTS/IPC namespace of the caller
* 4. write()s to the files and exits
*
* For the IPC namespace, since
	 * https://github.com/torvalds/linux/commit/5563cabdde, a user with
	 * enough capability can open IPC sysctl files and write to them. Later
* commit https://github.com/torvalds/linux/commit/1f5c135ee5 and
* https://github.com/torvalds/linux/commit/0889f44e28 bind the IPC
* namespace at the open() time so the changed value does not depend
* on the IPC namespace at the write() time. Also, the permission check
* changes a little bit which makes the above approach unusable but we
* can simply use nonuserns version for restoring as IPC sysctl as the
* restored process currently has enough capability.
*/
dir = open("/proc/sys", O_RDONLY, O_DIRECTORY);
if (dir < 0) {
pr_perror("Can't open sysctl dir");
return -1;
}
fds = xmalloc(sizeof(int) * userns_req->nr_req);
if (!fds)
goto out;
reqs = xmalloc(sizeof(struct sysctl_req *) * userns_req->nr_req);
if (!reqs)
goto out;
memset(fds, -1, sizeof(int) * userns_req->nr_req);
for (i = 0; i < userns_req->nr_req; i++) {
int arg_len = sysctl_userns_arg_size(req->type);
int name_len = strlen((char *)&req[1]) + 1;
int total_len = sizeof(*req) + arg_len + name_len;
int flags;
/* fix up the pointers */
req->name = (char *)&req[1];
req->arg = req->name + name_len;
if (((char *)req) + total_len >= ((char *)userns_req) + MAX_UNSFD_MSG_SIZE) {
pr_err("bad sysctl req %s, too big: %d\n", req->name, total_len);
goto out;
}
if (op == CTL_READ)
flags = O_RDONLY;
else
flags = O_WRONLY;
fd = openat(dir, req->name, flags);
		if (fd < 0) {
			if (errno == ENOENT && (req->flags & CTL_FLAGS_OPTIONAL)) {
				/* advance to the next packed request before skipping it */
				req = (struct sysctl_req *)(((char *)req) + total_len);
				continue;
			}
			pr_perror("Can't open sysctl %s", req->name);
			goto out;
		}
/* save a pointer to the req, so we don't need to recompute its
* location
*/
reqs[i] = req;
fds[i] = fd;
req = (struct sysctl_req *)(((char *)req) + total_len);
}
/*
* Don't let the sigchld_handler() mess with us
* calling waitpid() on the exited worker. The
* same is done in cr_system().
*/
sigemptyset(&blockmask);
sigaddset(&blockmask, SIGCHLD);
sigprocmask(SIG_BLOCK, &blockmask, &oldmask);
worker = fork();
if (worker < 0)
goto out;
if (!worker) {
int nsfd;
const char *nsname = ns_to_string(userns_req->ns);
BUG_ON(!nsname);
nsfd = openat(proc_fd, nsname, O_RDONLY);
if (nsfd < 0) {
pr_perror("failed to open pid %d's ns %s", pid, nsname);
exit(1);
}
if (setns(nsfd, 0) < 0) {
pr_perror("failed to setns to %d's ns %s", pid, nsname);
exit(1);
}
close(nsfd);
		for (i = 0; i < userns_req->nr_req; i++) {
			/* optional sysctls that were absent have no saved fd */
			if (fds[i] < 0)
				continue;
			if (do_sysctl_op(fds[i], reqs[i], op) < 0) {
				if (op != CTL_READ || errno != EIO || !(reqs[i]->flags & CTL_FLAGS_READ_EIO_SKIP))
					exit(1);
			} else {
				/* mark sysctl in question exists */
				reqs[i]->flags |= CTL_FLAGS_HAS;
			}
		}
exit(0);
}
if (waitpid(worker, &status, 0) != worker) {
pr_perror("worker didn't die?");
kill(worker, SIGKILL);
goto out;
}
sigprocmask(SIG_SETMASK, &oldmask, NULL);
if (!WIFEXITED(status) || WEXITSTATUS(status)) {
pr_err("worker failed: %d\n", status);
goto out;
}
ret = 0;
out:
if (fds) {
for (i = 0; i < userns_req->nr_req; i++) {
if (fds[i] < 0)
break;
close_safe(&fds[i]);
}
xfree(fds);
}
if (reqs)
xfree(reqs);
close_safe(&dir);
return ret;
}
/* exit_code = 1 in case nonuserns failed but we want to fall back to the userns approach */
static int __nonuserns_sysctl_op(struct sysctl_req **orig_req, size_t *orig_nr_req, int op)
{
int ret, exit_code = -1;
struct sysctl_req *req = *orig_req;
size_t nr_req = *orig_nr_req;
while (nr_req--) {
int fd;
if (op == CTL_READ)
fd = do_open_proc(PROC_GEN, O_RDONLY, "sys/%s", req->name);
else
fd = do_open_proc(PROC_GEN, O_RDWR, "sys/%s", req->name);
if (fd < 0) {
if (errno == ENOENT && (req->flags & CTL_FLAGS_OPTIONAL)) {
req++;
continue;
}
if (errno == EACCES && (req->flags & CTL_FLAGS_IPC_EACCES_SKIP)) {
/* The remaining requests are restored using userns approach */
*orig_req = req;
*orig_nr_req = nr_req + 1;
exit_code = 1;
goto out;
}
pr_perror("Can't open sysctl %s", req->name);
goto out;
}
ret = do_sysctl_op(fd, req, op);
if (ret) {
if (op != CTL_READ || errno != EIO || !(req->flags & CTL_FLAGS_READ_EIO_SKIP)) {
close(fd);
goto out;
}
} else {
/* mark sysctl in question exists */
req->flags |= CTL_FLAGS_HAS;
}
close(fd);
req++;
}
exit_code = 0;
out:
return exit_code;
}
int sysctl_op(struct sysctl_req *req, size_t nr_req, int op, unsigned int ns)
{
int i, fd, ret;
struct sysctl_userns_req *userns_req;
struct sysctl_req *cur;
if (nr_req == 0)
return 0;
if (ns & ~KNOWN_NS_MASK) {
pr_err("don't know how to restore some namespaces in %u\n", ns);
return -1;
}
/* The way sysctl files behave on open/write depends on the namespace
* they correspond to. If we don't want to interact with something in a
* namespace (e.g. kernel/cap_last_cap is global), we can do this from
* the current process. Similarly, if we're accessing net namespaces,
* we can just do the operation from our current process, since
* anything with CAP_NET_ADMIN can write to the net/ sysctls, and we
* still have that even when restoring in a user ns.
*
* For IPC/UTS, we restore them as described above.
*
* For read operations, we need to copy the values back to return.
* Fortunately, we only do read on dump (or global reads on restore),
* so we can do those in process as well.
*/
if (!ns || ns & CLONE_NEWNET || op == CTL_READ)
return __nonuserns_sysctl_op(&req, &nr_req, op);
	/* Try to use nonuserns for restoring IPC sysctls and fall back to
	 * the userns approach when the returned code is 1.
*/
if (ns & CLONE_NEWIPC && op == CTL_WRITE) {
ret = __nonuserns_sysctl_op(&req, &nr_req, op);
if (ret <= 0)
return ret;
}
/*
* In order to avoid lots of opening of /proc/sys for each struct sysctl_req,
* we encode each array of sysctl_reqs into one contiguous region of memory so
* it can be passed via userns_call if necessary. It looks like this:
*
* struct sysctl_userns_req struct sysctl_req name arg
* ---------------------------------------------------------------------------
* | op | nr_req | reqs | <fields> | name | arg | "the name" | "the arg" ...
* ---------------------------------------------------------------------------
* |____^ |______|__^ ^
* |_______________|
*/
userns_req = alloca(MAX_UNSFD_MSG_SIZE);
userns_req->op = op;
userns_req->nr_req = nr_req;
userns_req->ns = ns;
userns_req->reqs = (struct sysctl_req *)(&userns_req[1]);
cur = userns_req->reqs;
for (i = 0; i < nr_req; i++) {
int arg_len = sysctl_userns_arg_size(req[i].type);
int name_len = strlen(req[i].name) + 1;
int total_len = sizeof(*cur) + arg_len + name_len;
if (((char *)cur) + total_len >= ((char *)userns_req) + MAX_UNSFD_MSG_SIZE) {
pr_err("sysctl msg %s too big: %d\n", req[i].name, total_len);
return -1;
}
/* copy over the non-pointer fields */
cur->type = req[i].type;
cur->flags = req[i].flags;
cur->name = (char *)&cur[1];
strcpy(cur->name, req[i].name);
cur->arg = cur->name + name_len;
memcpy(cur->arg, req[i].arg, arg_len);
cur = (struct sysctl_req *)(((char *)cur) + total_len);
}
fd = open_proc(PROC_SELF, "ns");
if (fd < 0)
return -1;
ret = userns_call(__userns_sysctl_op, 0, userns_req, MAX_UNSFD_MSG_SIZE, fd);
close(fd);
return ret;
}
| 16,281 | 32.228571 | 99 |
c
|
criu
|
criu-master/criu/sysfs_parse.c
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include "cr_options.h"
#include "log.h"
#include "xmalloc.h"
#include "files.h"
#include "proc_parse.h"
#include "util.h"
#include "sysfs_parse.h"
#include "namespaces.h"
#include "mount.h"
/*
* Currently, there are two kernel problems dealing with AUFS
* filesystems. Until these problems are fixed in the kernel,
* we have AUFS support in CRIU to handle the following issues:
*
* 1) /proc/<pid>/mountinfo: The problem is that for AUFS the root field
* of the root entry is missing the pathname (it's only /). For example:
*
* 90 61 0:33 / / rw,relatime - aufs none rw,si=4476a910a24617e6
*
* To handle this issue, the user has to specify the root of the AUFS
* filesystem with the --root command line option.
*
* 2) /proc/<pid>/map_files: The symlinks are absolute pathnames of the
* corresponding *physical* files in the branch they exist. For example,
* for a Docker container using AUFS, a symlink would look like:
* 400000-489000 -> /var/lib/docker/aufs/diff/<LAYER_ID>/bin/<cmd>
*
* Therefore, when we use the link file descriptor vm_file_fd in
* dump_one_reg_file() to read the link, we get the file's physical
* absolute pathname which does not exist relative to the root of the
* mount namespace and even if we used its relative pathname, the dev:ino
* values would be different from the physical file's dev:ino causing the
* dump to fail.
*
* To handle this issue, we figure out the "correct" paths when parsing
* map_files and save it for later use. See fixup_aufs_vma_fd() for
* details.
*/
struct ns_id *aufs_nsid;
static char **aufs_branches;
/*
* Parse out and save the AUFS superblock info in the
* given buffer.
*/
static int parse_aufs_sbinfo(struct mount_info *mi, char *sbinfo, int len)
{
char *cp;
int n;
cp = strstr(mi->options, "si=");
if (!cp) {
pr_err("Cannot find sbinfo in option string %s\n", mi->options);
return -1;
}
/* all ok, copy */
if (len < 4) { /* 4 for "si_" */
pr_err("Buffer of %d bytes too small for sbinfo\n", len);
return -1;
}
strcpy(sbinfo, "si_");
n = 3;
sbinfo += n;
cp += n;
while (isxdigit(*cp) && n < len) {
*sbinfo++ = *cp++;
n++;
}
if (n >= len) {
pr_err("Sbinfo in options string %s too long\n", mi->options);
return -1;
}
*sbinfo = '\0';
return 0;
}
/*
* If the specified path is in a branch, replace it
* with pathname from root.
*/
static int fixup_aufs_path(char *path, int size)
{
char rpath[PATH_MAX];
int n;
int blen;
if (aufs_branches == NULL) {
pr_err("No aufs branches to search for %s\n", path);
return -1;
}
for (n = 0; aufs_branches[n] != NULL; n++) {
blen = strlen(aufs_branches[n]);
if (!strncmp(path, aufs_branches[n], blen))
break;
}
if (aufs_branches[n] == NULL)
return 0; /* not in a branch */
n = snprintf(rpath, PATH_MAX, "%s", &path[blen]);
if (n >= min(PATH_MAX, size)) {
pr_err("Not enough space to replace %s\n", path);
return -1;
}
pr_debug("Replacing %s with %s\n", path, rpath);
strcpy(path, rpath);
return n;
}
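/*
 * Illustrative example (hypothetical layer id): with a branch list
 * containing "/var/lib/docker/aufs/diff/<LAYER_ID>", the physical
 * pathname
 *
 *	/var/lib/docker/aufs/diff/<LAYER_ID>/bin/sh
 *
 * is rewritten in place to "/bin/sh", i.e. the pathname relative to
 * the root of the mount namespace.
 */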
/*
 * Kernel stores pathnames of AUFS branches in the br<n> files in
* the /sys/fs/aufs/si_<sbinfo> directory where <n> denotes a branch
* number and <sbinfo> is a hexadecimal number in %lx format. For
* example:
*
* $ cat /sys/fs/aufs/si_f598876b087ed883/br0
* /path/to/branch0/directory=rw
*
* This function sets up an array of pointers to branch pathnames.
*/
int parse_aufs_branches(struct mount_info *mi)
{
char path[AUFSBR_PATH_LEN];
char *cp;
int n;
int ret;
unsigned int br_num;
unsigned int br_max;
DIR *dp;
FILE *fp;
struct dirent *de;
pr_info("Collecting AUFS branch pathnames ...\n");
if (mi->nsid == 0) {
pr_err("No nsid to parse its aufs branches\n");
return -1;
}
if (mi->nsid == aufs_nsid) {
pr_debug("Using cached aufs branch paths for nsid %p\n", aufs_nsid);
return 0;
}
if (aufs_nsid)
free_aufs_branches();
strcpy(path, SYSFS_AUFS); /* /sys/fs/aufs/ */
if (parse_aufs_sbinfo(mi, &path[sizeof SYSFS_AUFS - 1], SBINFO_LEN) < 0)
return -1;
if ((dp = opendir(path)) == NULL) {
pr_perror("Cannot opendir %s", path);
return -1;
}
/*
* Find out how many branches we have.
*/
br_max = 0;
ret = 0;
while (1) {
errno = 0;
if ((de = readdir(dp)) == NULL) {
if (errno) {
pr_perror("Cannot readdir %s", path);
ret = -1;
}
break;
}
		ret = sscanf(de->d_name, "br%u", &br_num);
if (ret == 1 && br_num > br_max)
br_max = br_num;
}
closedir(dp);
if (ret == -1)
return -1;
/*
* Default AUFS maximum is 127, so 1000 should be plenty.
* If you increase the maximum to more than 3 digits,
* make sure to change AUFSBR_PATH_LEN accordingly.
*/
if (br_max > 999) {
pr_err("Too many branches %d\n", br_max);
return -1;
}
/*
* Allocate an array of pointers to branch pathnames to be read.
* Branches are indexed from 0 and we need a NULL pointer at the end.
*/
aufs_branches = xzalloc((br_max + 2) * sizeof(char *));
if (!aufs_branches)
return -1;
/*
* Now read branch pathnames from the branch files.
*/
n = strlen(path);
for (br_num = 0; br_num <= br_max; br_num++) {
fp = NULL;
ret = snprintf(&path[n], sizeof path - n, "/br%d", br_num);
if (ret >= sizeof path - n) {
pr_err("Buffer overrun creating path for branch %d\n", br_num);
goto err;
}
if ((fp = fopen(path, "r")) == NULL) {
pr_perror("Cannot fopen %s", path);
goto err;
}
if (fscanf(fp, "%ms=", &aufs_branches[br_num]) != 1 || aufs_branches[br_num] == NULL) {
pr_perror("Parse error reading %s", path);
goto err;
}
/* chop off the trailing "=..." stuff */
if ((cp = strchr(aufs_branches[br_num], '=')) == NULL) {
pr_err("Bad format in branch pathname %s\n", aufs_branches[br_num]);
goto err;
}
*cp = '\0';
fclose(fp);
/*
		 * Log branch information for external utilities that
* want to recreate the process's AUFS filesystem
* before calling criu restore.
*
* DO NOT CHANGE this format!
*/
pr_info("%s : %s\n", path, aufs_branches[br_num]);
}
aufs_nsid = mi->nsid;
return 0;
err:
if (fp)
fclose(fp);
free_aufs_branches();
return -1;
}
/*
* AUFS support to compensate for the kernel bug
* exposing branch pathnames in map_files and providing
* a wrong mnt_id value in /proc/<pid>/fdinfo/<fd>.
*
* If the link points inside a branch, save the
* relative pathname from the root of the mount
* namespace as well as the full pathname from
 * global root (/) for later use in dump_filemap()
* and parse_smaps().
*/
int fixup_aufs_vma_fd(struct vma_area *vma, int vm_file_fd)
{
char path[PATH_MAX];
int len;
path[0] = '.';
	len = read_fd_link(vm_file_fd, &path[1], sizeof path - 1);
if (len < 0)
return -1;
len = fixup_aufs_path(&path[1], sizeof path - 1);
if (len <= 0)
return len;
vma->aufs_rpath = xmalloc(len + 2);
if (!vma->aufs_rpath)
return -1;
strcpy(vma->aufs_rpath, path);
if (opts.root) {
/* skip ./ in path */
vma->aufs_fpath = xsprintf("%s/%s", opts.root, &path[2]);
if (!vma->aufs_fpath)
return -1;
}
pr_debug("Saved AUFS paths %s and %s\n", vma->aufs_rpath, vma->aufs_fpath);
if (stat(vma->aufs_fpath, vma->vmst) < 0) {
pr_perror("Failed stat on map %" PRIx64 " (%s)", vma->e->start, vma->aufs_fpath);
return -1;
}
/* tell parse_smap() not to call get_fd_mntid() */
vma->mnt_id = -1;
return len;
}
void free_aufs_branches(void)
{
int n;
if (aufs_branches) {
for (n = 0; aufs_branches[n] != NULL; n++)
xfree(aufs_branches[n]);
xfree(aufs_branches);
aufs_branches = NULL;
}
aufs_nsid = NULL;
}
| 7,739 | 22.815385 | 89 |
c
|
criu
|
criu-master/criu/timens.c
|
#include <time.h>
#include <sched.h>
#include "types.h"
#include "proc_parse.h"
#include "namespaces.h"
#include "timens.h"
#include "cr_options.h"
#include "protobuf.h"
#include "images/timens.pb-c.h"
int dump_time_ns(int ns_id)
{
struct cr_img *img;
TimensEntry te = TIMENS_ENTRY__INIT;
Timespec b = TIMESPEC__INIT, m = TIMESPEC__INIT;
struct timespec ts;
int ret;
img = open_image(CR_FD_TIMENS, O_DUMP, ns_id);
if (!img)
return -1;
clock_gettime(CLOCK_MONOTONIC, &ts);
te.monotonic = &m;
te.monotonic->tv_sec = ts.tv_sec;
te.monotonic->tv_nsec = ts.tv_nsec;
clock_gettime(CLOCK_BOOTTIME, &ts);
te.boottime = &b;
te.boottime->tv_sec = ts.tv_sec;
te.boottime->tv_nsec = ts.tv_nsec;
ret = pb_write_one(img, &te, PB_TIMENS);
close_image(img);
return ret < 0 ? -1 : 0;
}
static void normalize_timespec(struct timespec *ts)
{
while (ts->tv_nsec >= NSEC_PER_SEC) {
ts->tv_nsec -= NSEC_PER_SEC;
++ts->tv_sec;
}
while (ts->tv_nsec < 0) {
ts->tv_nsec += NSEC_PER_SEC;
--ts->tv_sec;
}
}
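/*
 * The offsets written by prepare_timens() below follow from
 *
 *	new_off = dumped_clock - (clock_now - prev_off)
 *
 * i.e. first undo the offset already visible in the current namespace
 * to get back the raw host clock, then shift it so that the new
 * namespace reads the value saved at dump time. normalize_timespec()
 * folds the nanosecond borrow/carry of that arithmetic into seconds.
 */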
int prepare_timens(int id)
{
int exit_code = -1;
int ret, fd = -1;
struct cr_img *img;
TimensEntry *te;
struct timespec ts;
struct timespec prev_moff = {}, prev_boff = {};
if (opts.unprivileged)
return 0;
img = open_image(CR_FD_TIMENS, O_RSTR, id);
if (!img)
return -1;
if (id == 0 && empty_image(img)) {
pr_warn("Clocks values have not been dumped\n");
close_image(img);
return 0;
}
ret = pb_read_one(img, &te, PB_TIMENS);
close_image(img);
if (ret < 0)
goto err;
if (unshare(CLONE_NEWTIME)) {
pr_perror("Unable to create a new time namespace");
return -1;
}
if (parse_timens_offsets(&prev_boff, &prev_moff))
goto err;
fd = open_proc_rw(PROC_SELF, "timens_offsets");
if (fd < 0)
goto err;
clock_gettime(CLOCK_MONOTONIC, &ts);
ts.tv_sec = ts.tv_sec - prev_moff.tv_sec;
ts.tv_nsec = ts.tv_nsec - prev_moff.tv_nsec;
ts.tv_sec = te->monotonic->tv_sec - ts.tv_sec;
ts.tv_nsec = te->monotonic->tv_nsec - ts.tv_nsec;
normalize_timespec(&ts);
pr_debug("timens: monotonic %ld %ld\n", ts.tv_sec, ts.tv_nsec);
if (dprintf(fd, "%d %ld %ld\n", CLOCK_MONOTONIC, ts.tv_sec, ts.tv_nsec) < 0) {
pr_perror("Unable to set a monotonic clock offset");
goto err;
}
clock_gettime(CLOCK_BOOTTIME, &ts);
ts.tv_sec = ts.tv_sec - prev_boff.tv_sec;
ts.tv_nsec = ts.tv_nsec - prev_boff.tv_nsec;
ts.tv_sec = te->boottime->tv_sec - ts.tv_sec;
ts.tv_nsec = te->boottime->tv_nsec - ts.tv_nsec;
normalize_timespec(&ts);
pr_debug("timens: boottime %ld %ld\n", ts.tv_sec, ts.tv_nsec);
if (dprintf(fd, "%d %ld %ld\n", CLOCK_BOOTTIME, ts.tv_sec, ts.tv_nsec) < 0) {
pr_perror("Unable to set a boottime clock offset");
goto err;
}
timens_entry__free_unpacked(te, NULL);
close_safe(&fd);
fd = open_proc(PROC_SELF, "ns/time_for_children");
if (fd < 0) {
pr_perror("Unable to open ns/time_for_children");
goto err;
}
if (switch_ns_by_fd(fd, &time_ns_desc, NULL))
goto err;
exit_code = 0;
err:
close_safe(&fd);
return exit_code;
}
struct ns_desc time_ns_desc = NS_DESC_ENTRY(CLONE_NEWTIME, "time");
struct ns_desc time_for_children_ns_desc = NS_DESC_ENTRY(CLONE_NEWTIME, "time_for_children");
| 3,166 | 22.116788 | 93 |
c
|
criu
|
criu-master/criu/timerfd.c
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/timerfd.h>
#include <sys/ioctl.h>
#include "protobuf.h"
#include "images/timerfd.pb-c.h"
#include "fdinfo.h"
#include "rst-malloc.h"
#include "cr_options.h"
#include "restorer.h"
#include "timerfd.h"
#include "pstree.h"
#include "files.h"
#include "imgset.h"
#include "util.h"
#include "log.h"
#include "common/bug.h"
#undef LOG_PREFIX
#define LOG_PREFIX "timerfd: "
struct timerfd_dump_arg {
u32 id;
const struct fd_parms *p;
};
struct timerfd_info {
TimerfdEntry *tfe;
struct file_desc d;
int t_fd;
struct list_head rlist;
};
static LIST_HEAD(rst_timerfds);
int check_timerfd(void)
{
int fd, ret = -1;
fd = timerfd_create(CLOCK_MONOTONIC, 0);
if (fd < 0) {
pr_perror("timerfd_create failed");
return -1;
} else {
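		/* Probe with a NULL argument: EFAULT means the kernel knows
		 * this ioctl (it tried to copy the argument in), i.e. the
		 * timerfd c/r API is present.
		 */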
ret = ioctl(fd, TFD_IOC_SET_TICKS, NULL);
if (ret < 0) {
if (errno != EFAULT)
pr_perror("No timerfd support for c/r");
else
ret = 0;
}
}
close(fd);
return ret;
}
int is_timerfd_link(char *link)
{
return is_anon_link_type(link, "[timerfd]");
}
static int dump_one_timerfd(int lfd, u32 id, const struct fd_parms *p)
{
TimerfdEntry tfe = TIMERFD_ENTRY__INIT;
FileEntry fe = FILE_ENTRY__INIT;
if (parse_fdinfo(lfd, FD_TYPES__TIMERFD, &tfe))
return -1;
tfe.id = id;
tfe.flags = p->flags;
tfe.fown = (FownEntry *)&p->fown;
pr_info("Dumping id %#x clockid %d it_value(%llu, %llu) it_interval(%llu, %llu)\n", tfe.id, tfe.clockid,
(unsigned long long)tfe.vsec, (unsigned long long)tfe.vnsec, (unsigned long long)tfe.isec,
(unsigned long long)tfe.insec);
fe.type = FD_TYPES__TIMERFD;
fe.id = tfe.id;
fe.tfd = &tfe;
return pb_write_one(img_from_set(glob_imgset, CR_FD_FILES), &fe, PB_FILE);
}
const struct fdtype_ops timerfd_dump_ops = {
.type = FD_TYPES__TIMERFD,
.dump = dump_one_timerfd,
};
int prepare_timerfds(struct task_restore_args *ta)
{
struct timerfd_info *ti;
struct restore_timerfd *t;
ta->timerfd = (struct restore_timerfd *)rst_mem_align_cpos(RM_PRIVATE);
ta->timerfd_n = 0;
list_for_each_entry(ti, &rst_timerfds, rlist) {
TimerfdEntry *tfe = ti->tfe;
t = rst_mem_alloc(sizeof(*t), RM_PRIVATE);
if (!t)
return -1;
t->id = tfe->id;
t->fd = ti->t_fd;
t->clockid = tfe->clockid;
t->ticks = (unsigned long)tfe->ticks;
t->settime_flags = tfe->settime_flags;
t->val.it_interval.tv_sec = (time_t)tfe->isec;
t->val.it_interval.tv_nsec = (long)tfe->insec;
t->val.it_value.tv_sec = (time_t)tfe->vsec;
t->val.it_value.tv_nsec = (long)tfe->vnsec;
ta->timerfd_n++;
}
return 0;
}
static int timerfd_open(struct file_desc *d, int *new_fd)
{
struct timerfd_info *info;
TimerfdEntry *tfe;
int tmp = -1;
info = container_of(d, struct timerfd_info, d);
tfe = info->tfe;
pr_info("Creating timerfd id %#x clockid %d settime_flags %x ticks %llu "
"it_value(%llu, %llu) it_interval(%llu, %llu)\n",
tfe->id, tfe->clockid, tfe->settime_flags, (unsigned long long)tfe->ticks,
(unsigned long long)tfe->vsec, (unsigned long long)tfe->vnsec, (unsigned long long)tfe->isec,
(unsigned long long)tfe->insec);
tmp = timerfd_create(tfe->clockid, 0);
if (tmp < 0) {
pr_perror("Can't create for %#x", tfe->id);
return -1;
}
if (rst_file_params(tmp, tfe->fown, tfe->flags)) {
pr_perror("Can't restore params for %#x", tfe->id);
goto err_close;
}
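/* Remember the fd number this timerfd will get in the restored task */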
info->t_fd = file_master(d)->fe->fd;
list_add_tail(&info->rlist, &rst_timerfds);
*new_fd = tmp;
return 0;
err_close:
close_safe(&tmp);
return -1;
}
static struct file_desc_ops timerfd_desc_ops = {
.type = FD_TYPES__TIMERFD,
.open = timerfd_open,
};
static int collect_one_timerfd(void *o, ProtobufCMessage *msg, struct cr_img *i)
{
struct timerfd_info *info = o;
info->tfe = pb_msg(msg, TimerfdEntry);
if (verify_timerfd(info->tfe)) {
pr_err("Verification failed for %#x\n", info->tfe->id);
return -1;
}
info->t_fd = -1;
return file_desc_add(&info->d, info->tfe->id, &timerfd_desc_ops);
}
struct collect_image_info timerfd_cinfo = {
.fd_type = CR_FD_TIMERFD,
.pb_type = PB_TIMERFD,
.priv_size = sizeof(struct timerfd_info),
.collect = collect_one_timerfd,
};
| 4,139 | 21.139037 | 105 |
c
|
criu
|
criu-master/criu/tls.c
|
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/limits.h>
#include <gnutls/gnutls.h>
#include "cr_options.h"
#include "xmalloc.h"
/* Compatibility with GnuTLS version < 3.5 */
#ifndef GNUTLS_E_CERTIFICATE_VERIFICATION_ERROR
#define GNUTLS_E_CERTIFICATE_VERIFICATION_ERROR GNUTLS_E_CERTIFICATE_ERROR
#endif
#undef LOG_PREFIX
#define LOG_PREFIX "tls: "
#define CRIU_PKI_DIR SYSCONFDIR "/pki"
#define CRIU_CACERT CRIU_PKI_DIR "/CA/cacert.pem"
#define CRIU_CACRL CRIU_PKI_DIR "/CA/cacrl.pem"
#define CRIU_CERT CRIU_PKI_DIR "/criu/cert.pem"
#define CRIU_KEY CRIU_PKI_DIR "/criu/private/key.pem"
#define SPLICE_BUF_SZ_MAX (PIPE_BUF * 100)
#define tls_perror(msg, ret) pr_err("%s: %s\n", msg, gnutls_strerror(ret))
static gnutls_session_t session;
static gnutls_certificate_credentials_t x509_cred;
static int tls_sk = -1;
static int tls_sk_flags = 0;
void tls_terminate_session(bool async)
{
int ret;
if (!opts.tls)
return;
if (session) {
do {
/*
* Initiate a connection shutdown; with async set we use
* GNUTLS_SHUT_WR and don't wait for the peer to close
* its side of the connection.
*/
ret = gnutls_bye(session, async ? GNUTLS_SHUT_WR : GNUTLS_SHUT_RDWR);
} while (ret == GNUTLS_E_AGAIN || ret == GNUTLS_E_INTERRUPTED);
/* Free the session object */
gnutls_deinit(session);
}
tls_sk = -1;
/* Free the credentials object */
if (x509_cred)
gnutls_certificate_free_credentials(x509_cred);
}
ssize_t tls_send(const void *buf, size_t len, int flags)
{
ssize_t ret;
tls_sk_flags = flags;
ret = gnutls_record_send(session, buf, len);
tls_sk_flags = 0;
if (ret < 0) {
switch (ret) {
case GNUTLS_E_AGAIN:
errno = EAGAIN;
break;
case GNUTLS_E_INTERRUPTED:
errno = EINTR;
break;
case GNUTLS_E_UNEXPECTED_PACKET_LENGTH:
errno = ENOMSG;
break;
default:
tls_perror("Failed to send data", ret);
errno = EIO;
break;
}
}
return ret;
}
/*
* Read data from a file descriptor, then encrypt and send it with GnuTLS.
* This function is used for cases when we would otherwise use splice()
* to transfer data from a PIPE to a TCP socket.
*/
int tls_send_data_from_fd(int fd, unsigned long len)
{
ssize_t copied;
unsigned long buf_size = min(len, (unsigned long)SPLICE_BUF_SZ_MAX);
void *buf = xmalloc(buf_size);
if (!buf)
return -1;
while (len > 0) {
ssize_t ret, sent;
copied = read(fd, buf, min(len, buf_size));
if (copied <= 0) {
pr_perror("Can't read from pipe");
goto err;
}
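/* gnutls sends at most one TLS record per call, so loop until the chunk is fully sent */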
for (sent = 0; sent < copied; sent += ret) {
ret = tls_send((buf + sent), (copied - sent), 0);
if (ret < 0) {
tls_perror("Failed sending data", ret);
goto err;
}
}
len -= copied;
}
err:
xfree(buf);
return (len > 0);
}
ssize_t tls_recv(void *buf, size_t len, int flags)
{
ssize_t ret;
tls_sk_flags = flags;
ret = gnutls_record_recv(session, buf, len);
tls_sk_flags = 0;
/* Check if there are any data to receive in the gnutls buffers. */
if (flags == MSG_DONTWAIT && (ret == GNUTLS_E_AGAIN || ret == GNUTLS_E_INTERRUPTED)) {
size_t pending = gnutls_record_check_pending(session);
if (pending > 0) {
pr_debug("Receiving pending data (%zu bytes)\n", pending);
ret = gnutls_record_recv(session, buf, len);
}
}
if (ret < 0) {
switch (ret) {
case GNUTLS_E_AGAIN:
errno = EAGAIN;
break;
case GNUTLS_E_INTERRUPTED:
errno = EINTR;
break;
default:
tls_perror("Failed receiving data", ret);
errno = EIO;
break;
}
ret = -1;
}
return ret;
}
/*
* Read and decrypt data with GnuTLS, then write it to a file descriptor.
* This function is used for cases when we would otherwise use splice()
* to transfer data from a TCP socket to a PIPE.
*/
int tls_recv_data_to_fd(int fd, unsigned long len)
{
gnutls_packet_t packet;
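/* Receive one TLS record at a time via the packet API, avoiding an extra copy */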
while (len > 0) {
ssize_t ret, w;
gnutls_datum_t pdata;
ret = gnutls_record_recv_packet(session, &packet);
if (ret == 0) {
pr_info("Connection closed by peer\n");
break;
} else if (ret < 0) {
tls_perror("Received corrupted data", ret);
break;
}
gnutls_packet_get(packet, &pdata, NULL);
for (w = 0; w < pdata.size; w += ret) {
ret = write(fd, (pdata.data + w), (pdata.size - w));
if (ret < 0) {
pr_perror("Failed writing to fd");
goto err;
}
}
len -= pdata.size;
}
err:
gnutls_packet_deinit(packet);
return (len > 0);
}
static inline void tls_handshake_verification_status_print(int ret, unsigned status)
{
gnutls_datum_t out;
int type = gnutls_certificate_type_get(session);
if (!gnutls_certificate_verification_status_print(status, type, &out, 0))
pr_err("%s\n", out.data);
gnutls_free(out.data);
}
static int tls_x509_verify_peer_cert(void)
{
int ret;
unsigned status;
const char *hostname = NULL;
if (!opts.tls_no_cn_verify)
hostname = opts.addr;
ret = gnutls_certificate_verify_peers3(session, hostname, &status);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Unable to verify TLS peer", ret);
return -1;
}
if (status != 0) {
pr_err("Invalid certificate\n");
tls_handshake_verification_status_print(GNUTLS_E_CERTIFICATE_VERIFICATION_ERROR, status);
return -1;
}
return 0;
}
static int tls_handshake(void)
{
int ret = -1;
while (ret != GNUTLS_E_SUCCESS) {
/* Establish TLS session */
ret = gnutls_handshake(session);
if (gnutls_error_is_fatal(ret)) {
tls_perror("TLS handshake failed", ret);
return -1;
}
}
pr_info("TLS handshake completed\n");
return 0;
}
static int tls_x509_setup_creds(void)
{
int ret;
char *cacert = CRIU_CACERT;
char *cacrl = CRIU_CACRL;
char *cert = CRIU_CERT;
char *key = CRIU_KEY;
gnutls_x509_crt_fmt_t pem = GNUTLS_X509_FMT_PEM;
if (opts.tls_cacert)
cacert = opts.tls_cacert;
if (opts.tls_cacrl)
cacrl = opts.tls_cacrl;
if (opts.tls_cert)
cert = opts.tls_cert;
if (opts.tls_key)
key = opts.tls_key;
/* Load the trusted CA certificates */
ret = gnutls_certificate_allocate_credentials(&x509_cred);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Failed to allocate x509 credentials", ret);
return -1;
}
if (!opts.tls_cacert) {
ret = gnutls_certificate_set_x509_system_trust(x509_cred);
if (ret < 0) {
tls_perror("Failed to load default trusted CAs", ret);
return -1;
}
}
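/* gnutls returns the number of processed certificates, so zero means nothing was loaded from cacert */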
ret = gnutls_certificate_set_x509_trust_file(x509_cred, cacert, pem);
if (ret == 0) {
pr_info("No trusted CA certificates added (%s)\n", cacert);
if (opts.tls_cacert)
return -1;
}
if (!access(cacrl, R_OK)) {
ret = gnutls_certificate_set_x509_crl_file(x509_cred, cacrl, pem);
if (ret < 0) {
tls_perror("Can't set certificate revocation list", ret);
return -1;
}
} else if (opts.tls_cacrl) {
pr_perror("Can't read certificate revocation list %s", cacrl);
return -1;
}
ret = gnutls_certificate_set_x509_key_file(x509_cred, cert, key, pem);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Failed to set certificate/private key pair", ret);
return -1;
}
return 0;
}
/**
* A push callback used by gnutls to send data. It returns a positive
* number indicating the number of bytes sent, and -1 on error.
*/
static ssize_t _tls_push_cb(void *p, const void *data, size_t sz)
{
int fd = *(int *)(p);
ssize_t ret = send(fd, data, sz, tls_sk_flags);
if (ret < 0 && errno != EAGAIN) {
int _errno = errno;
pr_perror("Push callback send failed");
errno = _errno;
}
return ret;
}
/**
* A callback function used by gnutls to receive data.
* It returns 0 on connection termination, a positive number
* indicating the number of bytes received, and -1 on error.
*/
static ssize_t _tls_pull_cb(void *p, void *data, size_t sz)
{
int fd = *(int *)(p);
ssize_t ret = recv(fd, data, sz, tls_sk_flags);
if (ret < 0 && errno != EAGAIN) {
int _errno = errno;
pr_perror("Pull callback recv failed");
errno = _errno;
}
return ret;
}
static int tls_x509_setup_session(unsigned int flags)
{
int ret;
/* Create the session object */
ret = gnutls_init(&session, flags);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Failed to initialize session", ret);
return -1;
}
/* Install the trusted certificates */
ret = gnutls_credentials_set(session, GNUTLS_CRD_CERTIFICATE, x509_cred);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Failed to set session credentials", ret);
return -1;
}
/* Configure the cipher preferences */
ret = gnutls_set_default_priority(session);
if (ret != GNUTLS_E_SUCCESS) {
tls_perror("Failed to set priority", ret);
return -1;
}
/* Associate the socket with the session object */
gnutls_transport_set_ptr(session, &tls_sk);
/* Set a push function for gnutls to use to send data */
gnutls_transport_set_push_function(session, _tls_push_cb);
/* set a pull function for gnutls to use to receive data */
gnutls_transport_set_pull_function(session, _tls_pull_cb);
if (flags == GNUTLS_SERVER) {
/* Require client certificate */
gnutls_certificate_server_set_request(session, GNUTLS_CERT_REQUIRE);
/* Do not advertise trusted CAs to the client */
gnutls_certificate_send_x509_rdn_sequence(session, 1);
}
return 0;
}
int tls_x509_init(int sockfd, bool is_server)
{
if (!opts.tls)
return 0;
tls_sk = sockfd;
if (tls_x509_setup_creds())
goto err;
if (tls_x509_setup_session(is_server ? GNUTLS_SERVER : GNUTLS_CLIENT))
goto err;
if (tls_handshake())
goto err;
if (tls_x509_verify_peer_cert())
goto err;
return 0;
err:
tls_terminate_session(true);
return -1;
}
| 9,361 | 22.116049 | 91 |
c
|
criu
|
criu-master/criu/vdso-compat.c
|
#include <sys/syscall.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include "types.h"
#include "parasite-syscall.h"
#include "parasite.h"
#include "vdso.h"
static void exit_on(int ret, int err_fd, char *reason)
{
if (ret) {
syscall(__NR_write, err_fd, reason, strlen(reason));
syscall(__NR_exit, ret);
__builtin_unreachable();
}
}
/*
* Because of restrictions of ARCH_MAP_VDSO_* API, new vDSO blob
* can be mapped only if there is no vDSO blob present for a process.
* This is a helper process, it unmaps 64-bit vDSO and maps 32-bit vDSO.
* Then it copies vDSO blob to shared with CRIU mapping.
*
* The purpose is to fill compat vdso's symtable (vdso_compat_rt).
* It's an optimization to fill symtable only once at CRIU restore
* for all restored tasks.
*
* @native - 64-bit vDSO blob (for easy unmap)
* @pipe_fd - to get size of compat blob from /proc/.../maps
* @err_fd - to print error messages
* @vdso_buf, buf_size - shared with CRIU buffer
*
* WARN: This helper shouldn't call pr_err() or issue any syscall
* through a Glibc wrapper function - it may very likely blow up.
*/
void compat_vdso_helper(struct vdso_maps *native, int pipe_fd, int err_fd, void *vdso_buf, size_t buf_size)
{
void *vdso_addr;
long vdso_size;
long ret;
if (native->vdso_start != VDSO_BAD_ADDR) {
ret = syscall(__NR_munmap, native->vdso_start, native->sym.vdso_size);
exit_on(ret, err_fd, "Error: Failed to unmap native vdso\n");
}
if (native->vvar_start != VVAR_BAD_ADDR) {
ret = syscall(__NR_munmap, native->vvar_start, native->sym.vvar_size);
exit_on(ret, err_fd, "Error: Failed to unmap native vvar\n");
}
ret = syscall(__NR_arch_prctl, ARCH_MAP_VDSO_32, native->vdso_start);
if (ret < 0)
exit_on(ret, err_fd, "Error: ARCH_MAP_VDSO failed\n");
vdso_size = ret;
if (vdso_size > buf_size)
exit_on(-1, err_fd, "Error: Compatible vdso's size is bigger than reserved buf\n");
/* Stop so CRIU could parse smaps to find 32-bit vdso's size */
ret = syscall(__NR_kill, syscall(__NR_getpid), SIGSTOP);
exit_on(ret, err_fd, "Error: Can't stop myself with SIGSTOP (having a good time)\n");
ret = syscall(__NR_read, pipe_fd, &vdso_addr, sizeof(void *));
if (ret != sizeof(void *))
exit_on(-1, err_fd, "Error: Can't read size of mapped vdso from pipe\n");
memcpy(vdso_buf, vdso_addr, vdso_size);
syscall(__NR_exit, 0);
}
| 2,372 | 31.506849 | 107 |
c
|
criu
|
criu-master/criu/vdso.c
|
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <elf.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include "types.h"
#include "parasite-syscall.h"
#include "parasite.h"
#include "common/compiler.h"
#include "kerndat.h"
#include "vdso.h"
#include "util.h"
#include "criu-log.h"
#include "mem.h"
#include "vma.h"
#include <compel/plugins/std/syscall.h>
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
u64 vdso_pfn = VDSO_BAD_PFN;
struct vdso_maps vdso_maps = VDSO_MAPS_INIT;
struct vdso_maps vdso_maps_compat = VDSO_MAPS_INIT;
/*
* Starting with kernel 3.16 the [vdso]/[vvar] marks are reported
* correctly even when the areas are remapped into a new place.
* On previous kernels we need to check whether a vma is the vdso
* by other means:
* - if pagemap is present, by pfn
* - by parsing the ELF and filling the vdso symtable otherwise
*/
enum vdso_check_t {
/* from slowest to fastest */
VDSO_CHECK_SYMS = 0,
VDSO_CHECK_PFN,
VDSO_NO_CHECK,
};
static enum vdso_check_t get_vdso_check_type(struct parasite_ctl *ctl)
{
/*
* ia32 C/R depends on mremap() for vdso patches (v4.8),
* so we can omit any check and be sure that "[vdso]"
* hint stays in /proc/../maps file and is correct.
*/
if (!compel_mode_native(ctl)) {
pr_info("Don't check vdso for compat task\n");
return VDSO_NO_CHECK;
}
if (kdat.vdso_hint_reliable) {
pr_info("vDSO hint is reliable - omit checking\n");
return VDSO_NO_CHECK;
}
if (kdat.pmap == PM_FULL) {
pr_info("Check vdso by pfn from pagemap\n");
return VDSO_CHECK_PFN;
}
pr_info("Pagemap is unavailable, check vdso by filling symtable\n");
return VDSO_CHECK_SYMS;
}
static int check_vdso_by_pfn(int pagemap_fd, struct vma_area *vma, bool *has_vdso_pfn)
{
u64 pfn = VDSO_BAD_PFN;
if (vaddr_to_pfn(pagemap_fd, vma->e->start, &pfn))
return -1;
if (!pfn) {
pr_err("Unexpected page frame number 0\n");
return -1;
}
*has_vdso_pfn = (pfn == vdso_pfn && pfn != VDSO_BAD_PFN);
return 0;
}
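/* Returns true when the vma can be neither vvar nor vdso */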
static bool not_vvar_or_vdso(struct vma_area *vma)
{
if (!vma_area_is(vma, VMA_AREA_REGULAR))
return true;
if (vma_area_is(vma, VMA_FILE_SHARED))
return true;
if (vma_area_is(vma, VMA_FILE_PRIVATE))
return true;
if (vma->e->start > kdat.task_size)
return true;
if (vma->e->flags & MAP_GROWSDOWN)
return true;
BUILD_BUG_ON(!(VDSO_PROT & VVAR_PROT));
if ((vma->e->prot & VVAR_PROT) != VVAR_PROT)
return true;
return false;
}
/* Contains addresses from vdso mark */
struct vdso_quarter {
unsigned long orig_vdso;
unsigned long orig_vvar;
unsigned long rt_vdso;
unsigned long rt_vvar;
};
static void drop_rt_vdso(struct vm_area_list *vma_area_list, struct vdso_quarter *addr, struct vma_area *rt_vdso_marked)
{
struct vma_area *rt_vvar_marked = NULL;
struct vma_area *vma;
if (!rt_vdso_marked)
return;
/*
* There is a marked vdso; it means such a vdso is autogenerated
* and must be dropped from the vma list.
*/
pr_debug("vdso: Found marked at %lx (orig vDSO at %lx VVAR at %lx)\n", (long)rt_vdso_marked->e->start,
addr->orig_vdso, addr->orig_vvar);
/*
* Don't forget to restore the proxy vdso/vvar status, since
* they're unknown to the kernel.
* While at it, search for the rt-vvar vma to remove it later.
*/
list_for_each_entry(vma, &vma_area_list->h, list) {
if (vma->e->start == addr->orig_vdso) {
vma->e->status |= VMA_AREA_REGULAR | VMA_AREA_VDSO;
pr_debug("vdso: Restore orig vDSO status at %lx\n", (long)vma->e->start);
} else if (vma->e->start == addr->orig_vvar) {
vma->e->status |= VMA_AREA_REGULAR | VMA_AREA_VVAR;
pr_debug("vdso: Restore orig VVAR status at %lx\n", (long)vma->e->start);
} else if (addr->rt_vvar != VVAR_BAD_ADDR && addr->rt_vvar == vma->e->start) {
BUG_ON(rt_vvar_marked);
if (not_vvar_or_vdso(vma)) {
pr_warn("Mark in rt-vdso points to vma, that doesn't look like vvar - skipping unmap\n");
continue;
}
rt_vvar_marked = vma;
}
}
pr_debug("vdso: Dropping marked vdso at %lx\n", (long)rt_vdso_marked->e->start);
list_del(&rt_vdso_marked->list);
xfree(rt_vdso_marked);
vma_area_list->nr--;
if (rt_vvar_marked) {
pr_debug("vdso: Dropping marked vvar at %lx\n", (long)rt_vvar_marked->e->start);
list_del(&rt_vvar_marked->list);
xfree(rt_vvar_marked);
vma_area_list->nr--;
}
}
/*
* We need to poke every potentially marked vma, otherwise, if
* the task never called any vdso function, the page frame
* number won't be reported.
*
* Moreover, if page frame numbers are not accessible, we have
* to scan the vma zone for the vDSO ELF structure, which is
* going to be the slow path.
*/
static int check_if_vma_is_vdso(enum vdso_check_t vcheck, int pagemap_fd, struct parasite_ctl *ctl,
struct vma_area *vma, struct vma_area **rt_vdso_marked, struct vdso_quarter *addr)
{
struct parasite_vdso_vma_entry *args;
bool has_vdso_pfn = false;
args = compel_parasite_args(ctl, struct parasite_vdso_vma_entry);
if (not_vvar_or_vdso(vma))
return 0;
if ((vma->e->prot & VDSO_PROT) != VDSO_PROT)
return 0;
args->start = vma->e->start;
args->len = vma_area_len(vma);
args->try_fill_symtable = (vcheck == VDSO_CHECK_SYMS);
args->is_vdso = false;
if (compel_rpc_call_sync(PARASITE_CMD_CHECK_VDSO_MARK, ctl)) {
pr_err("Parasite failed to poke for mark\n");
return -1;
}
if (unlikely(args->is_marked)) {
if (*rt_vdso_marked) {
pr_err("Ow! Second vdso mark detected!\n");
return -1;
}
*rt_vdso_marked = vma;
addr->orig_vdso = args->orig_vdso_addr;
addr->orig_vvar = args->orig_vvar_addr;
addr->rt_vvar = args->rt_vvar_addr;
return 0;
}
if (vcheck == VDSO_NO_CHECK)
return 0;
if (vcheck == VDSO_CHECK_PFN) {
if (check_vdso_by_pfn(pagemap_fd, vma, &has_vdso_pfn) < 0) {
pr_err("Failed checking vdso by pfn\n");
return -1;
}
}
if (has_vdso_pfn || args->is_vdso) {
if (!vma_area_is(vma, VMA_AREA_VDSO)) {
pr_debug("Restore vDSO status by pfn/symtable at %lx\n", (long)vma->e->start);
vma->e->status |= VMA_AREA_VDSO;
}
} else {
if (unlikely(vma_area_is(vma, VMA_AREA_VDSO))) {
pr_debug("Drop mishinted vDSO status at %lx\n", (long)vma->e->start);
vma->e->status &= ~VMA_AREA_VDSO;
}
}
return 0;
}
/*
* The VMAs list might have proxy vdso/vvar areas left
* from a previous dump/restore cycle, so we need to detect
* them and eliminate them from the VMAs list; they will be
* generated again on restore if needed.
*/
int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid, struct vm_area_list *vma_area_list)
{
struct vma_area *rt_vdso_marked = NULL;
struct vdso_quarter addr = {
.orig_vdso = VDSO_BAD_ADDR,
.orig_vvar = VVAR_BAD_ADDR,
.rt_vdso = VDSO_BAD_ADDR,
.rt_vvar = VVAR_BAD_ADDR,
};
enum vdso_check_t vcheck;
struct vma_area *vma;
int fd = -1;
/* vDSO is not provided by kernel */
if (kdat.vdso_sym.vdso_size == VDSO_BAD_SIZE)
return 0;
vcheck = get_vdso_check_type(ctl);
if (vcheck == VDSO_CHECK_PFN) {
BUG_ON(vdso_pfn == VDSO_BAD_PFN);
fd = open_proc(pid, "pagemap");
if (fd < 0)
return -1;
}
list_for_each_entry(vma, &vma_area_list->h, list) {
/*
* Defer handling the marked vdso until we have walked over
* all vmas and restored the status of potentially remapped
* vDSO areas.
*/
if (check_if_vma_is_vdso(vcheck, fd, ctl, vma, &rt_vdso_marked, &addr)) {
close_safe(&fd);
return -1;
}
}
drop_rt_vdso(vma_area_list, &addr, rt_vdso_marked);
close_safe(&fd);
return 0;
}
static int vdso_parse_maps(pid_t pid, struct vdso_maps *s)
{
int exit_code = -1;
char *buf;
struct bfd f;
*s = (struct vdso_maps)VDSO_MAPS_INIT;
f.fd = open_proc(pid, "maps");
if (f.fd < 0)
return -1;
if (bfdopenr(&f))
goto err;
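/* Scan the maps file line by line for the [vdso] and [vvar] entries */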
while (1) {
unsigned long start, end;
char *has_vdso, *has_vvar;
buf = breadline(&f);
if (buf == NULL)
break;
if (IS_ERR(buf))
goto err;
has_vdso = strstr(buf, "[vdso]");
if (!has_vdso)
has_vvar = strstr(buf, "[vvar]");
else
has_vvar = NULL;
if (!has_vdso && !has_vvar)
continue;
if (sscanf(buf, "%lx-%lx", &start, &end) != 2) {
pr_err("Can't find vDSO/VVAR bounds\n");
goto err;
}
if (has_vdso) {
if (s->vdso_start != VDSO_BAD_ADDR) {
pr_err("Got second vDSO entry\n");
goto err;
}
s->vdso_start = start;
s->sym.vdso_size = end - start;
} else {
if (s->vvar_start != VVAR_BAD_ADDR) {
pr_err("Got second VVAR entry\n");
goto err;
}
s->vvar_start = start;
s->sym.vvar_size = end - start;
}
}
if (s->vdso_start != VDSO_BAD_ADDR && s->vvar_start != VVAR_BAD_ADDR)
s->sym.vdso_before_vvar = (s->vdso_start < s->vvar_start);
exit_code = 0;
err:
bclose(&f);
return exit_code;
}
static int validate_vdso_addr(struct vdso_maps *s)
{
unsigned long vdso_end = s->vdso_start + s->sym.vdso_size;
unsigned long vvar_end = s->vvar_start + s->sym.vvar_size;
/*
* Validate its structure -- for new vDSO format the
* structure must be like
*
* 7fff1f5fd000-7fff1f5fe000 r-xp 00000000 00:00 0 [vdso]
* 7fff1f5fe000-7fff1f600000 r--p 00000000 00:00 0 [vvar]
*
* The areas may be in reverse order.
*
* 7fffc3502000-7fffc3504000 r--p 00000000 00:00 0 [vvar]
* 7fffc3504000-7fffc3506000 r-xp 00000000 00:00 0 [vdso]
*
*/
if (s->vdso_start != VDSO_BAD_ADDR) {
if (s->vvar_start != VVAR_BAD_ADDR) {
if (vdso_end != s->vvar_start && vvar_end != s->vdso_start) {
pr_err("Unexpected rt vDSO area bounds\n");
return -1;
}
}
} else {
pr_err("Can't find rt vDSO\n");
return -1;
}
return 0;
}
static int vdso_fill_self_symtable(struct vdso_maps *s)
{
if (s->vdso_start == VDSO_BAD_ADDR || s->sym.vdso_size == VDSO_BAD_SIZE)
return -1;
if (vdso_fill_symtable(s->vdso_start, s->sym.vdso_size, &s->sym))
return -1;
if (validate_vdso_addr(s))
return -1;
pr_debug("rt [vdso] %lx-%lx [vvar] %lx-%lx\n", s->vdso_start, s->vdso_start + s->sym.vdso_size, s->vvar_start,
s->vvar_start + s->sym.vvar_size);
return 0;
}
#ifdef CONFIG_COMPAT
static int vdso_mmap_compat(struct vdso_maps *native, struct vdso_maps *compat, void *vdso_buf, size_t buf_size)
{
pid_t pid;
int status, ret = -1;
int fds[2];
if (pipe(fds)) {
pr_perror("Failed to open pipe");
return -1;
}
pid = fork();
if (pid == 0) {
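/* Child: unmap the native vdso and map the compat one, see compat_vdso_helper() */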
if (close(fds[1])) {
pr_perror("Failed to close pipe");
syscall(__NR_exit, 1);
}
compat_vdso_helper(native, fds[0], log_get_fd(), vdso_buf, buf_size);
BUG();
}
if (close(fds[0])) {
pr_perror("Failed to close pipe");
goto out_kill;
}
waitpid(pid, &status, WUNTRACED);
if (WIFEXITED(status)) {
pr_err("Compat vdso helper exited with %d\n", WEXITSTATUS(status));
goto out_kill;
}
if (!WIFSTOPPED(status)) {
pr_err("Compat vdso helper isn't stopped\n");
goto out_kill;
}
if (vdso_parse_maps(pid, compat))
goto out_kill;
if (validate_vdso_addr(compat))
goto out_kill;
if (kill(pid, SIGCONT)) {
pr_perror("Failed to kill(SIGCONT) for compat vdso helper");
goto out_kill;
}
if (write(fds[1], &compat->vdso_start, sizeof(void *)) != sizeof(compat->vdso_start)) {
pr_perror("Failed write to pipe");
goto out_kill;
}
waitpid(pid, &status, WUNTRACED);
if (WIFEXITED(status)) {
ret = WEXITSTATUS(status);
if (ret)
pr_err("Helper for mmaping compat vdso failed with %d\n", ret);
goto out_close;
}
pr_err("Compat vDSO helper didn't exit, status: %d\n", status);
out_kill:
kill(pid, SIGKILL);
out_close:
if (close(fds[1]))
pr_perror("Failed to close pipe");
return ret;
}
#define COMPAT_VDSO_BUF_SZ (PAGE_SIZE * 4)
static int vdso_fill_compat_symtable(struct vdso_maps *native, struct vdso_maps *compat)
{
void *vdso_mmap;
int ret = -1;
if (!kdat.compat_cr)
return 0;
vdso_mmap = mmap(NULL, COMPAT_VDSO_BUF_SZ, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (vdso_mmap == MAP_FAILED) {
pr_perror("Failed to mmap buf for compat vdso");
return -1;
}
if (vdso_mmap_compat(native, compat, vdso_mmap, COMPAT_VDSO_BUF_SZ)) {
pr_err("Failed to mmap compatible vdso with helper process\n");
goto out_unmap;
}
if (vdso_fill_symtable_compat((uintptr_t)vdso_mmap, compat->sym.vdso_size, &compat->sym)) {
pr_err("Failed to parse mmapped compatible vdso blob\n");
goto out_unmap;
}
pr_debug("compat [vdso] %lx-%lx [vvar] %lx-%lx\n", compat->vdso_start,
compat->vdso_start + compat->sym.vdso_size, compat->vvar_start,
compat->vvar_start + compat->sym.vvar_size);
ret = 0;
out_unmap:
if (munmap(vdso_mmap, COMPAT_VDSO_BUF_SZ))
pr_perror("Failed to unmap buf for compat vdso");
return ret;
}
#endif /* CONFIG_COMPAT */
/*
* Compare the vdso/vvar sizes read from maps against the kdat values.
* We do not read /proc/self/maps for the compatible vdso as it's
* not parked as run-time vdso in the restorer, but mapped with the
* arch_prctl(MAP_VDSO_32) API.
* For that reason we verify only native sizes.
*/
static int is_kdat_vdso_sym_valid(void)
{
if (vdso_maps.sym.vdso_size != kdat.vdso_sym.vdso_size)
return false;
if (vdso_maps.sym.vvar_size != kdat.vdso_sym.vvar_size)
return false;
return true;
}
int vdso_init_dump(void)
{
if (vdso_parse_maps(PROC_SELF, &vdso_maps)) {
pr_err("Failed reading self/maps for filling vdso/vvar bounds\n");
return -1;
}
if (!is_kdat_vdso_sym_valid()) {
pr_err("Kdat sizes of vdso/vvar differ to maps file \n");
return -1;
}
if (kdat.vdso_sym.vdso_size == VDSO_BAD_SIZE) {
pr_debug("Kdat has empty vdso symtable - probably CONFIG_VDSO is not set\n");
return 0;
}
if (kdat.pmap != PM_FULL)
pr_info("VDSO detection turned off\n");
else if (vaddr_to_pfn(-1, vdso_maps.vdso_start, &vdso_pfn))
return -1;
return 0;
}
int vdso_init_restore(void)
{
if (kdat.vdso_sym.vdso_size == VDSO_BAD_SIZE) {
pr_debug("Kdat has empty vdso symtable - probably CONFIG_VDSO is not set\n");
return 0;
}
/* Already filled vdso_maps during kdat test */
if (vdso_maps.vdso_start != VDSO_BAD_ADDR)
return 0;
/*
* Parsing self-maps here only to find vvar/vdso vmas in
* criu's address space, for further remapping to restorer's
* parking zone. Don't need to do this if map-vdso API
* is present.
*/
if (!kdat.can_map_vdso) {
if (vdso_parse_maps(PROC_SELF, &vdso_maps)) {
pr_err("Failed reading self/maps for filling vdso/vvar bounds\n");
return -1;
}
if (!is_kdat_vdso_sym_valid()) {
pr_err("Kdat sizes of vdso/vvar differ to maps file \n");
return -1;
}
}
vdso_maps.sym = kdat.vdso_sym;
#ifdef CONFIG_COMPAT
vdso_maps_compat.sym = kdat.vdso_sym_compat;
vdso_maps_compat.compatible = true;
#endif
return 0;
}
int kerndat_vdso_fill_symtable(void)
{
if (vdso_parse_maps(PROC_SELF, &vdso_maps)) {
pr_err("Failed reading self/maps for filling vdso/vvar bounds\n");
return -1;
}
if (!vdso_is_present(&vdso_maps)) {
pr_debug("Kernel doesn't premap vDSO - probably CONFIG_VDSO is not set\n");
kdat.vdso_sym = vdso_maps.sym;
return 0;
}
if (vdso_fill_self_symtable(&vdso_maps)) {
pr_err("Failed to fill self vdso symtable\n");
return -1;
}
kdat.vdso_sym = vdso_maps.sym;
#ifdef CONFIG_COMPAT
if (vdso_fill_compat_symtable(&vdso_maps, &vdso_maps_compat)) {
pr_err("Failed to fill compat vdso symtable\n");
return -1;
}
vdso_maps_compat.compatible = true;
kdat.vdso_sym_compat = vdso_maps_compat.sym;
#endif
return 0;
}
/*
* On x86, pre-v3.16 kernels can lose the "[vdso]" hint
* in the /proc/.../maps file after mremap()'ing the vdso vma.
* Depends on kerndat_vdso_fill_symtable() - assumes that
* vdso_maps and vdso_maps_compat are filled.
*/
int kerndat_vdso_preserves_hint(void)
{
struct vdso_maps vdso_maps_after;
int status, ret = -1;
pid_t child;
kdat.vdso_hint_reliable = 0;
if (!vdso_is_present(&vdso_maps))
return 0;
child = fork();
if (child < 0) {
pr_perror("fork() failed");
return -1;
}
if (child == 0) {
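/*
* Child: mremap() our own vdso to a fresh anonymous mapping and
* stop, so the parent can re-parse our maps and check whether
* the "[vdso]" hint survived the move.
*/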
unsigned long vdso_addr = vdso_maps.vdso_start;
unsigned long vdso_size = vdso_maps.sym.vdso_size;
void *new_addr;
new_addr = mmap(0, vdso_size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (new_addr == MAP_FAILED)
exit(1);
child = getpid();
new_addr = (void *)syscall(SYS_mremap, vdso_addr, vdso_size, vdso_size, MREMAP_MAYMOVE | MREMAP_FIXED,
new_addr);
if (new_addr == MAP_FAILED)
syscall(SYS_exit, 2);
syscall(SYS_kill, child, SIGSTOP);
syscall(SYS_exit, 3);
}
waitpid(child, &status, WUNTRACED);
if (WIFEXITED(status)) {
int ret = WEXITSTATUS(status);
pr_err("Child unexpectedly exited with %d\n", ret);
goto out;
} else if (WIFSIGNALED(status)) {
int sig = WTERMSIG(status);
pr_err("Child unexpectedly signaled with %d: %s\n", sig, strsignal(sig));
goto out;
} else if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGSTOP) {
pr_err("Child is unstoppable or was stopped by other means\n");
goto out_kill;
}
if (vdso_parse_maps(child, &vdso_maps_after)) {
pr_err("Failed parsing maps for child helper\n");
goto out_kill;
}
if (vdso_is_present(&vdso_maps_after))
kdat.vdso_hint_reliable = 1;
ret = 0;
out_kill:
kill(child, SIGKILL);
waitpid(child, &status, 0);
out:
return ret;
}
| 17,161 | 23.447293 | 120 |
c
|
criu
|
criu-master/criu/arch/aarch64/crtools.c
|
#include <string.h>
#include <unistd.h>
#include <linux/elf.h>
#include "types.h"
#include <compel/asm/processor-flags.h>
#include <compel/asm/infect-types.h>
#include "asm/restorer.h"
#include "common/compiler.h"
#include <compel/ptrace.h>
#include "asm/dump.h"
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
#include "parasite-syscall.h"
#include "log.h"
#include "util.h"
#include "cpu.h"
#include "restorer.h"
#include "compel/infect.h"
#define assign_reg(dst, src, e) dst->e = (__typeof__(dst->e))(src)->e
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpsimd)
{
int i;
CoreEntry *core = x;
// Save the Aarch64 CPU state
for (i = 0; i < 31; ++i)
assign_reg(core->ti_aarch64->gpregs, regs, regs[i]);
assign_reg(core->ti_aarch64->gpregs, regs, sp);
assign_reg(core->ti_aarch64->gpregs, regs, pc);
assign_reg(core->ti_aarch64->gpregs, regs, pstate);
// Save the FP/SIMD state
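// Each 128-bit vreg is stored as two consecutive 64-bit halves: low at 2 * i, high at 2 * i + 1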
for (i = 0; i < 32; ++i) {
core->ti_aarch64->fpsimd->vregs[2 * i] = fpsimd->vregs[i];
core->ti_aarch64->fpsimd->vregs[2 * i + 1] = fpsimd->vregs[i] >> 64;
}
assign_reg(core->ti_aarch64->fpsimd, fpsimd, fpsr);
assign_reg(core->ti_aarch64->fpsimd, fpsimd, fpcr);
return 0;
}
int arch_alloc_thread_info(CoreEntry *core)
{
ThreadInfoAarch64 *ti_aarch64;
UserAarch64RegsEntry *gpregs;
UserAarch64FpsimdContextEntry *fpsimd;
ti_aarch64 = xmalloc(sizeof(*ti_aarch64));
if (!ti_aarch64)
goto err;
thread_info_aarch64__init(ti_aarch64);
core->ti_aarch64 = ti_aarch64;
gpregs = xmalloc(sizeof(*gpregs));
if (!gpregs)
goto err;
user_aarch64_regs_entry__init(gpregs);
gpregs->regs = xmalloc(31 * sizeof(uint64_t));
if (!gpregs->regs)
goto err;
gpregs->n_regs = 31;
ti_aarch64->gpregs = gpregs;
fpsimd = xmalloc(sizeof(*fpsimd));
if (!fpsimd)
goto err;
user_aarch64_fpsimd_context_entry__init(fpsimd);
ti_aarch64->fpsimd = fpsimd;
fpsimd->vregs = xmalloc(64 * sizeof(fpsimd->vregs[0]));
fpsimd->n_vregs = 64;
if (!fpsimd->vregs)
goto err;
return 0;
err:
return -1;
}
void arch_free_thread_info(CoreEntry *core)
{
if (CORE_THREAD_ARCH_INFO(core)) {
if (CORE_THREAD_ARCH_INFO(core)->fpsimd) {
xfree(CORE_THREAD_ARCH_INFO(core)->fpsimd->vregs);
xfree(CORE_THREAD_ARCH_INFO(core)->fpsimd);
}
xfree(CORE_THREAD_ARCH_INFO(core)->gpregs->regs);
xfree(CORE_THREAD_ARCH_INFO(core)->gpregs);
xfree(CORE_THREAD_ARCH_INFO(core));
CORE_THREAD_ARCH_INFO(core) = NULL;
}
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
int i;
struct fpsimd_context *fpsimd = RT_SIGFRAME_FPU(sigframe);
if (core->ti_aarch64->fpsimd->n_vregs != 64)
return 1;
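/* Recombine the two saved 64-bit halves into each 128-bit vreg */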
for (i = 0; i < 32; ++i)
fpsimd->vregs[i] = (__uint128_t)core->ti_aarch64->fpsimd->vregs[2 * i] |
((__uint128_t)core->ti_aarch64->fpsimd->vregs[2 * i + 1] << 64);
assign_reg(fpsimd, core->ti_aarch64->fpsimd, fpsr);
assign_reg(fpsimd, core->ti_aarch64->fpsimd, fpcr);
fpsimd->head.magic = FPSIMD_MAGIC;
fpsimd->head.size = sizeof(*fpsimd);
return 0;
}
int restore_gpregs(struct rt_sigframe *f, UserRegsEntry *r)
{
#define CPREG1(d) f->uc.uc_mcontext.d = r->d
int i;
for (i = 0; i < 31; ++i)
CPREG1(regs[i]);
CPREG1(sp);
CPREG1(pc);
CPREG1(pstate);
#undef CPREG1
return 0;
}
| 3,273 | 22.724638 | 83 |
c
|
criu
|
criu-master/criu/arch/aarch64/vdso-pie.c
|
#include <unistd.h>
#include "asm/types.h"
#include <compel/plugins/std/syscall.h>
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *to,
struct vdso_symtable *from, bool __always_unused compat_vdso)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(to->symbols); i++) {
if (vdso_symbol_empty(&from->symbols[i]))
continue;
pr_debug("br: %lx/%lx -> %lx/%lx (index %d)\n", base_from, from->symbols[i].offset, base_to,
to->symbols[i].offset, i);
write_intraprocedure_branch(base_to + to->symbols[i].offset, base_from + from->symbols[i].offset);
}
return 0;
}
| 761 | 22.8125 | 100 |
c
|
criu
|
criu-master/criu/arch/aarch64/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
"and sp, %0, #~15 \n" \
"mov x0, %2 \n" \
"br %1 \n" \
: \
: "r"(new_sp), \
"r"(restore_task_exec_start), \
"r"(task_args) \
: "x0", "memory")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_aarch64->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif
| 637 | 20.266667 | 64 |
h
|
criu
|
criu-master/criu/arch/aarch64/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include <asm/sigcontext.h>
#include <sys/ucontext.h>
#include "asm/types.h"
#include "images/core.pb-c.h"
#include <compel/asm/sigframe.h>
/* clang-format off */
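/*
* Emulate clone(): push the thread function and its argument onto the
* new stack, issue the raw clone syscall, and let the child pop them
* back (ldp x1, x0) and branch to the function.
*/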
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"clone_emul: \n" \
"ldr x1, %2 \n" \
"and x1, x1, #~15 \n" \
"sub x1, x1, #16 \n" \
"stp %5, %6, [x1] \n" \
"mov x0, %1 \n" \
"mov x2, %3 \n" \
"mov x3, %4 \n" \
"mov x8, #"__stringify(__NR_clone)" \n" \
"svc #0 \n" \
\
"cbz x0, thread_run \n" \
\
"mov %0, x0 \n" \
"b clone_end \n" \
\
"thread_run: \n" \
"ldp x1, x0, [sp] \n" \
"br x1 \n" \
\
"clone_end: \n" \
: "=r"(ret) \
: "r"(clone_flags), \
"m"(new_sp), \
"r"(&parent_tid), \
"r"(&thread_args[i].pid), \
"r"(clone_restore_fn), \
"r"(&thread_args[i]) \
: "x0", "x1", "x2", "x3", "x8", "memory")
/*
* Based on sysdeps/unix/sysv/linux/aarch64/clone.S
*
* int clone(int (*fn)(void *arg), x0
* void *child_stack, x1
* int flags, x2
* void *arg, x3
* pid_t *ptid, x4
* struct user_desc *tls, x5
* pid_t *ctid); x6
*
* int clone3(struct clone_args *args, x0
* size_t size); x1
*
* Always consult the CLONE3 wrappers for other architectures
* for additional details.
*
*/
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) \
asm volatile( \
/* In contrast to the clone() wrapper above this does not put
* the thread function and its arguments on the child stack,
* but uses registers to pass these parameters to the child process.
* Based on the glibc clone() wrapper at
* sysdeps/unix/sysv/linux/aarch64/clone.S.
*/ \
"clone3_emul: \n" \
/*
* Based on the glibc clone() wrapper, which uses x10 and x11
* to save the arguments for the child process, this does the same.
* x10 for the thread function and x11 for the thread arguments.
*/ \
"mov x10, %3 /* clone_restore_fn */ \n" \
"mov x11, %4 /* args */ \n" \
"mov x0, %1 /* &clone_args */ \n" \
"mov x1, %2 /* size */ \n" \
/* Load syscall number */ \
"mov x8, #"__stringify(__NR_clone3)" \n" \
/* Do the syscall */ \
"svc #0 \n" \
\
"cbz x0, clone3_thread_run \n" \
\
"mov %0, x0 \n" \
"b clone3_end \n" \
\
"clone3_thread_run: \n" \
/* Move args to x0 */ \
"mov x0, x11 \n" \
/* Jump to clone_restore_fn */ \
"br x10 \n" \
\
"clone3_end: \n" \
: "=r"(ret) \
: "r"(&clone_args), \
"r"(size), \
"r"(clone_restore_fn), \
"r"(args) \
: "x0", "x1", "x8", "x10", "x11", "memory")
#define ARCH_FAIL_CORE_RESTORE \
asm volatile( \
"mov sp, %0 \n" \
"mov x0, #0 \n" \
"b x0 \n" \
: \
: "r"(ret) \
: "sp", "x0", "memory")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
int restore_gpregs(struct rt_sigframe *f, UserAarch64RegsEntry *r);
int restore_nonsigframe_gpregs(UserAarch64RegsEntry *r);
static inline void restore_tls(tls_t *ptls)
{
asm("msr tpidr_el0, %0" : : "r"(*ptls));
}
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;
}
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif
| 3,930 | 26.110345 | 75 |
h
|
criu
|
criu-master/criu/arch/aarch64/include/asm/thread_pointer.h
|
/* __thread_pointer definition. Generic version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
static inline void *__criu_thread_pointer(void)
{
return __builtin_thread_pointer();
}
#endif /* _SYS_THREAD_POINTER_H */
| 1,021 | 35.5 | 71 |
h
|
criu
|
criu-master/criu/arch/aarch64/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "common/compiler.h"
#include "asm-generic/vdso.h"
/*
* This is a minimal amount of symbols
* we should support at the moment.
*/
#define VDSO_SYMBOL_MAX 4
#define VDSO_SYMBOL_GTOD 2
/*
* Workaround for VDSO array symbol table's relocation.
* XXX: remove when compel/piegen supports aarch64.
*/
#define ARCH_VDSO_SYMBOLS_LIST \
const char *aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char *aarch_vdso_symbol3 = "__kernel_gettimeofday"; \
const char *aarch_vdso_symbol4 = "__kernel_rt_sigreturn";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4
extern void write_intraprocedure_branch(unsigned long to, unsigned long from);
#endif /* __CR_ASM_VDSO_H__ */
| 922 | 29.766667 | 104 |
h
|
criu
|
criu-master/criu/arch/arm/crtools.c
|
#include <string.h>
#include <unistd.h>
#include "types.h"
#include <compel/asm/processor-flags.h>
#include <compel/asm/infect-types.h>
#include "asm/restorer.h"
#include "common/compiler.h"
#include "asm/dump.h"
#include <compel/ptrace.h>
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
#include "log.h"
#include "util.h"
#include "cpu.h"
#include "elf.h"
#include "parasite-syscall.h"
#include "restorer.h"
#include "compel/infect.h"
#define assign_reg(dst, src, e) dst->e = (__typeof__(dst->e))((src)->ARM_##e)
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{
CoreEntry *core = x;
// Save the ARM CPU state
assign_reg(core->ti_arm->gpregs, regs, r0);
assign_reg(core->ti_arm->gpregs, regs, r1);
assign_reg(core->ti_arm->gpregs, regs, r2);
assign_reg(core->ti_arm->gpregs, regs, r3);
assign_reg(core->ti_arm->gpregs, regs, r4);
assign_reg(core->ti_arm->gpregs, regs, r5);
assign_reg(core->ti_arm->gpregs, regs, r6);
assign_reg(core->ti_arm->gpregs, regs, r7);
assign_reg(core->ti_arm->gpregs, regs, r8);
assign_reg(core->ti_arm->gpregs, regs, r9);
assign_reg(core->ti_arm->gpregs, regs, r10);
assign_reg(core->ti_arm->gpregs, regs, fp);
assign_reg(core->ti_arm->gpregs, regs, ip);
assign_reg(core->ti_arm->gpregs, regs, sp);
assign_reg(core->ti_arm->gpregs, regs, lr);
assign_reg(core->ti_arm->gpregs, regs, pc);
assign_reg(core->ti_arm->gpregs, regs, cpsr);
core->ti_arm->gpregs->orig_r0 = regs->ARM_ORIG_r0;
// Save the VFP state
memcpy(CORE_THREAD_ARCH_INFO(core)->fpstate->vfp_regs, &fpregs->fpregs, sizeof(fpregs->fpregs));
CORE_THREAD_ARCH_INFO(core)->fpstate->fpscr = fpregs->fpscr;
return 0;
}
int arch_alloc_thread_info(CoreEntry *core)
{
ThreadInfoArm *ti_arm;
UserArmRegsEntry *gpregs;
UserArmVfpstateEntry *fpstate;
ti_arm = xmalloc(sizeof(*ti_arm));
if (!ti_arm)
goto err;
thread_info_arm__init(ti_arm);
core->ti_arm = ti_arm;
gpregs = xmalloc(sizeof(*gpregs));
user_arm_regs_entry__init(gpregs);
ti_arm->gpregs = gpregs;
fpstate = xmalloc(sizeof(*fpstate));
if (!fpstate)
goto err;
user_arm_vfpstate_entry__init(fpstate);
ti_arm->fpstate = fpstate;
fpstate->vfp_regs = xmalloc(32 * sizeof(unsigned long long));
fpstate->n_vfp_regs = 32;
if (!fpstate->vfp_regs)
goto err;
return 0;
err:
return -1;
}
void arch_free_thread_info(CoreEntry *core)
{
if (CORE_THREAD_ARCH_INFO(core)) {
if (CORE_THREAD_ARCH_INFO(core)->fpstate) {
xfree(CORE_THREAD_ARCH_INFO(core)->fpstate->vfp_regs);
xfree(CORE_THREAD_ARCH_INFO(core)->fpstate);
}
xfree(CORE_THREAD_ARCH_INFO(core)->gpregs);
xfree(CORE_THREAD_ARCH_INFO(core));
CORE_THREAD_ARCH_INFO(core) = NULL;
}
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
struct aux_sigframe *aux = (struct aux_sigframe *)&sigframe->sig.uc.uc_regspace;
memcpy(&aux->vfp.ufp.fpregs, CORE_THREAD_ARCH_INFO(core)->fpstate->vfp_regs, sizeof(aux->vfp.ufp.fpregs));
aux->vfp.ufp.fpscr = CORE_THREAD_ARCH_INFO(core)->fpstate->fpscr;
aux->vfp.magic = VFP_MAGIC;
aux->vfp.size = VFP_STORAGE_SIZE;
return 0;
}
int restore_gpregs(struct rt_sigframe *f, UserArmRegsEntry *r)
{
#define CPREG1(d) f->sig.uc.uc_mcontext.arm_##d = r->d
#define CPREG2(d, s) f->sig.uc.uc_mcontext.arm_##d = r->s
CPREG1(r0);
CPREG1(r1);
CPREG1(r2);
CPREG1(r3);
CPREG1(r4);
CPREG1(r5);
CPREG1(r6);
CPREG1(r7);
CPREG1(r8);
CPREG1(r9);
CPREG1(r10);
CPREG1(fp);
CPREG1(ip);
CPREG1(sp);
CPREG1(lr);
CPREG1(pc);
CPREG1(cpsr);
#undef CPREG1
#undef CPREG2
return 0;
}
| 3,567 | 24.304965 | 107 |
c
|
criu
|
criu-master/criu/arch/arm/restorer.c
|
#include <unistd.h>
#include "restorer.h"
#include "asm/restorer.h"
#include <compel/plugins/std/syscall.h>
#include "log.h"
#include <compel/asm/fpu.h>
#include "cpu.h"
#include "page.h"
#include "common/err.h"
int restore_nonsigframe_gpregs(UserArmRegsEntry *r)
{
return 0;
}
/*
* On ARMv6 CPUs with VIPT caches there are aliasing issues:
* if two different cache line indexes correspond to the same physical
* address, then changes made to one of the aliases might be lost or they
* can overwrite each other. To overcome the aliasing issues, page coloring
* with 4-page alignment for shared mappings (SHMLBA) was introduced in
* the kernel. This results in a unique physical address for any cache
* tag (because the two upper bits corresponding to the page address
* are unused in tags).
*
* The problem here is in the shmat() syscall:
* 1. if shmaddr is NULL then do_shmat() uses arch_get_unmapped_area()
*    to allocate the shared mapping, which checks if the CPU cache is
*    VIPT and only then uses SHMLBA alignment.
* 2. if shmaddr is specified then do_shmat() checks that the address
*    has SHMLBA alignment regardless of CPU cache aliasing.
*
* All of the above means that on a non-VIPT CPU (like any ARMv7) we can
* get a non-SHMLBA, but page-aligned address with shmat(shmid, NULL,
* shmflg), but we can't restore it with shmat(shmid, shmaddr, shmflg).
* As a result we can dump, e.g., an application with shmem aligned on
* 2 pages, but can't restore it on the same ARMv7 CPU.
*
* To work around this kernel feature, use mremap() on the shmem mapping
* allocated with shmat(shmid, NULL, shmflg).
*/
#define SHMLBA (4UL * PAGE_SIZE)
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size)
{
unsigned long smap;
/* SHMLBA-aligned, direct call shmat() */
if (!((unsigned long)shmaddr & (SHMLBA - 1)))
return sys_shmat(shmid, shmaddr, shmflg);
smap = sys_shmat(shmid, NULL, shmflg);
if (IS_ERR_VALUE(smap)) {
pr_err("shmat() with NULL shmaddr failed: %d\n", (int)smap);
return smap;
}
/* We're lucky! */
if (smap == (unsigned long)shmaddr)
return smap;
/* Warn ALOUD */
pr_warn("Restoring shmem %p unaligned to SHMLBA.\n", shmaddr);
pr_warn("Make sure that you don't migrate shmem from non-VIPT cached CPU to VIPT cached (e.g., ARMv7 -> ARMv6)\n");
pr_warn("Otherwise YOU HAVE A CHANCE OF DATA CORRUPTIONS in writeable shmem\n");
smap = sys_mremap(smap, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, (unsigned long)shmaddr);
if (IS_ERR_VALUE(smap))
pr_err("mremap() for shmem failed: %d\n", (int)smap);
return smap;
}
| 2,567 | 34.666667 | 116 |
c
|
criu
|
criu-master/criu/arch/arm/vdso-pie.c
|
#include <unistd.h>
#include "asm/types.h"
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
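/*
* Patch the old vdso symbol with an absolute jump: "ldr pc, [pc, #-4]"
* loads the branch target from the literal word that follows it.
*/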
static void insert_trampoline(uintptr_t from, uintptr_t to)
{
struct {
uint32_t ldr_pc;
uint32_t imm32;
uint32_t guards;
} __packed jmp = {
.ldr_pc = 0xe51ff004, /* ldr pc, [pc, #-4] */
.imm32 = to,
.guards = 0xe1200070, /* bkpt 0x0000 */
};
void *iflush_start = (void *)from;
void *iflush_end = iflush_start + sizeof(jmp);
memcpy((void *)from, &jmp, sizeof(jmp));
__builtin___clear_cache(iflush_start, iflush_end);
}
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *sto,
struct vdso_symtable *sfrom, bool compat_vdso)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sto->symbols); i++) {
uintptr_t from, to;
if (vdso_symbol_empty(&sfrom->symbols[i]))
continue;
pr_debug("jmp: %lx/%lx -> %lx/%lx (index %d)\n", base_from, sfrom->symbols[i].offset, base_to,
sto->symbols[i].offset, i);
from = base_from + sfrom->symbols[i].offset;
to = base_to + sto->symbols[i].offset;
insert_trampoline(from, to);
}
return 0;
}
| 1,273 | 21.350877 | 98 |
c
|
criu
|
criu-master/criu/arch/arm/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
"mov sp, %0 \n" \
"mov r1, %1 \n" \
"mov r0, %2 \n" \
"bx r1 \n" \
: \
: "r"(new_sp), \
"r"(restore_task_exec_start), \
"r"(task_args) \
: "r0", "r1", "memory")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_arm->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif
| 720 | 22.258065 | 65 |
h
|
criu
|
criu-master/criu/arch/arm/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include "asm/types.h"
#include "images/core.pb-c.h"
#include <compel/asm/sigframe.h>
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"clone_emul: \n" \
"ldr r1, %2 \n" \
"sub r1, #16 \n" \
"mov r0, %6 \n" \
"str r0, [r1, #4] \n" \
"mov r0, %5 \n" \
"str r0, [r1] \n" \
"mov r0, %1 \n" \
"mov r2, %3 \n" \
"mov r3, %4 \n" \
"mov r7, #"__stringify(__NR_clone)" \n" \
"svc #0 \n" \
\
"cmp r0, #0 \n" \
"beq thread_run \n" \
\
"mov %0, r0 \n" \
"b clone_end \n" \
\
"thread_run: \n" \
"pop { r1 } \n" \
"pop { r0 } \n" \
"bx r1 \n" \
\
"clone_end: \n" \
: "=r"(ret) \
: "r"(clone_flags), \
"m"(new_sp), \
"r"(&parent_tid), \
"r"(&thread_args[i].pid), \
"r"(clone_restore_fn), \
"r"(&thread_args[i]) \
: "r0", "r1", "r2", "r3", "r7", "memory")
/*
* The clone3() assembler wrapper is based on the clone() wrapper above
* and on code from the glibc wrapper at
* sysdeps/unix/sysv/linux/arm/clone.S
*
* For arm, as on x86_64, it is necessary to change the child stack,
* since there seem to be no registers which stay the same over a
* syscall, unlike on s390x, ppc64le and aarch64.
*
* Changing the child stack means that this code has to deal with the
* kernel doing stack + stack_size implicitly.
*
* int clone3(struct clone_args *args, size_t size)
*/
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) \
asm volatile( \
"clone3_emul: \n" \
/* Load thread stack pointer */ \
"ldr r1, [%3] \n" \
/* Load thread stack size */ \
"mov r2, %4 \n" \
/* Goto to the end of stack */ \
"add r1, r1, r2 \n" \
/* Load thread function and arguments and push on stack */ \
"mov r2, %6 /* args */ \n" \
"str r2, [r1, #4] /* args */ \n" \
"mov r2, %5 /* function */ \n" \
"str r2, [r1] /* function */ \n" \
"mov r0, %1 /* clone_args */ \n" \
"mov r1, %2 /* size */ \n" \
"mov r7, #"__stringify(__NR_clone3)" \n" \
"svc #0 \n" \
\
"cmp r0, #0 \n" \
"beq thread3_run \n" \
\
"mov %0, r0 \n" \
"b clone3_end \n" \
\
"thread3_run: \n" \
"pop { r1 } \n" \
"pop { r0 } \n" \
"bx r1 \n" \
\
"clone3_end: \n" \
: "=r"(ret) \
: "r"(&clone_args), \
"r"(size), \
"r"(&clone_args.stack), \
"r"(clone_args.stack_size), \
"r"(clone_restore_fn), \
"r"(args) \
: "r0", "r1", "r2", "r7", "memory")
#define ARCH_FAIL_CORE_RESTORE \
asm volatile( \
"mov sp, %0 \n" \
"mov r0, #0 \n" \
"bx r0 \n" \
: \
: "r"(ret) \
: "memory")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
int restore_gpregs(struct rt_sigframe *f, UserArmRegsEntry *r);
int restore_nonsigframe_gpregs(UserArmRegsEntry *r);
#define ARCH_HAS_SHMAT_HOOK
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size);
static inline void restore_tls(tls_t *ptls)
{
asm("mov r7, #15 \n"
"lsl r7, #16 \n"
"mov r0, #5 \n"
"add r7, r0 \n" /* r7 = 0xF005 */
"ldr r0, [%0] \n"
"svc #0 \n"
:
: "r"(ptls)
: "r0", "r7");
}
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;
}
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif
| 4,001 | 25.503311 | 83 |
h
|
criu
|
criu-master/criu/arch/arm/include/asm/thread_pointer.h
|
/* __thread_pointer definition. Generic version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
static inline void *__criu_thread_pointer(void)
{
return __builtin_thread_pointer();
}
#endif /* _SYS_THREAD_POINTER_H */
| 1,021 | 35.5 | 71 |
h
|
criu
|
criu-master/criu/arch/arm/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "asm-generic/vdso.h"
/* This definition is used in pie/util-vdso.c to initialize the vdso symbol
* name string table 'vdso_symbols'
*
* Taken from the kernel file arch/arm/vdso/vdso.lds.S
*/
#define VDSO_SYMBOL_MAX 2
#define VDSO_SYMBOL_GTOD 1
#define ARCH_VDSO_SYMBOLS_LIST \
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_gettimeofday";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2,
#endif /* __CR_ASM_VDSO_H__ */
| 610 | 29.55 | 75 |
h