repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
criu
|
criu-master/criu/arch/mips/crtools.c
|
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <elf.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/auxv.h>
#include <sys/wait.h>
#include "types.h"
#include "log.h"
#include "asm/parasite-syscall.h"
#include "asm/restorer.h"
#include <compel/asm/fpu.h>
#include "asm/dump.h"
#include "cr_options.h"
#include "common/compiler.h"
#include "restorer.h"
#include "parasite-syscall.h"
#include "util.h"
#include "cpu.h"
#include <compel/plugins/std/syscall-codes.h>
#include "kerndat.h"
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
/*
 * Dump-side hook: copy the ptrace-fetched register state of a stopped
 * task into its protobuf CoreEntry.
 *
 * @x      - the CoreEntry being filled (passed as void* by the common code)
 * @regs   - general purpose register set read via ptrace
 * @fpregs - floating point register set read via ptrace
 *
 * Always returns 0 (plain field copies; nothing here can fail).
 */
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{
CoreEntry *core = x;
/* Save the MIPS CPU state */
/* General purpose registers $0 .. $31 */
core->ti_mips->gpregs->r0 = regs->regs[0];
core->ti_mips->gpregs->r1 = regs->regs[1];
core->ti_mips->gpregs->r2 = regs->regs[2];
core->ti_mips->gpregs->r3 = regs->regs[3];
core->ti_mips->gpregs->r4 = regs->regs[4];
core->ti_mips->gpregs->r5 = regs->regs[5];
core->ti_mips->gpregs->r6 = regs->regs[6];
core->ti_mips->gpregs->r7 = regs->regs[7];
core->ti_mips->gpregs->r8 = regs->regs[8];
core->ti_mips->gpregs->r9 = regs->regs[9];
core->ti_mips->gpregs->r10 = regs->regs[10];
core->ti_mips->gpregs->r11 = regs->regs[11];
core->ti_mips->gpregs->r12 = regs->regs[12];
core->ti_mips->gpregs->r13 = regs->regs[13];
core->ti_mips->gpregs->r14 = regs->regs[14];
core->ti_mips->gpregs->r15 = regs->regs[15];
core->ti_mips->gpregs->r16 = regs->regs[16];
core->ti_mips->gpregs->r17 = regs->regs[17];
core->ti_mips->gpregs->r18 = regs->regs[18];
core->ti_mips->gpregs->r19 = regs->regs[19];
core->ti_mips->gpregs->r20 = regs->regs[20];
core->ti_mips->gpregs->r21 = regs->regs[21];
core->ti_mips->gpregs->r22 = regs->regs[22];
core->ti_mips->gpregs->r23 = regs->regs[23];
core->ti_mips->gpregs->r24 = regs->regs[24];
core->ti_mips->gpregs->r25 = regs->regs[25];
core->ti_mips->gpregs->r26 = regs->regs[26];
core->ti_mips->gpregs->r27 = regs->regs[27];
core->ti_mips->gpregs->r28 = regs->regs[28];
core->ti_mips->gpregs->r29 = regs->regs[29];
core->ti_mips->gpregs->r30 = regs->regs[30];
core->ti_mips->gpregs->r31 = regs->regs[31];
/* Multiply/divide result registers and CP0 coprocessor state */
core->ti_mips->gpregs->lo = regs->lo;
core->ti_mips->gpregs->hi = regs->hi;
core->ti_mips->gpregs->cp0_epc = regs->cp0_epc;
core->ti_mips->gpregs->cp0_badvaddr = regs->cp0_badvaddr;
core->ti_mips->gpregs->cp0_status = regs->cp0_status;
core->ti_mips->gpregs->cp0_cause = regs->cp0_cause;
/* Floating point registers $f0 .. $f31 */
core->ti_mips->fpregs->r0 = fpregs->regs[0];
core->ti_mips->fpregs->r1 = fpregs->regs[1];
core->ti_mips->fpregs->r2 = fpregs->regs[2];
core->ti_mips->fpregs->r3 = fpregs->regs[3];
core->ti_mips->fpregs->r4 = fpregs->regs[4];
core->ti_mips->fpregs->r5 = fpregs->regs[5];
core->ti_mips->fpregs->r6 = fpregs->regs[6];
core->ti_mips->fpregs->r7 = fpregs->regs[7];
core->ti_mips->fpregs->r8 = fpregs->regs[8];
core->ti_mips->fpregs->r9 = fpregs->regs[9];
core->ti_mips->fpregs->r10 = fpregs->regs[10];
core->ti_mips->fpregs->r11 = fpregs->regs[11];
core->ti_mips->fpregs->r12 = fpregs->regs[12];
core->ti_mips->fpregs->r13 = fpregs->regs[13];
core->ti_mips->fpregs->r14 = fpregs->regs[14];
core->ti_mips->fpregs->r15 = fpregs->regs[15];
core->ti_mips->fpregs->r16 = fpregs->regs[16];
core->ti_mips->fpregs->r17 = fpregs->regs[17];
core->ti_mips->fpregs->r18 = fpregs->regs[18];
core->ti_mips->fpregs->r19 = fpregs->regs[19];
core->ti_mips->fpregs->r20 = fpregs->regs[20];
core->ti_mips->fpregs->r21 = fpregs->regs[21];
core->ti_mips->fpregs->r22 = fpregs->regs[22];
core->ti_mips->fpregs->r23 = fpregs->regs[23];
core->ti_mips->fpregs->r24 = fpregs->regs[24];
core->ti_mips->fpregs->r25 = fpregs->regs[25];
core->ti_mips->fpregs->r26 = fpregs->regs[26];
core->ti_mips->fpregs->r27 = fpregs->regs[27];
core->ti_mips->fpregs->r28 = fpregs->regs[28];
core->ti_mips->fpregs->r29 = fpregs->regs[29];
core->ti_mips->fpregs->r30 = fpregs->regs[30];
core->ti_mips->fpregs->r31 = fpregs->regs[31];
/* FPU control/status and implementation-id words */
core->ti_mips->fpregs->fpu_fcr31 = fpregs->fpu_fcr31;
core->ti_mips->fpregs->fpu_id = fpregs->fpu_id;
return 0;
}
/*
 * Allocate the MIPS-specific per-thread state (GP and FP register
 * protobuf entries) and attach it to @core.
 *
 * Returns 0 on success, -1 on allocation failure.
 *
 * Fix: the original assigned core->ti_mips before the gpregs/fpregs
 * allocations and freed ti_mips on failure, leaving core->ti_mips
 * dangling; a later arch_free_thread_info() would double-free it.
 * We now publish the pointer only once the object is fully built.
 */
int arch_alloc_thread_info(CoreEntry *core)
{
	ThreadInfoMips *ti_mips;
	UserMipsRegsEntry *gpregs;
	UserMipsFpregsEntry *fpregs;

	ti_mips = xmalloc(sizeof(*ti_mips));
	if (!ti_mips)
		return -1;
	thread_info_mips__init(ti_mips);

	gpregs = xmalloc(sizeof(*gpregs));
	if (!gpregs)
		goto err_ti;
	user_mips_regs_entry__init(gpregs);
	ti_mips->gpregs = gpregs;

	fpregs = xmalloc(sizeof(*fpregs));
	if (!fpregs)
		goto err_gp;
	user_mips_fpregs_entry__init(fpregs);
	ti_mips->fpregs = fpregs;

	/* Publish only a fully constructed object. */
	core->ti_mips = ti_mips;
	return 0;

err_gp:
	xfree(gpregs);
err_ti:
	xfree(ti_mips);
	return -1;
}
/*
 * Release the MIPS per-thread state created by arch_alloc_thread_info().
 * NULL-safe; also resets core->ti_mips so a repeated call (or any other
 * later use of the stale pointer) cannot double-free.
 */
void arch_free_thread_info(CoreEntry *core)
{
	if (!core->ti_mips)
		return;
	if (core->ti_mips->gpregs)
		xfree(core->ti_mips->gpregs);
	if (core->ti_mips->fpregs)
		xfree(core->ti_mips->fpregs);
	xfree(core->ti_mips);
	core->ti_mips = NULL; /* guard against double free */
}
/*
 * Restore-side: copy the dumped FP registers from the CoreEntry into
 * the rt_sigframe's mcontext, from where the kernel's sigreturn path
 * will load them into the CPU.  Always returns 0.
 */
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
struct rt_sigframe *f = sigframe;
UserMipsFpregsEntry *r = core->ti_mips->fpregs;
f->rs_uc.uc_mcontext.sc_fpregs[0] = r->r0;
f->rs_uc.uc_mcontext.sc_fpregs[1] = r->r1;
f->rs_uc.uc_mcontext.sc_fpregs[2] = r->r2;
f->rs_uc.uc_mcontext.sc_fpregs[3] = r->r3;
f->rs_uc.uc_mcontext.sc_fpregs[4] = r->r4;
f->rs_uc.uc_mcontext.sc_fpregs[5] = r->r5;
f->rs_uc.uc_mcontext.sc_fpregs[6] = r->r6;
f->rs_uc.uc_mcontext.sc_fpregs[7] = r->r7;
f->rs_uc.uc_mcontext.sc_fpregs[8] = r->r8;
f->rs_uc.uc_mcontext.sc_fpregs[9] = r->r9;
f->rs_uc.uc_mcontext.sc_fpregs[10] = r->r10;
f->rs_uc.uc_mcontext.sc_fpregs[11] = r->r11;
f->rs_uc.uc_mcontext.sc_fpregs[12] = r->r12;
f->rs_uc.uc_mcontext.sc_fpregs[13] = r->r13;
f->rs_uc.uc_mcontext.sc_fpregs[14] = r->r14;
f->rs_uc.uc_mcontext.sc_fpregs[15] = r->r15;
f->rs_uc.uc_mcontext.sc_fpregs[16] = r->r16;
f->rs_uc.uc_mcontext.sc_fpregs[17] = r->r17;
f->rs_uc.uc_mcontext.sc_fpregs[18] = r->r18;
f->rs_uc.uc_mcontext.sc_fpregs[19] = r->r19;
f->rs_uc.uc_mcontext.sc_fpregs[20] = r->r20;
f->rs_uc.uc_mcontext.sc_fpregs[21] = r->r21;
f->rs_uc.uc_mcontext.sc_fpregs[22] = r->r22;
f->rs_uc.uc_mcontext.sc_fpregs[23] = r->r23;
f->rs_uc.uc_mcontext.sc_fpregs[24] = r->r24;
f->rs_uc.uc_mcontext.sc_fpregs[25] = r->r25;
f->rs_uc.uc_mcontext.sc_fpregs[26] = r->r26;
f->rs_uc.uc_mcontext.sc_fpregs[27] = r->r27;
f->rs_uc.uc_mcontext.sc_fpregs[28] = r->r28;
f->rs_uc.uc_mcontext.sc_fpregs[29] = r->r29;
f->rs_uc.uc_mcontext.sc_fpregs[30] = r->r30;
f->rs_uc.uc_mcontext.sc_fpregs[31] = r->r31;
return 0;
}
/*
 * Restore-side: copy the dumped general purpose registers into the
 * rt_sigframe's mcontext (including hi/lo and the PC taken from the
 * saved CP0 EPC).  Always returns 0.
 */
int restore_gpregs(struct rt_sigframe *f, UserMipsRegsEntry *r)
{
f->rs_uc.uc_mcontext.sc_regs[0] = r->r0;
f->rs_uc.uc_mcontext.sc_regs[1] = r->r1;
f->rs_uc.uc_mcontext.sc_regs[2] = r->r2;
f->rs_uc.uc_mcontext.sc_regs[3] = r->r3;
f->rs_uc.uc_mcontext.sc_regs[4] = r->r4;
f->rs_uc.uc_mcontext.sc_regs[5] = r->r5;
f->rs_uc.uc_mcontext.sc_regs[6] = r->r6;
f->rs_uc.uc_mcontext.sc_regs[7] = r->r7;
f->rs_uc.uc_mcontext.sc_regs[8] = r->r8;
f->rs_uc.uc_mcontext.sc_regs[9] = r->r9;
f->rs_uc.uc_mcontext.sc_regs[10] = r->r10;
f->rs_uc.uc_mcontext.sc_regs[11] = r->r11;
f->rs_uc.uc_mcontext.sc_regs[12] = r->r12;
f->rs_uc.uc_mcontext.sc_regs[13] = r->r13;
f->rs_uc.uc_mcontext.sc_regs[14] = r->r14;
f->rs_uc.uc_mcontext.sc_regs[15] = r->r15;
f->rs_uc.uc_mcontext.sc_regs[16] = r->r16;
f->rs_uc.uc_mcontext.sc_regs[17] = r->r17;
f->rs_uc.uc_mcontext.sc_regs[18] = r->r18;
f->rs_uc.uc_mcontext.sc_regs[19] = r->r19;
f->rs_uc.uc_mcontext.sc_regs[20] = r->r20;
f->rs_uc.uc_mcontext.sc_regs[21] = r->r21;
f->rs_uc.uc_mcontext.sc_regs[22] = r->r22;
f->rs_uc.uc_mcontext.sc_regs[23] = r->r23;
f->rs_uc.uc_mcontext.sc_regs[24] = r->r24;
f->rs_uc.uc_mcontext.sc_regs[25] = r->r25;
f->rs_uc.uc_mcontext.sc_regs[26] = r->r26;
f->rs_uc.uc_mcontext.sc_regs[27] = r->r27;
f->rs_uc.uc_mcontext.sc_regs[28] = r->r28;
f->rs_uc.uc_mcontext.sc_regs[29] = r->r29;
f->rs_uc.uc_mcontext.sc_regs[30] = r->r30;
f->rs_uc.uc_mcontext.sc_regs[31] = r->r31;
/* hi/lo multiply-divide state and resume address */
f->rs_uc.uc_mcontext.sc_mdlo = r->lo;
f->rs_uc.uc_mcontext.sc_mdhi = r->hi;
f->rs_uc.uc_mcontext.sc_pc = r->cp0_epc;
return 0;
}
/*
 * Stub: MIPS has no 32-bit compat mode handled here, so there is no
 * compat robust futex list to fetch.  Reporting success is fine.
 */
int get_task_futex_robust_list_compat(pid_t pid, ThreadCoreEntry *info)
{
return 0;
}
| 8,280 | 31.992032 | 83 |
c
|
criu
|
criu-master/criu/arch/mips/restorer.c
|
#include <unistd.h>
#include "types.h"
#include "restorer.h"
#include "asm/restorer.h"
#include <compel/asm/fpu.h>
#include <compel/plugins/std/syscall-codes.h>
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "log.h"
#include "cpu.h"
/*
 * Stub: on MIPS every register is restored through the signal frame,
 * so there is nothing to do outside of it.
 */
int restore_nonsigframe_gpregs(UserMipsRegsEntry *r)
{
return 0;
}
#define SHMLBA 0x40000
/*
 * MIPS-specific shmat() hook for the restorer.
 *
 * The kernel requires SHMLBA alignment for shmat(); a dumped segment
 * address may not satisfy it.  Strategy: attach directly when aligned,
 * otherwise attach anywhere and mremap() the mapping to the wanted
 * address.  Returns the mapped address or a negative errno value
 * (checked via IS_ERR_VALUE).
 *
 * NOTE(review): the mremap trick bypasses the SHMLBA cache-aliasing
 * guarantee, hence the loud warnings below about VIPT caches.
 */
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size)
{
unsigned long smap;
/* SHMLBA-aligned, direct call shmat() */
if (!((unsigned long)shmaddr & (SHMLBA - 1)))
return sys_shmat(shmid, shmaddr, shmflg);
/* Unaligned: let the kernel pick an address first */
smap = sys_shmat(shmid, NULL, shmflg);
if (IS_ERR_VALUE(smap)) {
pr_err("shmat() with NULL shmaddr failed: %d\n", (int)smap);
return smap;
}
/* We're lucky! */
if (smap == (unsigned long)shmaddr)
return smap;
/* Warn ALOUD */
pr_warn("Restoring shmem %p unaligned to SHMLBA.\n", shmaddr);
pr_warn("Make sure that you don't migrate shmem from non-VIPT cached CPU to VIPT cached \n");
pr_warn("Otherwise YOU HAVE A CHANCE OF DATA CORRUPTIONS in writeable shmem\n");
/* Move the attached segment to the address the task expects */
smap = sys_mremap(smap, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, (unsigned long)shmaddr);
if (IS_ERR_VALUE(smap))
pr_err("mremap() for shmem failed: %d\n", (int)smap);
return smap;
}
| 1,283 | 25.75 | 94 |
c
|
criu
|
criu-master/criu/arch/mips/vdso-pie.c
|
#include <unistd.h>
#include "asm/types.h"
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
/*
 * Overwrite the vDSO entry at @from with a small jump pad leading to
 * the same symbol at @to in the proxy vDSO.
 *
 * Layout: one branch instruction, the 64-bit target address consumed
 * by the landing code, and a trailing `break' guard that traps if
 * control ever falls through.
 *
 * Fix: dropped the unused `iflush_end' local (and the redundant
 * `iflush_start' alias) the original declared but never used.
 */
static void insert_trampoline(uintptr_t from, uintptr_t to)
{
	struct {
		uint32_t ldr_pc;
		uint32_t imm32;
		uint32_t guards;
	} __packed jmp = {
		.ldr_pc = 0x1000fffe, /* b -4 */
		.imm32 = to,
		.guards = 0x0000000d, /* break */
	};

	memcpy((void *)from, &jmp, sizeof(jmp));
	/* Flush the patched range so the I-cache sees the new code */
	sys_cacheflush((void *)from, sizeof(jmp), 0);
}
/*
 * Patch every known symbol of the parked vDSO at @base_from so it
 * jumps into the corresponding symbol of the vDSO mapped at @base_to.
 * Symbols missing from @sfrom are skipped.  @compat_vdso is unused on
 * MIPS.  Always returns 0.
 */
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *sto,
			struct vdso_symtable *sfrom, bool compat_vdso)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sto->symbols); i++) {
		uintptr_t from, to;

		if (vdso_symbol_empty(&sfrom->symbols[i]))
			continue;

		/* Fix: i is unsigned, print with %u (was %d) */
		pr_debug("jmp: %lx/%lx -> %lx/%lx (index %u)\n", base_from, sfrom->symbols[i].offset, base_to,
			 sto->symbols[i].offset, i);

		from = base_from + sfrom->symbols[i].offset;
		to = base_to + sto->symbols[i].offset;

		insert_trampoline(from, to);
	}

	return 0;
}
| 1,247 | 21.690909 | 98 |
c
|
criu
|
criu-master/criu/arch/mips/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, task_args) \
asm volatile( \
"move $4, %0 \n" \
"move $25, %1 \n" \
"move $5, %2 \n" \
"move $29, $5 \n" \
"jalr $25 \n" \
"nop \n" \
: \
:"r"(task_args),"r"(restore_task_exec_start), \
"g"(new_sp) \
: "$25", "$4","$5")
/* clang-format on */
/* Fetch the thread's saved TLS pointer from the MIPS CoreEntry. */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_mips->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif
| 687 | 21.933333 | 75 |
h
|
criu
|
criu-master/criu/arch/mips/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include "asm/types.h"
#include <compel/asm/fpu.h>
#include "images/core.pb-c.h"
#include <compel/plugins/std/syscall-codes.h>
#include <compel/asm/sigframe.h>
/*
 * Reinstall the thread-local storage pointer via the
 * set_thread_area syscall ($4 = arg, $2 = syscall number).
 */
static inline void restore_tls(tls_t *ptls)
{
/* clang-format off */
asm volatile("move $4, %0 \n"
"li $2, " __stringify(__NR_set_thread_area) " \n"
"syscall \n"
:
: "r"(*ptls)
: "$4", "$2", "memory");
/* clang-format on */
}
/* No 32-bit compat mode on MIPS here — always fails. */
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;
}
/* No 32-bit compat mode on MIPS here — always fails. */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"ld $5,%2 \n" /* a1 = new_sp */ \
"dsubu $5,32 \n" \
"sd %5,0($5) \n" \
"sd %6,8($5) \n" \
"sd %1,16($5) \n" \
"move $4,%1 \n" /* a0=flags */ \
"move $6,%3 \n" /* a2=parent_tid */ \
"li $7,0 \n" /* a3 = tls is 0 */ \
"move $8,%4 \n" /* a4 = child_tid */ \
"li $2, "__stringify(__NR_clone)" \n" \
"syscall \n" /* syscall */ \
"sync \n" \
"bnez $7,err \n" \
"nop \n" \
"beqz $2,thread_start \n" \
"nop \n" \
"move %0,$2 \n" \
"b end \n" \
"err:break \n" \
"thread_start: \n" \
"ld $25,0($29) \n" \
"ld $4,8($29) \n" \
"jal $25 \n" \
"nop \n" \
"end: \n" \
: "=r"(ret) \
: "r"(clone_flags), \
"m"(new_sp), \
"r"(&parent_tid), \
"r"(&thread_args[i].pid), \
"r"(clone_restore_fn), \
"r"(&thread_args[i]) \
:"$2","$4","$5","$6","$7","$8","$25","memory")
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) do { \
pr_err("This architecture does not support clone3() with set_tid, yet!\n"); \
ret = -1; \
} while (0)
/* clang-format on */
#define kdat_compatible_cr() 0
#define arch_map_vdso(map, compat) -1
/* Compat syscall stacks are not supported on MIPS — always NULL. */
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
/* Matching no-op for alloc_compat_syscall_stack(). */
static inline void free_compat_syscall_stack(void *stack32)
{
}
int restore_gpregs(struct rt_sigframe *f, UserMipsRegsEntry *r);
int restore_nonsigframe_gpregs(UserMipsRegsEntry *r);
#define ARCH_HAS_SHMAT_HOOK
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size);
#endif
| 2,619 | 27.478261 | 83 |
h
|
criu
|
criu-master/criu/arch/mips/include/asm/syscall32.h
|
#ifndef __CR_SYSCALL32_H__
#define __CR_SYSCALL32_H__
extern long sys_socket(int domain, int type, int protocol);
extern long sys_connect(int sockfd, struct sockaddr *addr, int addrlen);
extern long sys_sendto(int sockfd, void *buff, size_t len, unsigned int flags, struct sockaddr *addr, int addr_len);
extern long sys_recvfrom(int sockfd, void *ubuf, size_t size, unsigned int flags, struct sockaddr *addr, int *addr_len);
extern long sys_sendmsg(int sockfd, const struct msghdr *msg, int flags);
extern long sys_recvmsg(int sockfd, struct msghdr *msg, int flags);
extern long sys_shutdown(int sockfd, int how);
extern long sys_bind(int sockfd, const struct sockaddr *addr, int addrlen);
extern long sys_setsockopt(int sockfd, int level, int optname, const void *optval, unsigned int optlen);
extern long sys_getsockopt(int sockfd, int level, int optname, const void *optval, unsigned int *optlen);
extern long sys_shmat(int shmid, void *shmaddr, int shmflag);
extern long sys_pread(unsigned int fd, char *ubuf, u32 count, u64 pos);
#endif /* __CR_SYSCALL32_H__ */
| 1,069 | 58.444444 | 120 |
h
|
criu
|
criu-master/criu/arch/mips/include/asm/thread_pointer.h
|
/* __thread_pointer definition. Generic version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
/* Return the current thread pointer via the compiler builtin. */
static inline void *__criu_thread_pointer(void)
{
return __builtin_thread_pointer();
}
#endif /* _SYS_THREAD_POINTER_H */
| 1,021 | 35.5 | 71 |
h
|
criu
|
criu-master/criu/arch/mips/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "asm-generic/vdso.h"
/* This definition is used in pie/util-vdso.c to initialize the vdso symbol
* name string table 'vdso_symbols'
*/
/*
* This is a minimal amount of symbols
* we should support at the moment.
*/
#define VDSO_SYMBOL_MAX 3
#define VDSO_SYMBOL_GTOD 0
#define ARCH_VDSO_SYMBOLS_LIST \
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_gettimeofday"; \
const char *aarch_vdso_symbol3 = "__vdso_clock_getres";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3,
#endif /* __CR_ASM_VDSO_H__ */
| 720 | 29.041667 | 85 |
h
|
criu
|
criu-master/criu/arch/ppc64/cpu.c
|
#undef LOG_PREFIX
#define LOG_PREFIX "cpu: "
#include <sys/auxv.h>
#include <errno.h>
#include <asm/cputable.h>
#include "asm/types.h"
#include "cr_options.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "cpu.h"
#include "protobuf.h"
#include "images/cpuinfo.pb-c.h"
static compel_cpuinfo_t rt_cpuinfo;
#ifdef __LITTLE_ENDIAN__
#define CURRENT_ENDIANNESS CPUINFO_PPC64_ENTRY__ENDIANNESS__LITTLEENDIAN
#else
#define CURRENT_ENDIANNESS CPUINFO_PPC64_ENTRY__ENDIANESS__BIGENDIAN
#endif
/* Probe the running CPU's features into the static rt_cpuinfo cache. */
int cpu_init(void)
{
return compel_cpuid(&rt_cpuinfo);
}
/*
 * Write the probed PPC64 CPU features (endianness + two HWCAP words)
 * into the cpuinfo image.  Returns 0 on success, -1 on failure.
 */
int cpu_dump_cpuinfo(void)
{
CpuinfoEntry cpu_info = CPUINFO_ENTRY__INIT;
CpuinfoPpc64Entry cpu_ppc64_info = CPUINFO_PPC64_ENTRY__INIT;
CpuinfoPpc64Entry *cpu_ppc64_info_ptr = &cpu_ppc64_info;
struct cr_img *img;
int ret = -1;
img = open_image(CR_FD_CPUINFO, O_DUMP);
if (!img)
return -1;
/* One PPC64 sub-entry carrying endian + hwcap[0..1] */
cpu_info.ppc64_entry = &cpu_ppc64_info_ptr;
cpu_info.n_ppc64_entry = 1;
cpu_ppc64_info.endian = CURRENT_ENDIANNESS;
cpu_ppc64_info.n_hwcap = 2;
cpu_ppc64_info.hwcap = rt_cpuinfo.hwcap;
ret = pb_write_one(img, &cpu_info, PB_CPUINFO);
close_image(img);
return ret;
}
/*
 * Compare the CPU features recorded in the image against the host CPU.
 * Fails on endianness mismatch, missing hwcap data, a mandatory
 * feature absent from the image, or a feature the image needs that the
 * host lacks.  Returns 0 if compatible, -1 otherwise.
 */
int cpu_validate_cpuinfo(void)
{
CpuinfoEntry *cpu_info;
CpuinfoPpc64Entry *cpu_ppc64_entry;
struct cr_img *img;
int ret = -1;
img = open_image(CR_FD_CPUINFO, O_RSTR);
if (!img)
return -1;
if (pb_read_one(img, &cpu_info, PB_CPUINFO) < 0)
goto error;
if (cpu_info->n_ppc64_entry != 1) {
pr_err("No PPC64 related entry in image\n");
goto error;
}
cpu_ppc64_entry = cpu_info->ppc64_entry[0];
if (cpu_ppc64_entry->endian != CURRENT_ENDIANNESS) {
pr_err("Bad endianness\n");
goto error;
}
if (cpu_ppc64_entry->n_hwcap != 2) {
pr_err("Hardware capabilities information missing\n");
goto error;
}
/* Feature set s (0 or 1) in the image must also be present on the host */
#define CHECK_FEATURE(s, f) \
do { \
if ((cpu_ppc64_entry->hwcap[s] & f) && !(rt_cpuinfo.hwcap[s] & f)) { \
pr_err("CPU Feature %s required by image " \
"is not supported on host.\n", \
#f); \
goto error; \
} \
} while (0)
/* Feature must have been present at dump time, full stop */
#define REQUIRE_FEATURE(s, f) \
do { \
if (!(cpu_ppc64_entry->hwcap[s] & f)) { \
pr_err("CPU Feature %s missing in image.\n", #f); \
goto error; \
} \
} while (0)
REQUIRE_FEATURE(0, PPC_FEATURE_64);
REQUIRE_FEATURE(0, PPC_FEATURE_HAS_FPU);
REQUIRE_FEATURE(0, PPC_FEATURE_HAS_MMU);
REQUIRE_FEATURE(0, PPC_FEATURE_HAS_VSX);
REQUIRE_FEATURE(1, PPC_FEATURE2_ARCH_2_07);
CHECK_FEATURE(0, PPC_FEATURE_TRUE_LE);
CHECK_FEATURE(1, PPC_FEATURE2_HTM);
CHECK_FEATURE(1, PPC_FEATURE2_DSCR);
CHECK_FEATURE(1, PPC_FEATURE2_EBB);
CHECK_FEATURE(1, PPC_FEATURE2_ISEL);
CHECK_FEATURE(1, PPC_FEATURE2_TAR);
CHECK_FEATURE(1, PPC_FEATURE2_VEC_CRYPTO);
ret = 0;
error:
close_image(img);
return ret;
}
/* Dump-side entry point: probe CPU features and write the image. */
int cpuinfo_dump(void)
{
	/* Short-circuit keeps the original probe-then-dump ordering */
	if (cpu_init() || cpu_dump_cpuinfo())
		return -1;
	return 0;
}
/*
 * Restore-side entry point: -1 when the host probe fails,
 * 1 when the image is incompatible with this host, 0 when fine.
 */
int cpuinfo_check(void)
{
	if (cpu_init())
		return -1;
	return cpu_validate_cpuinfo() ? 1 : 0;
}
| 3,507 | 23.193103 | 86 |
c
|
criu
|
criu-master/criu/arch/ppc64/crtools.c
|
#include <string.h>
#include <unistd.h>
#include <elf.h>
#include <sys/user.h>
#include <asm/unistd.h>
#include <sys/uio.h>
#include "types.h"
#include <compel/asm/fpu.h>
#include "asm/restorer.h"
#include "asm/dump.h"
#include "cr_options.h"
#include "common/compiler.h"
#include <compel/ptrace.h>
#include "parasite-syscall.h"
#include "log.h"
#include "util.h"
#include "cpu.h"
#include "compel/infect.h"
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
/*
 * Snapshot the FP register file (incl. FPSCR) into a freshly allocated
 * protobuf entry.  Returns NULL on allocation failure.
 */
static UserPpc64FpstateEntry *copy_fp_regs(uint64_t *fpregs)
{
UserPpc64FpstateEntry *fpe;
int i;
fpe = xmalloc(sizeof(UserPpc64FpstateEntry));
if (!fpe)
return NULL;
user_ppc64_fpstate_entry__init(fpe);
fpe->n_fpregs = NFPREG;
fpe->fpregs = xmalloc(fpe->n_fpregs * sizeof(fpe->fpregs[0]));
if (!fpe->fpregs) {
xfree(fpe);
return NULL;
}
/* FPSCR is the last (33rd) register in the set */
for (i = 0; i < NFPREG; i++)
fpe->fpregs[i] = fpregs[i];
return fpe;
}
/* Copy the saved FP register image into a signal-frame mcontext. */
static void put_fpu_regs(mcontext_t *mc, UserPpc64FpstateEntry *fpe)
{
	uint64_t *dst = (uint64_t *)mc->fp_regs;
	size_t idx;

	for (idx = 0; idx < fpe->n_fpregs; idx++)
		dst[idx] = fpe->fpregs[idx];
}
/*
 * Snapshot the Altivec register set into a protobuf entry.  The 128-bit
 * vectors are split into pairs of 64-bit words; the final NVRREG slot
 * holds VRSAVE (a 32-bit value) instead of a full vector.
 * Returns NULL on allocation failure.
 */
static UserPpc64VrstateEntry *copy_altivec_regs(__vector128 *vrregs)
{
UserPpc64VrstateEntry *vse;
uint64_t *p64;
uint32_t *p32;
int i;
vse = xmalloc(sizeof(*vse));
if (!vse)
return NULL;
user_ppc64_vrstate_entry__init(vse);
/* protocol buffer store only 64bit entries and we need 128bit */
vse->n_vrregs = (NVRREG - 1) * 2;
vse->vrregs = xmalloc(vse->n_vrregs * sizeof(vse->vrregs[0]));
if (!vse->vrregs) {
xfree(vse);
return NULL;
}
/* Vectors are 2*64bits entries */
for (i = 0; i < (NVRREG - 1); i++) {
p64 = (uint64_t *)&vrregs[i];
vse->vrregs[i * 2] = p64[0];
vse->vrregs[i * 2 + 1] = p64[1];
}
/* Last slot: only the low 32 bits (VRSAVE) are meaningful */
p32 = (uint32_t *)&vrregs[NVRREG - 1];
vse->vrsave = *p32;
return vse;
}
/*
 * Copy the dumped Altivec state into the signal-frame mcontext.
 * v_regs must be 16-byte aligned, hence the round-up into vmx_reserve.
 * Returns 0 on success, -1 if the dump data has the wrong size.
 */
static int put_altivec_regs(mcontext_t *mc, UserPpc64VrstateEntry *vse)
{
vrregset_t *v_regs = (vrregset_t *)(((unsigned long)mc->vmx_reserve + 15) & ~0xful);
pr_debug("Restoring Altivec registers\n");
if (vse->n_vrregs != (NVRREG - 1) * 2) {
pr_err("Corrupted Altivec dump data\n");
return -1;
}
/* Note that this should only be done in the case MSR_VEC is set but
 * this is not a big deal to do that in all cases.
 */
memcpy(&v_regs->vrregs[0][0], vse->vrregs, sizeof(uint64_t) * 2 * (NVRREG - 1));
/* vscr has been restored with the previous memcpy which copied 32
 * 128bits registers + a 128bits field containing the vscr value in
 * the low part.
 */
v_regs->vrsave = vse->vrsave;
mc->v_regs = v_regs;
return 0;
}
/*
 * Snapshot the VSX register halves into a freshly allocated protobuf
 * entry.  Returns NULL on allocation failure.
 */
static UserPpc64VsxstateEntry *copy_vsx_regs(uint64_t *vsregs)
{
	UserPpc64VsxstateEntry *entry;
	size_t idx;

	entry = xmalloc(sizeof(*entry));
	if (!entry)
		return NULL;
	user_ppc64_vsxstate_entry__init(entry);

	entry->n_vsxregs = NVSXREG;
	entry->vsxregs = xmalloc(entry->n_vsxregs * sizeof(entry->vsxregs[0]));
	if (!entry->vsxregs) {
		xfree(entry);
		return NULL;
	}

	for (idx = 0; idx < entry->n_vsxregs; idx++)
		entry->vsxregs[idx] = vsregs[idx];

	return entry;
}
/*
 * Copy the dumped VSX state into the signal frame, right after the
 * Altivec block.  VSX implies Altivec, so mc->v_regs must already be
 * set by put_altivec_regs().  Returns 0 on success, -1 otherwise.
 */
static int put_vsx_regs(mcontext_t *mc, UserPpc64VsxstateEntry *vse)
{
uint64_t *buf;
int i;
pr_debug("Restoring VSX registers\n");
if (!mc->v_regs) {
/* VSX implies Altivec so v_regs should be set */
pr_err("Internal error\n");
return -1;
}
/* point after the Altivec registers */
buf = (uint64_t *)(mc->v_regs + 1);
/* Copy the value saved by get_vsx_regs in the sigframe */
for (i = 0; i < vse->n_vsxregs; i++)
buf[i] = vse->vsxregs[i];
return 0;
}
/*
 * Copy the ptrace GP register set into the protobuf entry field by
 * field (the assign_reg macro relies on both sides using the same
 * member names).
 */
static void copy_gp_regs(UserPpc64RegsEntry *dst, user_regs_struct_t *src)
{
int i;
#define assign_reg(e) \
do { \
dst->e = (__typeof__(dst->e))src->e; \
} while (0)
for (i = 0; i < 32; i++)
assign_reg(gpr[i]);
assign_reg(nip);
assign_reg(msr);
assign_reg(orig_gpr3);
assign_reg(ctr);
assign_reg(link);
assign_reg(xer);
assign_reg(ccr);
assign_reg(trap);
#undef assign_reg
}
/*
 * Copy the dumped GP registers into a signal-frame mcontext, using
 * the kernel's PT_* indices for the special registers.
 */
static void restore_gp_regs(mcontext_t *dst, UserPpc64RegsEntry *src)
{
int i;
/* r0 to r31 */
for (i = 0; i < 32; i++)
dst->gp_regs[i] = src->gpr[i];
dst->gp_regs[PT_NIP] = src->nip;
dst->gp_regs[PT_MSR] = src->msr;
dst->gp_regs[PT_ORIG_R3] = src->orig_gpr3;
dst->gp_regs[PT_CTR] = src->ctr;
dst->gp_regs[PT_LNK] = src->link;
dst->gp_regs[PT_XER] = src->xer;
dst->gp_regs[PT_CCR] = src->ccr;
dst->gp_regs[PT_TRAP] = src->trap;
}
/*
 * Allocate and init a UserPpc64RegsEntry with room for the 32 GPRs.
 * Returns NULL on allocation failure.
 */
static UserPpc64RegsEntry *allocate_gp_regs(void)
{
	UserPpc64RegsEntry *regs = xmalloc(sizeof(*regs));

	if (!regs)
		return NULL;
	user_ppc64_regs_entry__init(regs);

	regs->n_gpr = 32;
	regs->gpr = xmalloc(32 * sizeof(uint64_t));
	if (!regs->gpr) {
		xfree(regs);
		return NULL;
	}

	return regs;
}
/****************************************************************************
* TRANSACTIONAL MEMORY SUPPORT
*/
/* Deep-free a transactional-memory register snapshot.  NULL-safe. */
static void xfree_tm_state(UserPpc64TmRegsEntry *tme)
{
	if (!tme)
		return;

	if (tme->fpstate) {
		xfree(tme->fpstate->fpregs);
		xfree(tme->fpstate);
	}
	if (tme->vrstate) {
		xfree(tme->vrstate->vrregs);
		xfree(tme->vrstate);
	}
	if (tme->vsxstate) {
		xfree(tme->vsxstate->vsxregs);
		xfree(tme->vsxstate);
	}
	if (tme->gpregs) {
		if (tme->gpregs->gpr)
			xfree(tme->gpregs->gpr);
		xfree(tme->gpregs);
	}
	xfree(tme);
}
/*
 * Install the checkpointed transactional state into the transactional
 * ucontext of the signal frame and link it via uc_link, mirroring what
 * the kernel's signal delivery does for an active transaction.
 * Returns 0 on success, -1 on corrupted Altivec/VSX data.
 */
static int put_tm_regs(struct rt_sigframe *f, UserPpc64TmRegsEntry *tme)
{
/*
 * WARNING: As stated in kernel's restore_tm_sigcontexts, TEXASR has to be
 * restored by the process itself :
 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
 * Users doing anything abhorrent like thread-switching w/ signals for
 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
 * For the case of getting a signal and simply returning from it,
 * we don't need to re-copy them here.
 */
ucontext_t *tm_uc = &f->uc_transact;
pr_debug("Restoring TM registers FP:%d VR:%d VSX:%d\n", !!(tme->fpstate), !!(tme->vrstate), !!(tme->vsxstate));
restore_gp_regs(&tm_uc->uc_mcontext, tme->gpregs);
if (tme->fpstate)
put_fpu_regs(&tm_uc->uc_mcontext, tme->fpstate);
if (tme->vrstate && put_altivec_regs(&tm_uc->uc_mcontext, tme->vrstate))
return -1;
if (tme->vsxstate && put_vsx_regs(&tm_uc->uc_mcontext, tme->vsxstate))
return -1;
/* uc_link set = transaction active; the kernel follows it on sigreturn */
f->uc.uc_link = tm_uc;
return 0;
}
/****************************************************************************/
/*
 * Dump the transactional-memory register state.  The checkpointed set
 * (from the TM ELF notes, in fpregs->tm) is written into the core's
 * main register slots while the TM entry is stored in tmstate — see
 * the comment below for why the two sets are swapped.
 * Returns 0 on success, -1 on allocation failure.
 * NOTE(review): @regs is unused here; the current set is copied by the
 * caller (__copy_task_regs) into the tmstate side.
 */
static int copy_tm_regs(user_regs_struct_t *regs, user_fpregs_struct_t *fpregs, CoreEntry *core)
{
UserPpc64TmRegsEntry *tme;
UserPpc64RegsEntry *gpregs = core->ti_ppc64->gpregs;
pr_debug("Copying TM registers\n");
tme = xmalloc(sizeof(*tme));
if (!tme)
return -1;
user_ppc64_tm_regs_entry__init(tme);
tme->gpregs = allocate_gp_regs();
if (!tme->gpregs)
goto out_free;
/* TM special purpose registers ride along with the main gpregs entry */
gpregs->has_tfhar = true;
gpregs->tfhar = fpregs->tm.tm_spr_regs.tfhar;
gpregs->has_texasr = true;
gpregs->texasr = fpregs->tm.tm_spr_regs.texasr;
gpregs->has_tfiar = true;
gpregs->tfiar = fpregs->tm.tm_spr_regs.tfiar;
/* This is the checkpointed state, we must save it in place of the
 * current state because the signal handler is made in this way.
 * We invert the 2 states instead of when building the signal frame,
 * because we can't modify the gpregs manipulated by the common layer.
 */
copy_gp_regs(gpregs, &fpregs->tm.regs);
if (fpregs->tm.flags & USER_FPREGS_FL_FP) {
core->ti_ppc64->fpstate = copy_fp_regs(fpregs->tm.fpregs);
if (!core->ti_ppc64->fpstate)
goto out_free;
}
if (fpregs->tm.flags & USER_FPREGS_FL_ALTIVEC) {
core->ti_ppc64->vrstate = copy_altivec_regs(fpregs->tm.vrregs);
if (!core->ti_ppc64->vrstate)
goto out_free;
/*
 * Force the MSR_VEC bit of the restored MSR otherwise the
 * kernel will not restore them from the signal frame.
 */
gpregs->msr |= MSR_VEC;
if (fpregs->tm.flags & USER_FPREGS_FL_VSX) {
core->ti_ppc64->vsxstate = copy_vsx_regs(fpregs->tm.vsxregs);
if (!core->ti_ppc64->vsxstate)
goto out_free;
/*
 * Force the MSR_VSX bit of the restored MSR otherwise
 * the kernel will not restore them from the signal
 * frame.
 */
gpregs->msr |= MSR_VSX;
}
}
core->ti_ppc64->tmstate = tme;
return 0;
out_free:
xfree_tm_state(tme);
return -1;
}
/*
 * Copy the full ptrace-fetched register state into @core.  When a TM
 * transaction is active, the destination slots are swapped: the
 * current registers go into tmstate while copy_tm_regs() has already
 * placed the checkpointed set in the core's main slots.
 * Returns 0 on success, -1 on allocation failure.
 */
static int __copy_task_regs(user_regs_struct_t *regs, user_fpregs_struct_t *fpregs, CoreEntry *core)
{
UserPpc64RegsEntry *gpregs;
UserPpc64FpstateEntry **fpstate;
UserPpc64VrstateEntry **vrstate;
UserPpc64VsxstateEntry **vsxstate;
/* Copy retrieved registers in the proto data
 * If TM is in the loop we switch the saved register set because
 * the signal frame is built with checkpointed registers on top to not
 * confused TM unaware process, while ptrace is retrieving the
 * checkpointed set through the TM specific ELF notes.
 */
if (fpregs->flags & USER_FPREGS_FL_TM) {
if (copy_tm_regs(regs, fpregs, core))
return -1;
gpregs = core->ti_ppc64->tmstate->gpregs;
fpstate = &(core->ti_ppc64->tmstate->fpstate);
vrstate = &(core->ti_ppc64->tmstate->vrstate);
vsxstate = &(core->ti_ppc64->tmstate->vsxstate);
} else {
gpregs = core->ti_ppc64->gpregs;
fpstate = &(core->ti_ppc64->fpstate);
vrstate = &(core->ti_ppc64->vrstate);
vsxstate = &(core->ti_ppc64->vsxstate);
}
copy_gp_regs(gpregs, regs);
if (fpregs->flags & USER_FPREGS_FL_FP) {
*fpstate = copy_fp_regs(fpregs->fpregs);
if (!*fpstate)
return -1;
}
if (fpregs->flags & USER_FPREGS_FL_ALTIVEC) {
*vrstate = copy_altivec_regs(fpregs->vrregs);
if (!*vrstate)
return -1;
/*
 * Force the MSR_VEC bit of the restored MSR otherwise the
 * kernel will not restore them from the signal frame.
 */
gpregs->msr |= MSR_VEC;
if (fpregs->flags & USER_FPREGS_FL_VSX) {
*vsxstate = copy_vsx_regs(fpregs->vsxregs);
if (!*vsxstate)
return -1;
/*
 * Force the MSR_VSX bit of the restored MSR otherwise
 * the kernel will not restore them from the signal
 * frame.
 */
gpregs->msr |= MSR_VSX;
}
}
return 0;
}
/* Dump-side hook: @arg is really the CoreEntry being filled in. */
int save_task_regs(void *arg, user_regs_struct_t *u, user_fpregs_struct_t *f)
{
	CoreEntry *core = arg;

	return __copy_task_regs(u, f, core);
}
/****************************************************************************/
/*
 * Allocate the PPC64 per-thread state (thread info + GP register
 * entry) and attach it to @core.  FP/VR/VSX/TM entries are allocated
 * lazily during the register copy.  Returns 0 on success, -1 on OOM.
 */
int arch_alloc_thread_info(CoreEntry *core)
{
ThreadInfoPpc64 *ti_ppc64;
ti_ppc64 = xmalloc(sizeof(*ti_ppc64));
if (!ti_ppc64)
return -1;
thread_info_ppc64__init(ti_ppc64);
ti_ppc64->gpregs = allocate_gp_regs();
if (!ti_ppc64->gpregs) {
xfree(ti_ppc64);
return -1;
}
CORE_THREAD_ARCH_INFO(core) = ti_ppc64;
return 0;
}
/*
 * Deep-free everything arch_alloc_thread_info() and the register copy
 * attached to @core, then clear the pointer so a second call is safe.
 */
void arch_free_thread_info(CoreEntry *core)
{
if (CORE_THREAD_ARCH_INFO(core)) {
if (CORE_THREAD_ARCH_INFO(core)->fpstate) {
xfree(CORE_THREAD_ARCH_INFO(core)->fpstate->fpregs);
xfree(CORE_THREAD_ARCH_INFO(core)->fpstate);
}
if (CORE_THREAD_ARCH_INFO(core)->vrstate) {
xfree(CORE_THREAD_ARCH_INFO(core)->vrstate->vrregs);
xfree(CORE_THREAD_ARCH_INFO(core)->vrstate);
}
if (CORE_THREAD_ARCH_INFO(core)->vsxstate) {
xfree(CORE_THREAD_ARCH_INFO(core)->vsxstate->vsxregs);
xfree(CORE_THREAD_ARCH_INFO(core)->vsxstate);
}
/* xfree_tm_state() is NULL-safe */
xfree_tm_state(CORE_THREAD_ARCH_INFO(core)->tmstate);
xfree(CORE_THREAD_ARCH_INFO(core)->gpregs->gpr);
xfree(CORE_THREAD_ARCH_INFO(core)->gpregs);
xfree(CORE_THREAD_ARCH_INFO(core));
CORE_THREAD_ARCH_INFO(core) = NULL;
}
}
/*
 * Install FP, Altivec, VSX and TM state from @core into the signal
 * frame.  For each unit, its absence while the saved MSR claims it was
 * active means the image is inconsistent.  Returns 0 on success,
 * -1 on mismatch or corrupted data.
 */
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
int ret = 0;
if (CORE_THREAD_ARCH_INFO(core)->fpstate)
put_fpu_regs(&sigframe->uc.uc_mcontext, CORE_THREAD_ARCH_INFO(core)->fpstate);
if (CORE_THREAD_ARCH_INFO(core)->vrstate)
ret = put_altivec_regs(&sigframe->uc.uc_mcontext, CORE_THREAD_ARCH_INFO(core)->vrstate);
else if (core->ti_ppc64->gpregs->msr & MSR_VEC) {
pr_err("Register's data mismatch, corrupted image ?\n");
ret = -1;
}
if (!ret && CORE_THREAD_ARCH_INFO(core)->vsxstate)
ret = put_vsx_regs(&sigframe->uc.uc_mcontext, CORE_THREAD_ARCH_INFO(core)->vsxstate);
else if (core->ti_ppc64->gpregs->msr & MSR_VSX) {
pr_err("VSX register's data mismatch, corrupted image ?\n");
ret = -1;
}
if (!ret && CORE_THREAD_ARCH_INFO(core)->tmstate)
ret = put_tm_regs(sigframe, CORE_THREAD_ARCH_INFO(core)->tmstate);
else if (MSR_TM_ACTIVE(core->ti_ppc64->gpregs->msr)) {
pr_err("TM register's data mismatch, corrupted image ?\n");
ret = -1;
}
return ret;
}
/* Thin wrapper: GP registers go into the main (non-TM) mcontext. */
int restore_gpregs(struct rt_sigframe *f, UserPpc64RegsEntry *r)
{
restore_gp_regs(&f->uc.uc_mcontext, r);
return 0;
}
| 12,472 | 24.300203 | 112 |
c
|
criu
|
criu-master/criu/arch/ppc64/sigframe.c
|
#include <stdlib.h>
#include <stdint.h>
#include "asm/sigframe.h"
#include "asm/types.h"
#include "log.h"
#include "common/bug.h"
/*
* The signal frame has been built using local addresses. Since it has to be
* used in the context of the checkpointed process, the v_regs pointer in the
* signal frame must be updated to match the address in the remote stack.
*/
/*
 * The frame was built with local addresses; relocate the v_regs pointer
 * so that, relative to the remote context, it keeps the same offset it
 * had relative to the local one.
 */
static inline void update_vregs(mcontext_t *lcontext, mcontext_t *rcontext)
{
	uint64_t delta;

	if (!lcontext->v_regs)
		return;

	delta = (uint64_t)lcontext->v_regs - (uint64_t)lcontext;
	lcontext->v_regs = (vrregset_t *)((uint64_t)rcontext + delta);
	pr_debug("Updated v_regs:%llx (rcontext:%llx)\n", (unsigned long long)lcontext->v_regs,
		 (unsigned long long)rcontext);
}
/*
 * Fix up the FPU-related pointers of a locally built signal frame so it
 * is valid at its remote location 'rframe'. Returns 0 on success, 1 when
 * the TM-active bit and uc_link disagree (image inconsistency).
 */
int sigreturn_prep_fpu_frame(struct rt_sigframe *frame, struct rt_sigframe *rframe)
{
	uint64_t msr = frame->uc.uc_mcontext.gp_regs[PT_MSR];
	int tm_active = !!MSR_TM_ACTIVE(msr);

	update_vregs(&frame->uc.uc_mcontext, &rframe->uc.uc_mcontext);

	/* Sanity check: uc_link must be set if and only if TM is active. */
	if (tm_active != !!(frame->uc.uc_link)) {
		BUG();
		return 1;
	}

	if (frame->uc.uc_link) {
		/* Relocate the transactional state as well. */
		update_vregs(&frame->uc_transact.uc_mcontext, &rframe->uc_transact.uc_mcontext);
		frame->uc.uc_link = &rframe->uc_transact;
	}

	return 0;
}
| 1,318 | 27.673913 | 89 |
c
|
criu
|
criu-master/criu/arch/ppc64/vdso-pie.c
|
#include <unistd.h>
#include "asm/types.h"
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
/* This symbols are defined in vdso-trampoline.S */
extern char *vdso_trampoline, *vdso_trampoline_end;
/*
 * Flush the data-cache line and invalidate the instruction-cache line
 * covering 'at', so freshly written instructions become visible to
 * instruction fetch. Required after patching code on ppc64.
 * r3 is zeroed and used as the base register of dcbf/icbi (hence the
 * "r3" clobber).
 */
static inline void invalidate_caches(unsigned long at)
{
	asm volatile("isync		\n"
		     "li 	3,0	\n"
		     "dcbf	3,%0	\n"
		     "sync		\n"
		     "icbi 	3,%0	\n"
		     "isync		\n"
		     : /* no output */
		     : "r"(at)
		     : "memory", "r3");
}
/* This is the size of the trampoline call :
* mlfr r0
* bl trampoline
* <64 bit address>
*/
#define TRAMP_CALL_SIZE (2 * sizeof(uint32_t) + sizeof(uint64_t))
/*
* put_trampoline does 2 things :
*
* 1. it looks for a place in the checkpointed vDSO where to put the
* trampoline code (see vdso-trampoline.S).
*
* 2. for each symbol from the checkpointed vDSO, it checks that there are
* enough place to put the call to the vDSO trampoline (see
* TRAMP_CALL_SIZE's comment above).
* This done by checking that there is no interesting symbols in the range
* of current one's offset -> (current one's offset + TRAMP_CALL_SIZE).
* Unfortunately the symbols are not sorted by address so we have to look
* for the complete table all the time. Since the vDSO is small, this is
* not a big issue.
*/
/*
 * Find a gap inside the checkpointed vDSO large enough for the trampoline
 * code and copy it there; simultaneously verify that every symbol has at
 * least TRAMP_CALL_SIZE bytes before the next symbol so the call sequence
 * will fit. Returns the address of the installed trampoline, or 0 when a
 * symbol slot is too small.
 */
static unsigned long put_trampoline(unsigned long at, struct vdso_symtable *sym)
{
	int i, j;
	unsigned long size;
	unsigned long trampoline = 0;

	/* First of all we have to find a place where to put the trampoline
	 * code.
	 */
	size = (unsigned long)&vdso_trampoline_end - (unsigned long)&vdso_trampoline;

	for (i = 0; i < ARRAY_SIZE(sym->symbols); i++) {
		if (vdso_symbol_empty(&sym->symbols[i]))
			continue;

		pr_debug("Checking '%s' at %lx\n", sym->symbols[i].name, sym->symbols[i].offset);

		/* find the nearest following symbol we are interested in */
		for (j = 0; j < ARRAY_SIZE(sym->symbols); j++) {
			if (i == j || vdso_symbol_empty(&sym->symbols[j]))
				continue;

			if (sym->symbols[j].offset <= sym->symbols[i].offset)
				/* this symbol is above the current one */
				continue;

			if ((sym->symbols[i].offset + TRAMP_CALL_SIZE) > sym->symbols[j].offset) {
				/* we have a major issue here since we cannot
				 * even put the trampoline call for this symbol
				 */
				pr_err("Can't handle small vDSO symbol %s\n", sym->symbols[i].name);
				return 0;
			}

			if (trampoline)
				/* no need to put it twice */
				continue;

			if ((sym->symbols[j].offset - (sym->symbols[i].offset + TRAMP_CALL_SIZE)) <= size)
				/* not enough place */
				continue;

			/* We can put the trampoline there: right after the
			 * call sequence reserved at the start of symbol i. */
			trampoline = at + sym->symbols[i].offset;
			trampoline += TRAMP_CALL_SIZE;

			pr_debug("Putting vDSO trampoline in %s at %lx\n", sym->symbols[i].name, trampoline);
			memcpy((void *)trampoline, &vdso_trampoline, size);
			invalidate_caches(trampoline);
		}
	}

	return trampoline;
}
/*
 * Emit the TRAMP_CALL_SIZE-byte call sequence at 'at': save the link
 * register, branch-and-link to the trampoline 'tr', and store the final
 * destination 'to' inline right after the branch for the trampoline to
 * load (see vdso-trampoline.S).
 */
static inline void put_trampoline_call(unsigned long at, unsigned long to, unsigned long tr)
{
	uint32_t *addr = (uint32_t *)at;

	*addr++ = 0x7C0802a6; /* mflr r0 */
	*addr++ = 0x48000001 | ((long)(tr - at - 4) & 0x3fffffc); /* bl tr */
	*(uint64_t *)addr = to; /* the address to read by the trampoline */

	invalidate_caches(at);
}
/*
 * Patch every symbol of the checkpointed vDSO ('from') with a call
 * sequence that jumps, via a single shared trampoline, to the matching
 * symbol in the runtime vDSO ('to'). Returns 0 on success, 1 when the
 * trampoline cannot be installed.
 */
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *to,
			struct vdso_symtable *from, bool __always_unused compat_vdso)
{
	unsigned long tramp;
	unsigned int i;

	tramp = put_trampoline(base_from, from);
	if (!tramp)
		return 1;

	for (i = 0; i < ARRAY_SIZE(to->symbols); i++) {
		if (vdso_symbol_empty(&from->symbols[i]))
			continue;

		pr_debug("br: %lx/%lx -> %lx/%lx (index %d) '%s'\n", base_from, from->symbols[i].offset, base_to,
			 to->symbols[i].offset, i, from->symbols[i].name);

		put_trampoline_call(base_from + from->symbols[i].offset, base_to + to->symbols[i].offset, tramp);
	}

	return 0;
}
| 4,087 | 27.788732 | 104 |
c
|
criu
|
criu-master/criu/arch/ppc64/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/*
* Set R2 to blob + 8000 which is the default value
* Jump to restore_task_exec_start + 8 since R2 is already set (local call)
*/
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
"mr 1,%0 \n" \
"mr 12,%1 \n" \
"mtctr 12 \n" \
"mr 3,%2 \n" \
"bctr \n" \
: \
: "r"(new_sp), \
"r"((unsigned long)restore_task_exec_start), \
"r"(task_args) \
: "3", "12")
/* clang-format on */
/* There is nothing to do since TLS is accessed through r13 */
#define core_get_tls(pcore, ptls)
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif /* __CR_ASM_RESTORE_H__ */
| 845 | 23.882353 | 75 |
h
|
criu
|
criu-master/criu/arch/ppc64/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include <asm/ptrace.h>
#include <asm/elf.h>
#include <asm/types.h>
#include "asm/types.h"
#include <compel/asm/infect-types.h>
#include <compel/asm/sigframe.h>
/*
* Clone trampoline
*
* See glibc sysdeps/powerpc/powerpc64/sysdep.h for FRAME_MIN_SIZE defines
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"clone_emul: \n" \
"/* Save fn, args, stack across syscall. */ \n" \
"mr 14, %5 /* clone_restore_fn in r14 */ \n" \
"mr 15, %6 /* &thread_args[i] in r15 */ \n" \
"mr 3, %1 /* clone_flags */ \n" \
"ld 4, %2 /* new_sp */ \n" \
"mr 5, %3 /* &parent_tid */ \n" \
"li 6, 0 /* tls = 0 ? */ \n" \
"mr 7, %4 /* &thread_args[i].pid */ \n" \
"li 0,"__stringify(__NR_clone)" \n" \
"sc \n" \
"/* Check for child process. */ \n" \
"cmpdi cr1,3,0 \n" \
"crandc cr1*4+eq,cr1*4+eq,cr0*4+so \n" \
"bne- cr1,clone_end \n" \
"/* child */ \n" \
"addi 14, 14, 8 /* jump over r2 fixup */ \n" \
"mtctr 14 \n" \
"mr 3,15 \n" \
"bctr \n" \
"clone_end: \n" \
"mr %0,3 \n" \
: "=r"(ret) /* %0 */ \
: "r"(clone_flags), /* %1 */ \
"m"(new_sp), /* %2 */ \
"r"(&parent_tid), /* %3 */ \
"r"(&thread_args[i].pid), /* %4 */ \
"r"(clone_restore_fn), /* %5 */ \
"r"(&thread_args[i]) /* %6 */ \
: "memory","0","3","4","5","6","7","14","15")
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) \
/*
* The clone3() function accepts following parameters:
* int clone3(struct clone_args *args, size_t size)
*
* Always consult the CLONE3 wrappers for other architectures
* for additional details.
*
* For PPC64LE the first parameter (clone_args) is passed in r3 and
* the second parameter (size) is passed in r4.
*
* This clone3() wrapper is based on the clone() wrapper from above.
*/ \
asm volatile( \
"clone3_emul: \n" \
"/* Save fn, args across syscall. */ \n" \
"mr 14, %3 /* clone_restore_fn in r14 */ \n" \
"mr 15, %4 /* &thread_args[i] in r15 */ \n" \
"mr 3, %1 /* clone_args */ \n" \
"mr 4, %2 /* size */ \n" \
"li 0,"__stringify(__NR_clone3)" \n" \
"sc \n" \
"/* Check for child process. */ \n" \
"cmpdi cr1,3,0 \n" \
"crandc cr1*4+eq,cr1*4+eq,cr0*4+so \n" \
"bne- cr1,clone3_end \n" \
"/* child */ \n" \
"addi 14, 14, 8 /* jump over r2 fixup */ \n" \
"mtctr 14 \n" \
"mr 3,15 \n" \
"bctr \n" \
"clone3_end: \n" \
"mr %0,3 \n" \
: "=r"(ret) /* %0 */ \
: "r"(&clone_args), /* %1 */ \
"r"(size), /* %2 */ \
"r"(clone_restore_fn), /* %3 */ \
"r"(args) /* %4 */ \
: "memory","0","3","4","5","14","15")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
int restore_gpregs(struct rt_sigframe *f, UserPpc64RegsEntry *r);
int restore_nonsigframe_gpregs(UserPpc64RegsEntry *r);
/* Nothing to do, TLS is accessed through r13 */
static inline void restore_tls(tls_t *ptls)
{
(void)ptls;
}
/*
* Defined in arch/ppc64/syscall-common-ppc64.S
*/
unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg);
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;
}
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif /*__CR_ASM_RESTORER_H__*/
| 3,663 | 27.850394 | 75 |
h
|
criu
|
criu-master/criu/arch/ppc64/include/asm/thread_pointer.h
|
/* __thread_pointer definition. powerpc version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
#ifdef __powerpc64__
register void *__thread_register asm("r13");
#else
register void *__thread_register asm("r2");
#endif
static inline void *__criu_thread_pointer(void)
{
return __thread_register;
}
#endif /* _SYS_THREAD_POINTER_H */
| 1,135 | 33.424242 | 71 |
h
|
criu
|
criu-master/criu/arch/ppc64/include/asm/types.h
|
#ifndef __CR_ASM_TYPES_H__
#define __CR_ASM_TYPES_H__
#include <stdbool.h>
#include <signal.h>
#include "images/core.pb-c.h"
#include "page.h"
#include "bitops.h"
#include "asm/int.h"
#include <compel/plugins/std/asm/syscall-types.h>
typedef UserPpc64RegsEntry UserRegsEntry;
#define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__PPC64
#define core_is_compat(core) false
#define CORE_THREAD_ARCH_INFO(core) core->ti_ppc64
#define TI_IP(core) ((core)->ti_ppc64->gpregs->nip)
/* Image files store pointers as raw u64 values; these helpers convert
 * between the on-disk representation and native pointers (ppc64 is
 * 64-bit, so the casts are lossless). */
static inline void *decode_pointer(uint64_t v)
{
	return (void *)v;
}
static inline uint64_t encode_pointer(void *p)
{
	return (uint64_t)p;
}
/*
* Copied from the following kernel header files :
* include/linux/auxvec.h
* arch/powerpc/include/uapi/asm/auxvec.h
* include/linux/mm_types.h
*/
#define AT_VECTOR_SIZE_BASE 20
#if !defined AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 6
#endif
#define AT_VECTOR_SIZE (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
typedef uint64_t auxv_t;
/* Not used but the structure parasite_dump_thread needs a tls_t field */
typedef uint64_t tls_t;
#endif /* __CR_ASM_TYPES_H__ */
| 1,108 | 20.745098 | 76 |
h
|
criu
|
criu-master/criu/arch/ppc64/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "asm-generic/vdso.h"
/* This definition is used in pie/util-vdso.c to initialize the vdso symbol
* name string table 'vdso_symbols'
*
* Poke from kernel file arch/powerpc/kernel/vdso64/vdso64.lds.S
*
* Note that '__kernel_datapage_offset' is not a service but mostly a data
* inside the text page which should not be used as is from user space.
*/
#define VDSO_SYMBOL_MAX 10
#define VDSO_SYMBOL_GTOD 5
#define ARCH_VDSO_SYMBOLS_LIST \
const char *aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char *aarch_vdso_symbol3 = "__kernel_get_syscall_map"; \
const char *aarch_vdso_symbol4 = "__kernel_get_tbfreq"; \
const char *aarch_vdso_symbol5 = "__kernel_getcpu"; \
const char *aarch_vdso_symbol6 = "__kernel_gettimeofday"; \
const char *aarch_vdso_symbol7 = "__kernel_sigtramp_rt64"; \
const char *aarch_vdso_symbol8 = "__kernel_sync_dicache"; \
const char *aarch_vdso_symbol9 = "__kernel_sync_dicache_p5"; \
const char *aarch_vdso_symbol10 = "__kernel_time";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4, aarch_vdso_symbol5, \
aarch_vdso_symbol6, aarch_vdso_symbol7, aarch_vdso_symbol8, aarch_vdso_symbol9, aarch_vdso_symbol10
#endif /* __CR_ASM_VDSO_H__ */
| 1,541 | 44.352941 | 109 |
h
|
criu
|
criu-master/criu/arch/s390/cpu.c
|
#undef LOG_PREFIX
#define LOG_PREFIX "cpu: "
#include <sys/auxv.h>
#include <errno.h>
#include "asm/types.h"
#include "cr_options.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "cpu.h"
#include "protobuf.h"
#include "images/cpuinfo.pb-c.h"
static compel_cpuinfo_t rt_cpuinfo;
static const char *hwcap_str1[64] = {
"HWCAP_S390_ESAN3", "HWCAP_S390_ZARCH", "HWCAP_S390_STFLE", "HWCAP_S390_MSA", "HWCAP_S390_LDISP",
"HWCAP_S390_EIMM", "HWCAP_S390_DFP", "HWCAP_S390_HPAGE", "HWCAP_S390_ETF3EH", "HWCAP_S390_HIGH_GPRS",
"HWCAP_S390_TE", "HWCAP_S390_VXRS", "HWCAP_S390_VXRS_BCD", "HWCAP_S390_VXRS_EXT",
};
static const char *hwcap_str2[64] = {};
static const char **hwcap_str[2] = { hwcap_str1, hwcap_str2 };
/*
 * Debug-print both 64-bit hwcap words and the name of every set bit
 * (falling back to a numeric dump for bits without a name).
 *
 * Fix: the bit mask must be computed as 1UL << cap — the previous
 * (1 << cap) shifts a plain int, which is undefined behavior for
 * cap >= 31 and can never reach bits 32..63 of the hwcap word.
 */
static void print_hwcaps(const char *msg, unsigned long hwcap[2])
{
	int nr, cap;

	pr_debug("%s: Capabilities: %016lx %016lx\n", msg, hwcap[0], hwcap[1]);
	for (nr = 0; nr < 2; nr++) {
		for (cap = 0; cap < 64; cap++) {
			unsigned long bit = 1UL << cap;

			if (!(hwcap[nr] & bit))
				continue;
			if (hwcap_str[nr][cap])
				pr_debug("%s\n", hwcap_str[nr][cap]);
			else
				pr_debug("Capability %d/0x%lx\n", nr, bit);
		}
	}
}
/*
 * Query the host CPU features once and cache them in rt_cpuinfo.
 * Returns the result of compel_cpuid() (0 on success).
 */
int cpu_init(void)
{
	int rc;

	rc = compel_cpuid(&rt_cpuinfo);
	print_hwcaps("Host (init)", rt_cpuinfo.hwcap);

	return rc;
}
/*
 * Write the cached host hwcap words into the cpuinfo image.
 * Returns 0 on success, -1 on error.
 */
int cpu_dump_cpuinfo(void)
{
	CpuinfoS390Entry s390_entry = CPUINFO_S390_ENTRY__INIT;
	CpuinfoS390Entry *s390_entry_ptr = &s390_entry;
	CpuinfoEntry cpu_info = CPUINFO_ENTRY__INIT;
	struct cr_img *img;
	int ret;

	img = open_image(CR_FD_CPUINFO, O_DUMP);
	if (!img)
		return -1;

	/* Wire up the single s390 sub-entry holding both hwcap words. */
	cpu_info.s390_entry = &s390_entry_ptr;
	cpu_info.n_s390_entry = 1;
	s390_entry.n_hwcap = 2;
	s390_entry.hwcap = rt_cpuinfo.hwcap;

	ret = pb_write_one(img, &cpu_info, PB_CPUINFO);
	close_image(img);

	return ret;
}
/*
 * Compare the hwcap bits recorded in the image against the host's and
 * fail when the image relies on a CPU feature the host lacks.
 *
 * Fixes:
 *  - the early error paths (pb_read_one() failure, missing s390 entry)
 *    previously jumped to 'error' while ret was still 0 and reported
 *    success; ret now defaults to -1 until validation actually starts;
 *  - the bit mask is computed as 1UL << cap — (1 << cap) is undefined
 *    behavior for cap >= 31 and never reaches bits 32..63.
 *
 * Returns 0 when compatible, -1 otherwise.
 */
int cpu_validate_cpuinfo(void)
{
	CpuinfoS390Entry *cpu_s390_entry;
	CpuinfoEntry *cpu_info;
	struct cr_img *img;
	int cap, nr, ret;

	img = open_image(CR_FD_CPUINFO, O_RSTR);
	if (!img)
		return -1;

	ret = -1;
	if (pb_read_one(img, &cpu_info, PB_CPUINFO) < 0)
		goto error;

	if (cpu_info->n_s390_entry != 1) {
		pr_err("No S390 related entry in image\n");
		goto error;
	}
	cpu_s390_entry = cpu_info->s390_entry[0];

	if (cpu_s390_entry->n_hwcap != 2) {
		pr_err("Hardware capabilities information missing\n");
		goto error;
	}

	print_hwcaps("Host", rt_cpuinfo.hwcap);
	print_hwcaps("Image", cpu_s390_entry->hwcap);

	ret = 0;
	for (nr = 0; nr < 2; nr++) {
		for (cap = 0; cap < 64; cap++) {
			unsigned long bit = 1UL << cap;

			if (!(cpu_s390_entry->hwcap[nr] & bit))
				continue;
			if (rt_cpuinfo.hwcap[nr] & bit)
				continue;
			if (hwcap_str[nr][cap])
				pr_err("CPU Feature %s not supported on host\n", hwcap_str[nr][cap]);
			else
				pr_err("CPU Feature %d/%lx not supported on host\n", nr, bit);
			ret = -1;
		}
	}
	if (ret == -1)
		pr_err("See also: /usr/include/bits/hwcap.h\n");
error:
	close_image(img);
	return ret;
}
/* Collect host CPU capabilities, then write them to the image. */
int cpuinfo_dump(void)
{
	return (cpu_init() || cpu_dump_cpuinfo()) ? -1 : 0;
}
/* Collect host CPU capabilities and validate them against the image.
 * Returns 0 when compatible, 1 otherwise (check-command convention). */
int cpuinfo_check(void)
{
	return (cpu_init() || cpu_validate_cpuinfo()) ? 1 : 0;
}
| 3,150 | 20.582192 | 108 |
c
|
criu
|
criu-master/criu/arch/s390/crtools.c
|
#include <string.h>
#include <unistd.h>
#include <elf.h>
#include <sys/user.h>
#include <asm/unistd.h>
#include <sys/uio.h>
#include "types.h"
#include <compel/asm/fpu.h>
#include "asm/restorer.h"
#include "asm/dump.h"
#include "cr_options.h"
#include "common/compiler.h"
#include <compel/ptrace.h>
#include "parasite-syscall.h"
#include "log.h"
#include "util.h"
#include "cpu.h"
#include "compel/infect.h"
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
#include "ptrace.h"
#include "pstree.h"
#include "image.h"
#define NT_PRFPREG 2
#define NT_S390_VXRS_LOW 0x309
#define NT_S390_VXRS_HIGH 0x30a
#define NT_S390_GS_CB 0x30b
#define NT_S390_GS_BC 0x30c
#define NT_S390_RI_CB 0x30d
/*
* Print general purpose and access registers
*/
static void print_core_gpregs(const char *msg, UserS390RegsEntry *gpregs)
{
int i;
pr_debug("%s: General purpose registers\n", msg);
pr_debug(" psw %016lx %016lx\n", gpregs->psw_mask, gpregs->psw_addr);
pr_debug(" orig_gpr2 %016lx\n", gpregs->orig_gpr2);
for (i = 0; i < 16; i++)
pr_debug(" g%02d %016lx\n", i, gpregs->gprs[i]);
for (i = 0; i < 16; i++)
pr_debug(" a%02d %08x\n", i, gpregs->acrs[i]);
}
/*
* Print vector registers
*/
static void print_core_vx_regs(CoreEntry *core)
{
UserS390VxrsHighEntry *vxrs_high;
UserS390VxrsLowEntry *vxrs_low;
int i;
vxrs_high = CORE_THREAD_ARCH_INFO(core)->vxrs_high;
vxrs_low = CORE_THREAD_ARCH_INFO(core)->vxrs_low;
if (vxrs_low == NULL) {
pr_debug(" No VXRS\n");
return;
}
for (i = 0; i < 16; i++)
pr_debug(" vx_low%02d %016lx\n", i, vxrs_low->regs[i]);
for (i = 0; i < 32; i += 2)
pr_debug(" vx_high%02d %016lx %016lx\n", i / 2, vxrs_high->regs[i], vxrs_high->regs[i + 1]);
}
/*
* Print guarded-storage control block
*/
static void print_core_gs_cb(CoreEntry *core)
{
UserS390GsCbEntry *gs_cb;
int i;
gs_cb = CORE_THREAD_ARCH_INFO(core)->gs_cb;
if (!gs_cb) {
pr_debug(" No GS_CB\n");
return;
}
for (i = 0; i < 4; i++)
pr_debug(" gs_cb%d %lx\n", i, gs_cb->regs[i]);
}
/*
* Print guarded-storage broadcast control block
*/
static void print_core_gs_bc(CoreEntry *core)
{
UserS390GsCbEntry *gs_bc;
int i;
gs_bc = CORE_THREAD_ARCH_INFO(core)->gs_bc;
if (!gs_bc) {
pr_debug(" No GS_BC\n");
return;
}
for (i = 0; i < 4; i++)
pr_debug(" gs_bc%d %lx\n", i, gs_bc->regs[i]);
}
/*
* Print runtime-instrumentation control block
*/
static void print_core_ri_cb(CoreEntry *core)
{
UserS390RiEntry *ri_cb;
int i;
ri_cb = CORE_THREAD_ARCH_INFO(core)->ri_cb;
if (!ri_cb) {
pr_debug(" No RI_CB\n");
return;
}
for (i = 0; i < 8; i++)
pr_debug(" ri_cb%d %lx\n", i, ri_cb->regs[i]);
}
/*
* Print architecture registers
*/
static void print_core_fp_regs(const char *msg, CoreEntry *core)
{
UserS390FpregsEntry *fpregs;
int i;
fpregs = CORE_THREAD_ARCH_INFO(core)->fpregs;
pr_debug("%s: Floating point registers\n", msg);
pr_debug(" fpc %08x\n", fpregs->fpc);
for (i = 0; i < 16; i++)
pr_debug(" f%02d %016lx\n", i, fpregs->fprs[i]);
print_core_vx_regs(core);
print_core_gs_cb(core);
print_core_gs_bc(core);
print_core_ri_cb(core);
}
/*
* Allocate VxrsLow registers
*/
/*
 * Allocate a zero-initialized VxrsLow protobuf entry (16 x u64).
 * Returns NULL on allocation failure.
 */
static UserS390VxrsLowEntry *allocate_vxrs_low_regs(void)
{
	UserS390VxrsLowEntry *vxrs_low = xmalloc(sizeof(*vxrs_low));

	if (!vxrs_low)
		return NULL;
	user_s390_vxrs_low_entry__init(vxrs_low);

	vxrs_low->n_regs = 16;
	vxrs_low->regs = xzalloc(16 * sizeof(uint64_t));
	if (!vxrs_low->regs) {
		xfree(vxrs_low);
		return NULL;
	}
	return vxrs_low;
}
/*
* Free VxrsLow registers
*/
static void free_vxrs_low_regs(UserS390VxrsLowEntry *vxrs_low)
{
if (vxrs_low) {
xfree(vxrs_low->regs);
xfree(vxrs_low);
}
}
/*
* Allocate VxrsHigh registers
*/
/*
 * Allocate a zero-initialized VxrsHigh protobuf entry (32 x u64).
 * Returns NULL on allocation failure.
 */
static UserS390VxrsHighEntry *allocate_vxrs_high_regs(void)
{
	UserS390VxrsHighEntry *vxrs_high = xmalloc(sizeof(*vxrs_high));

	if (!vxrs_high)
		return NULL;
	user_s390_vxrs_high_entry__init(vxrs_high);

	vxrs_high->n_regs = 32;
	vxrs_high->regs = xzalloc(32 * sizeof(uint64_t));
	if (!vxrs_high->regs) {
		xfree(vxrs_high);
		return NULL;
	}
	return vxrs_high;
}
/*
* Free VxrsHigh registers
*/
static void free_vxrs_high_regs(UserS390VxrsHighEntry *vxrs_high)
{
if (vxrs_high) {
xfree(vxrs_high->regs);
xfree(vxrs_high);
}
}
/*
* Allocate guarded-storage control block (GS_CB and GS_BC)
*/
static UserS390GsCbEntry *allocate_gs_cb(void)
{
UserS390GsCbEntry *gs_cb;
gs_cb = xmalloc(sizeof(*gs_cb));
if (!gs_cb)
return NULL;
user_s390_gs_cb_entry__init(gs_cb);
gs_cb->n_regs = 4;
gs_cb->regs = xzalloc(4 * sizeof(uint64_t));
if (!gs_cb->regs)
goto fail_free_gs_cb;
return gs_cb;
fail_free_gs_cb:
xfree(gs_cb);
return NULL;
}
/*
* Free Guarded Storage control blocks
*/
static void free_gs_cb(UserS390GsCbEntry *gs_cb)
{
if (gs_cb) {
xfree(gs_cb->regs);
xfree(gs_cb);
}
}
/*
* Allocate runtime-instrumentation control block
*/
static UserS390RiEntry *allocate_ri_cb(void)
{
UserS390RiEntry *ri_cb;
ri_cb = xmalloc(sizeof(*ri_cb));
if (!ri_cb)
return NULL;
user_s390_ri_entry__init(ri_cb);
ri_cb->ri_on = 0;
ri_cb->n_regs = 8;
ri_cb->regs = xzalloc(8 * sizeof(uint64_t));
if (!ri_cb->regs)
goto fail_free_ri_cb;
return ri_cb;
fail_free_ri_cb:
xfree(ri_cb);
return NULL;
}
/*
* Free runtime-instrumentation control block
*/
static void free_ri_cb(UserS390RiEntry *ri_cb)
{
if (ri_cb) {
xfree(ri_cb->regs);
xfree(ri_cb);
}
}
/*
* Copy internal structures into Google Protocol Buffers
*/
/*
 * Copy the ptrace-collected register state (u, f) into the protobuf
 * CoreEntry. Optional register sets (vector, guarded storage, runtime
 * instrumentation) are allocated on demand and attached to
 * CORE_THREAD_ARCH_INFO(core).
 *
 * Fixes on the failure path:
 *  - vxrs_high was leaked (the old fail_free_vxrs_low label freed only
 *    vxrs_low);
 *  - entries already attached to the core were freed without clearing
 *    the core's pointers, leaving them dangling and liable to a double
 *    free in arch_free_thread_info().
 *
 * Returns 0 on success, -1 on allocation failure.
 */
int save_task_regs(void *arg, user_regs_struct_t *u, user_fpregs_struct_t *f)
{
	UserS390VxrsHighEntry *vxrs_high = NULL;
	UserS390VxrsLowEntry *vxrs_low = NULL;
	UserS390FpregsEntry *fpregs = NULL;
	UserS390RegsEntry *gpregs = NULL;
	UserS390GsCbEntry *gs_cb = NULL;
	UserS390GsCbEntry *gs_bc = NULL;
	UserS390RiEntry *ri_cb = NULL;
	CoreEntry *core = arg;

	gpregs = CORE_THREAD_ARCH_INFO(core)->gpregs;
	fpregs = CORE_THREAD_ARCH_INFO(core)->fpregs;

	/* Vector registers (optional) */
	if (f->flags & USER_FPREGS_VXRS) {
		vxrs_low = allocate_vxrs_low_regs();
		if (!vxrs_low)
			return -1;
		vxrs_high = allocate_vxrs_high_regs();
		if (!vxrs_high)
			goto fail_free_vxrs;
		memcpy(vxrs_low->regs, &f->vxrs_low, sizeof(f->vxrs_low));
		memcpy(vxrs_high->regs, &f->vxrs_high, sizeof(f->vxrs_high));
		CORE_THREAD_ARCH_INFO(core)->vxrs_low = vxrs_low;
		CORE_THREAD_ARCH_INFO(core)->vxrs_high = vxrs_high;
	}
	/* Guarded-storage control block (optional) */
	if (f->flags & USER_GS_CB) {
		gs_cb = allocate_gs_cb();
		if (!gs_cb)
			goto fail_free_vxrs;
		memcpy(gs_cb->regs, &f->gs_cb, sizeof(f->gs_cb));
		CORE_THREAD_ARCH_INFO(core)->gs_cb = gs_cb;
	}
	/* Guarded-storage broadcast control block (optional) */
	if (f->flags & USER_GS_BC) {
		gs_bc = allocate_gs_cb();
		if (!gs_bc)
			goto fail_free_gs_cb;
		memcpy(gs_bc->regs, &f->gs_bc, sizeof(f->gs_bc));
		CORE_THREAD_ARCH_INFO(core)->gs_bc = gs_bc;
	}
	/* Runtime-instrumentation control block (optional) */
	if (f->flags & USER_RI_CB) {
		ri_cb = allocate_ri_cb();
		if (!ri_cb)
			goto fail_free_gs_bc;
		memcpy(ri_cb->regs, &f->ri_cb, sizeof(f->ri_cb));
		CORE_THREAD_ARCH_INFO(core)->ri_cb = ri_cb;
		/* We need to remember that the RI bit was on */
		if (f->flags & USER_RI_ON)
			ri_cb->ri_on = 1;
	}
	/* General purpose registers */
	memcpy(gpregs->gprs, u->prstatus.gprs, sizeof(u->prstatus.gprs));
	gpregs->psw_mask = u->prstatus.psw.mask;
	gpregs->psw_addr = u->prstatus.psw.addr;
	/* Access registers */
	memcpy(gpregs->acrs, u->prstatus.acrs, sizeof(u->prstatus.acrs));
	/* System call */
	gpregs->system_call = u->system_call;
	/* Floating point registers */
	fpregs->fpc = f->prfpreg.fpc;
	memcpy(fpregs->fprs, f->prfpreg.fprs, sizeof(f->prfpreg.fprs));
	return 0;

	/* Unwind in reverse allocation order; the free_* helpers accept
	 * NULL, and each core pointer is cleared after its entry is freed. */
fail_free_gs_bc:
	free_gs_cb(gs_bc);
	CORE_THREAD_ARCH_INFO(core)->gs_bc = NULL;
fail_free_gs_cb:
	free_gs_cb(gs_cb);
	CORE_THREAD_ARCH_INFO(core)->gs_cb = NULL;
fail_free_vxrs:
	free_vxrs_high_regs(vxrs_high);
	free_vxrs_low_regs(vxrs_low);
	CORE_THREAD_ARCH_INFO(core)->vxrs_low = NULL;
	CORE_THREAD_ARCH_INFO(core)->vxrs_high = NULL;
	return -1;
}
/*
* Copy general and access registers to signal frame
*/
int restore_gpregs(struct rt_sigframe *f, UserS390RegsEntry *src)
{
_sigregs *dst = &f->uc.uc_mcontext;
dst->regs.psw.mask = src->psw_mask;
dst->regs.psw.addr = src->psw_addr;
memcpy(dst->regs.gprs, src->gprs, sizeof(dst->regs.gprs));
memcpy(dst->regs.acrs, src->acrs, sizeof(dst->regs.acrs));
print_core_gpregs("restore_gpregs_regs", src);
return 0;
}
/*
* Copy floating point and vector registers to mcontext
*/
int restore_fpu(struct rt_sigframe *f, CoreEntry *core)
{
UserS390VxrsHighEntry *vxrs_high;
UserS390VxrsLowEntry *vxrs_low;
UserS390FpregsEntry *fpregs;
_sigregs *dst = &f->uc.uc_mcontext;
_sigregs_ext *dst_ext = &f->uc.uc_mcontext_ext;
fpregs = CORE_THREAD_ARCH_INFO(core)->fpregs;
vxrs_high = CORE_THREAD_ARCH_INFO(core)->vxrs_high;
vxrs_low = CORE_THREAD_ARCH_INFO(core)->vxrs_low;
dst->fpregs.fpc = fpregs->fpc;
memcpy(dst->fpregs.fprs, fpregs->fprs, sizeof(dst->fpregs.fprs));
if (vxrs_low) {
memcpy(&dst_ext->vxrs_low, vxrs_low->regs, sizeof(dst_ext->vxrs_low));
memcpy(&dst_ext->vxrs_high, vxrs_high->regs, sizeof(dst_ext->vxrs_high));
}
return 0;
}
/*
* Allocate floating point registers
*/
static UserS390FpregsEntry *allocate_fp_regs(void)
{
UserS390FpregsEntry *fpregs;
fpregs = xmalloc(sizeof(*fpregs));
if (!fpregs)
return NULL;
user_s390_fpregs_entry__init(fpregs);
fpregs->n_fprs = 16;
fpregs->fprs = xzalloc(16 * sizeof(uint64_t));
if (!fpregs->fprs)
goto fail_free_fpregs;
return fpregs;
fail_free_fpregs:
xfree(fpregs);
return NULL;
}
/*
* Free floating point registers
*/
static void free_fp_regs(UserS390FpregsEntry *fpregs)
{
xfree(fpregs->fprs);
xfree(fpregs);
}
/*
* Allocate general purpose and access registers
*/
/*
 * Allocate a UserS390RegsEntry with room for 16 GPRs and 16 ACRs.
 * Returns NULL if any allocation fails.
 */
static UserS390RegsEntry *allocate_gp_regs(void)
{
	UserS390RegsEntry *gpregs = xmalloc(sizeof(*gpregs));

	if (!gpregs)
		return NULL;
	user_s390_regs_entry__init(gpregs);

	gpregs->n_gprs = 16;
	gpregs->gprs = xzalloc(16 * sizeof(uint64_t));
	if (!gpregs->gprs) {
		xfree(gpregs);
		return NULL;
	}
	gpregs->n_acrs = 16;
	gpregs->acrs = xzalloc(16 * sizeof(uint32_t));
	if (!gpregs->acrs) {
		xfree(gpregs->gprs);
		xfree(gpregs);
		return NULL;
	}
	return gpregs;
}
/*
* Free general purpose and access registers
*/
static void free_gp_regs(UserS390RegsEntry *gpregs)
{
xfree(gpregs->gprs);
xfree(gpregs->acrs);
xfree(gpregs);
}
/*
* Allocate thread info
*/
/*
 * Allocate the s390 ThreadInfo with its mandatory GP and FP register
 * entries and attach it to the core.
 * Returns 0 on success, -1 on allocation failure (nothing attached).
 */
int arch_alloc_thread_info(CoreEntry *core)
{
	ThreadInfoS390 *ti;

	ti = xmalloc(sizeof(*ti));
	if (!ti)
		return -1;
	thread_info_s390__init(ti);

	ti->gpregs = allocate_gp_regs();
	if (!ti->gpregs) {
		xfree(ti);
		return -1;
	}
	ti->fpregs = allocate_fp_regs();
	if (!ti->fpregs) {
		free_gp_regs(ti->gpregs);
		xfree(ti);
		return -1;
	}

	CORE_THREAD_ARCH_INFO(core) = ti;
	return 0;
}
/*
* Free thread info
*/
void arch_free_thread_info(CoreEntry *core)
{
if (!CORE_THREAD_ARCH_INFO(core))
return;
free_gp_regs(CORE_THREAD_ARCH_INFO(core)->gpregs);
free_fp_regs(CORE_THREAD_ARCH_INFO(core)->fpregs);
free_vxrs_low_regs(CORE_THREAD_ARCH_INFO(core)->vxrs_low);
free_vxrs_high_regs(CORE_THREAD_ARCH_INFO(core)->vxrs_high);
free_gs_cb(CORE_THREAD_ARCH_INFO(core)->gs_cb);
free_gs_cb(CORE_THREAD_ARCH_INFO(core)->gs_bc);
free_ri_cb(CORE_THREAD_ARCH_INFO(core)->ri_cb);
xfree(CORE_THREAD_ARCH_INFO(core));
CORE_THREAD_ARCH_INFO(core) = NULL;
}
/*
* Set regset for pid
*/
/*
 * Thin PTRACE_SETREGSET wrapper; logs 'set_str' on failure.
 * Returns 0 on success, -1 on error.
 */
static int setregset(int pid, int set, const char *set_str, struct iovec *iov)
{
	if (ptrace(PTRACE_SETREGSET, pid, set, iov) != 0) {
		pr_perror("Couldn't set %s registers for pid %d", set_str, pid);
		return -1;
	}
	return 0;
}
/*
* Set floating point registers for pid from fpregs
*/
static int set_fp_regs(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct iovec iov;
iov.iov_base = &fpregs->prfpreg;
iov.iov_len = sizeof(fpregs->prfpreg);
return setregset(pid, NT_PRFPREG, "PRFPREG", &iov);
}
/*
* Set vector registers
*/
/*
 * Write the vector registers (low and high halves) for 'pid' when the
 * dump captured them; a no-op otherwise.
 * Returns 0 on success, -1 on error.
 */
static int set_vx_regs(pid_t pid, user_fpregs_struct_t *fpregs)
{
	struct iovec iov;

	if (!(fpregs->flags & USER_FPREGS_VXRS))
		return 0;

	iov = (struct iovec){ .iov_base = &fpregs->vxrs_low, .iov_len = sizeof(fpregs->vxrs_low) };
	if (setregset(pid, NT_S390_VXRS_LOW, "S390_VXRS_LOW", &iov))
		return -1;

	iov = (struct iovec){ .iov_base = &fpregs->vxrs_high, .iov_len = sizeof(fpregs->vxrs_high) };
	return setregset(pid, NT_S390_VXRS_HIGH, "S390_VXRS_HIGH", &iov);
}
/*
* Set guarded-storage control block
*/
static int set_gs_cb(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct iovec iov;
if (fpregs->flags & USER_GS_CB) {
iov.iov_base = &fpregs->gs_cb;
iov.iov_len = sizeof(fpregs->gs_cb);
if (setregset(pid, NT_S390_GS_CB, "S390_GS_CB", &iov))
return -1;
}
if (!(fpregs->flags & USER_GS_BC))
return 0;
iov.iov_base = &fpregs->gs_bc;
iov.iov_len = sizeof(fpregs->gs_bc);
return setregset(pid, NT_S390_GS_BC, "S390_GS_BC", &iov);
}
/*
* Set runtime-instrumentation control block
*/
static int set_ri_cb(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct iovec iov;
if (!(fpregs->flags & USER_RI_CB))
return 0;
iov.iov_base = &fpregs->ri_cb;
iov.iov_len = sizeof(fpregs->ri_cb);
return setregset(pid, NT_S390_RI_CB, "S390_RI_CB", &iov);
}
/*
* Set runtime-instrumentation bit
*
* The CPU collects information when the RI bit of the PSW is set.
* The RI control block is not part of the signal frame. Therefore during
* sigreturn it is not set. If the RI control block is present, the CPU
* writes into undefined storage. Hence, we have disabled the RI bit in
* the sigreturn PSW and set this bit after sigreturn by modifying the PSW
* of the task.
*/
/*
 * Re-enable the runtime-instrumentation bit in the task's PSW.
 *
 * The RI control block is not part of the signal frame (see the comment
 * above), so the sigreturn PSW has RI disabled; this re-arms it by
 * rewriting the PSW via PTRACE_SETREGSET after restore.
 * Returns 0 on success, non-zero on ptrace failure.
 */
static int set_ri_bit(pid_t pid)
{
	user_regs_struct_t regs;
	struct iovec iov;
	psw_t *psw;

	iov.iov_base = &regs.prstatus;
	iov.iov_len = sizeof(regs.prstatus);
	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0) {
		pr_perror("Fail to activate RI bit");
		return -1;
	}
	psw = &regs.prstatus.psw;
	psw->mask |= PSW_MASK_RI;

	return ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}
/*
* Restore registers not present in sigreturn signal frame
*/
static int set_task_regs_nosigrt(pid_t pid, CoreEntry *core)
{
user_fpregs_struct_t fpregs;
UserS390GsCbEntry *cgs_cb;
UserS390GsCbEntry *cgs_bc;
UserS390RiEntry *cri_cb;
int ret = 0;
memset(&fpregs, 0, sizeof(fpregs));
/* Guarded-storage control block (optional) */
cgs_cb = CORE_THREAD_ARCH_INFO(core)->gs_cb;
if (cgs_cb != NULL) {
fpregs.flags |= USER_GS_CB;
memcpy(&fpregs.gs_cb, cgs_cb->regs, sizeof(fpregs.gs_cb));
}
/* Guarded-storage broadcast control block (optional) */
cgs_bc = CORE_THREAD_ARCH_INFO(core)->gs_bc;
if (cgs_bc != NULL) {
fpregs.flags |= USER_GS_BC;
memcpy(&fpregs.gs_bc, cgs_bc->regs, sizeof(fpregs.gs_bc));
}
if (set_gs_cb(pid, &fpregs) < 0)
return -1;
/* Runtime-instrumentation control block (optional) */
cri_cb = CORE_THREAD_ARCH_INFO(core)->ri_cb;
if (cri_cb != NULL) {
fpregs.flags |= USER_RI_CB;
memcpy(&fpregs.ri_cb, cri_cb->regs, sizeof(fpregs.ri_cb));
if (set_ri_cb(pid, &fpregs) < 0)
return -1;
if (cri_cb->ri_on) {
fpregs.flags |= USER_RI_ON;
ret = set_ri_bit(pid);
}
}
return ret;
}
/*
* Restore registers for pid from core
*/
static int set_task_regs(pid_t pid, CoreEntry *core)
{
UserS390VxrsHighEntry *cvxrs_high;
UserS390VxrsLowEntry *cvxrs_low;
UserS390FpregsEntry *cfpregs;
user_fpregs_struct_t fpregs;
memset(&fpregs, 0, sizeof(fpregs));
/* Floating point registers */
cfpregs = CORE_THREAD_ARCH_INFO(core)->fpregs;
if (!cfpregs)
return -1;
fpregs.prfpreg.fpc = cfpregs->fpc;
memcpy(fpregs.prfpreg.fprs, cfpregs->fprs, sizeof(fpregs.prfpreg.fprs));
if (set_fp_regs(pid, &fpregs) < 0)
return -1;
/* Vector registers (optional) */
cvxrs_low = CORE_THREAD_ARCH_INFO(core)->vxrs_low;
if (cvxrs_low != NULL) {
cvxrs_high = CORE_THREAD_ARCH_INFO(core)->vxrs_high;
if (!cvxrs_high)
return -1;
fpregs.flags |= USER_FPREGS_VXRS;
memcpy(&fpregs.vxrs_low, cvxrs_low->regs, sizeof(fpregs.vxrs_low));
memcpy(&fpregs.vxrs_high, cvxrs_high->regs, sizeof(fpregs.vxrs_high));
if (set_vx_regs(pid, &fpregs) < 0)
return -1;
}
return set_task_regs_nosigrt(pid, core);
}
/*
* Restore registers for all threads:
* - Floating point registers
* - Vector registers
* - Guarded-storage control block
* - Guarded-storage broadcast control block
* - Runtime-instrumentation control block
*/
/*
 * Restore the non-sigframe register sets (FP, VX, GS, RI) for the
 * thread-group leader of every task in the tree, and for all threads
 * when 'with_threads' is set. Dead/zombie tasks and threads are skipped.
 * Returns 0 on success, -1 on the first failure.
 */
int arch_set_thread_regs(struct pstree_item *item, bool with_threads)
{
	int i;

	for_each_pstree_item(item) {
		if (item->pid->state == TASK_DEAD || item->pid->state == TASK_ZOMBIE)
			continue;
		for (i = 0; i < item->nr_threads; i++) {
			if (item->threads[i].state == TASK_DEAD || item->threads[i].state == TASK_ZOMBIE)
				continue;
			if (!with_threads && i > 0)
				continue; /* leader only */
			if (set_task_regs(item->threads[i].real, item->core[i])) {
				pr_perror("Not set registers for task %d", item->threads[i].real);
				return -1;
			}
		}
	}
	return 0;
}
/*
 * Read the CoreEntry image of @pid into *@pcore.
 * pb_read_one() allocates the entry; ownership passes to the caller.
 * Returns 0 on success, -1 on error.
 */
static int open_core(int pid, CoreEntry **pcore)
{
	int nread;
	struct cr_img *img = open_image(CR_FD_CORE, O_RSTR, pid);

	if (img == NULL) {
		pr_err("Can't open core data for %d\n", pid);
		return -1;
	}
	nread = pb_read_one(img, pcore, PB_CORE);
	close_image(img);
	if (nread <= 0)
		return -1;
	return 0;
}
/*
* Restore all registers not present in sigreturn signal frame
*
* - Guarded-storage control block
* - Guarded-storage broadcast control block
* - Runtime-instrumentation control block
*/
int arch_set_thread_regs_nosigrt(struct pid *pid)
{
CoreEntry *core;
core = xmalloc(sizeof(*core));
if (open_core(pid->ns[0].virt, &core) < 0) {
pr_perror("Cannot open core for virt pid %d", pid->ns[0].virt);
return -1;
}
if (set_task_regs_nosigrt(pid->real, core) < 0) {
pr_perror("Set register for pid %d", pid->real);
return -1;
}
print_core_fp_regs("restore_fp_regs", core);
return 0;
}
| 18,072 | 22.380336 | 94 |
c
|
criu
|
criu-master/criu/arch/s390/vdso-pie.c
|
#include <unistd.h>
#include "asm/types.h"
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
/*
 * Trampoline instruction sequence: machine code patched over the old
 * vdso entry points so calls land in the new vdso. The byte arrays
 * below are s390x opcodes; 'addr' is filled in per symbol by
 * vdso_redirect_calls() before the structure is memcpy'd in place.
 */
typedef struct {
	u8 larl[6]; /* Load relative address of imm64 */
	u8 lg[6]; /* Load %r1 with imm64 */
	u8 br[2]; /* Branch to %r1 */
	u64 addr; /* Jump address */
	u32 guards; /* Guard bytes */
} __packed jmp_t;
/*
 * Trampoline template: Use %r1 to jump
 */
jmp_t jmp = {
	/* larl %r1,e (addr) */
	.larl = { 0xc0, 0x10, 0x00, 0x00, 0x00, 0x07 },
	/* lg %r1,0(%r1) */
	.lg = { 0xe3, 0x10, 0x10, 0x00, 0x00, 0x04 },
	/* br %r1 */
	.br = { 0x07, 0xf1 },
	.guards = 0xcccccccc,
};
/*
 * Insert trampoline code into old vdso entry points to
 * jump to new vdso functions.
 */
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *to,
			struct vdso_symtable *from, bool __always_unused compat_vdso)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(to->symbols); idx++) {
		unsigned long entry_from, entry_to;

		/* Symbols absent from the old vdso need no redirection */
		if (vdso_symbol_empty(&from->symbols[idx]))
			continue;
		entry_from = base_from + from->symbols[idx].offset;
		entry_to = base_to + to->symbols[idx].offset;
		pr_debug("jmp: %s: %lx/%lx -> %lx/%lx (index %d)\n", from->symbols[idx].name, base_from,
			 from->symbols[idx].offset, base_to, to->symbols[idx].offset, idx);
		/* Patch the template's target, then stamp it over the old entry */
		jmp.addr = entry_to;
		memcpy((void *)entry_from, &jmp, sizeof(jmp));
	}
	return 0;
}
| 1,477 | 22.83871 | 97 |
c
|
criu
|
criu-master/criu/arch/s390/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/*
* Load stack to %r15, return address in %r14 and argument 1 into %r2
*/
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
"lgr %%r15,%0\n" \
"lgr %%r14,%1\n" \
"lgr %%r2,%2\n" \
"basr %%r14,%%r14\n" \
: \
: "d" (new_sp), \
"d"((unsigned long)restore_task_exec_start), \
"d" (task_args) \
: "2", "14", "memory")
/* clang-format on */
/* There is nothing to do since TLS is accessed through %a01 */
#define core_get_tls(pcore, ptls)
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif
| 766 | 24.566667 | 69 |
h
|
criu
|
criu-master/criu/arch/s390/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include <asm/ptrace.h>
#include <asm/types.h>
#include "asm/types.h"
#include "sigframe.h"
/*
* Clone trampoline - see glibc sysdeps/unix/sysv/linux/s390/s390-64/clone.S
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"lgr %%r0,%6\n" /* Save thread_args in %r0 */ \
"lgr %%r1,%5\n" /* Save clone_restore_fn in %r1 */ \
"lgr %%r2,%2\n" /* Parameter 1: new_sp (child stack) */ \
"lgr %%r3,%1\n" /* Parameter 2: clone_flags */ \
"lgr %%r4,%3\n" /* Parameter 3: &parent_tid */ \
"lgr %%r5,%4\n" /* Parameter 4: &thread_args[i].pid */ \
"lghi %%r6,0\n" /* Parameter 5: tls = 0 */ \
"svc "__stringify(__NR_clone)"\n" \
"ltgr %0,%%r2\n" /* Set and check "ret" */ \
"jnz 0f\n" /* ret != 0: Continue caller */ \
"lgr %%r2,%%r0\n" /* Parameter 1: &thread_args */ \
"aghi %%r15,-160\n" /* Prepare stack frame */ \
"xc 0(8,%%r15),0(%%r15)\n" \
"basr %%r14,%%r1\n" /* Jump to clone_restore_fn() */ \
"j .+2\n" /* BUG(): Force PGM check */ \
"0:\n" /* Continue caller */ \
: "=d"(ret) \
: "d"(clone_flags), \
"a"(new_sp), \
"d"(&parent_tid), \
"d"(&thread_args[i].pid), \
"d"(clone_restore_fn), \
"d"(&thread_args[i]) \
: "0", "1", "2", "3", "4", "5", "6", "cc", "memory")
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) \
asm volatile( \
/*
* clone3 only needs two arguments (r2, r3), this means
* we can use r4 and r5 for args and thread function.
* r4 and r5 are callee-saved and are not overwritten.
* No need to put these values on the child stack.
*/ \
"lgr %%r4,%4\n" /* Save args in %r4 */ \
"lgr %%r5,%3\n" /* Save clone_restore_fn in %r5 */ \
"lgr %%r2,%1\n" /* Parameter 1: clone_args */ \
"lgr %%r3,%2\n" /* Parameter 2: size */ \
/*
* On s390x a syscall is done sc <syscall number>.
* That only works for syscalls < 255. clone3 is 435,
* therefore it is necessary to load the syscall number
* into r1 and do 'svc 0'.
*/ \
"lghi %%r1,"__stringify(__NR_clone3)"\n" \
"svc 0\n" \
"ltgr %0,%%r2\n" /* Set and check "ret" */ \
"jnz 0f\n" /* ret != 0: Continue caller */ \
"lgr %%r2,%%r4\n" /* Thread arguments taken from r4. */ \
"lgr %%r1,%%r5\n" /* Thread function taken from r5. */ \
"aghi %%r15,-160\n" /* Prepare stack frame */ \
"xc 0(8,%%r15),0(%%r15)\n" \
"basr %%r14,%%r1\n" /* Jump to clone_restore_fn() */ \
"j .+2\n" /* BUG(): Force PGM check */ \
"0:\n" /* Continue caller */ \
: "=d"(ret) \
: "a"(&clone_args), \
"d"(size), \
"d"(clone_restore_fn), \
"d"(args) \
: "0", "1", "2", "3", "4", "5", "cc", "memory")
/* clang-format on */
/* vdso mapping through this helper is not used on s390: always fails */
#define arch_map_vdso(map, compat) -1
int restore_gpregs(struct rt_sigframe *f, UserS390RegsEntry *r);
int restore_nonsigframe_gpregs(UserS390RegsEntry *r);
unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg);
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd,
		       unsigned long offset);
/* No-op: s390 TLS lives in access register %a0/%a1, nothing to restore here */
static inline void restore_tls(tls_t *ptls)
{
	(void)ptls;
}
/* Compat (31-bit) restore helpers are unsupported on s390: all stubs fail */
static inline void *alloc_compat_syscall_stack(void)
{
	return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
	return -1;
}
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
	return -1;
}
#endif /*__CR_ASM_RESTORER_H__*/
| 3,667 | 31.75 | 112 |
h
|
criu
|
criu-master/criu/arch/s390/include/asm/thread_pointer.h
|
/* __thread_pointer definition. Generic version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
/* Return the hardware TLS/thread pointer of the calling thread. */
static inline void *__criu_thread_pointer(void)
{
	void *tp = __builtin_thread_pointer();

	return tp;
}
#endif /* _SYS_THREAD_POINTER_H */
| 1,021 | 35.5 | 71 |
h
|
criu
|
criu-master/criu/arch/s390/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "asm-generic/vdso.h"
/*
 * This is a minimal amount of symbols
 * we should support at the moment.
 */
#define VDSO_SYMBOL_MAX 4
#define VDSO_SYMBOL_GTOD 0
/*
 * These definitions are used in pie/util-vdso.c to initialize the vdso symbol
 * name string table 'vdso_symbols'
 */
#define ARCH_VDSO_SYMBOLS_LIST \
	const char *aarch_vdso_symbol1 = "__kernel_gettimeofday"; \
	const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
	const char *aarch_vdso_symbol3 = "__kernel_clock_getres"; \
	const char *aarch_vdso_symbol4 = "__kernel_getcpu";
/* Expansion order must match the name list above */
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4
#endif /* __CR_ASM_VDSO_H__ */
h
|
criu
|
criu-master/criu/arch/x86/cpu.c
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include "bitops.h"
#include "asm/cpu.h"
#include <compel/asm/fpu.h>
#include <compel/cpu.h>
#include "common/compiler.h"
#include "cr_options.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "cpu.h"
#include "protobuf.h"
#include "images/cpuinfo.pb-c.h"
#undef LOG_PREFIX
#define LOG_PREFIX "cpu: "
static compel_cpuinfo_t rt_cpu_info;
/*
 * Report whether the host CPU exposes features CRIU cannot handle.
 * Currently none are known, so this always reports "supported" (0).
 */
static int cpu_has_unsupported_features(void)
{
	int unsupported = 0;

	/* Add checks for any unsupported features here. */
	return unsupported;
}
/*
 * Snapshot the run-time CPU information into rt_cpu_info and sanity-check
 * the minimal feature set CRIU relies on (FPU implies fxsave support).
 * Returns 0 on success, -1 if the host CPU is unusable.
 */
int cpu_init(void)
{
	compel_cpu_copy_cpuinfo(&rt_cpu_info);
	/* The FPU save-area layouts must match the kernel-defined sizes */
	BUILD_BUG_ON(sizeof(struct xsave_struct) != XSAVE_SIZE);
	BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != FXSAVE_SIZE);
	/*
	 * Make sure that at least FPU is onboard
	 * and fxsave is supported.
	 */
	if (compel_cpu_has_feature(X86_FEATURE_FPU)) {
		if (!compel_cpu_has_feature(X86_FEATURE_FXSR)) {
			pr_err("missing support fxsave/restore insns\n");
			return -1;
		}
	}
	pr_debug("fpu:%d fxsr:%d xsave:%d xsaveopt:%d xsavec:%d xgetbv1:%d xsaves:%d\n",
		 !!compel_cpu_has_feature(X86_FEATURE_FPU), !!compel_cpu_has_feature(X86_FEATURE_FXSR),
		 !!compel_cpu_has_feature(X86_FEATURE_OSXSAVE), !!compel_cpu_has_feature(X86_FEATURE_XSAVEOPT),
		 !!compel_cpu_has_feature(X86_FEATURE_XSAVEC), !!compel_cpu_has_feature(X86_FEATURE_XGETBV1),
		 !!compel_cpu_has_feature(X86_FEATURE_XSAVES));
	return cpu_has_unsupported_features() ? -1 : 0;
}
/*
 * Write the host CPU description (vendor, family/model/stepping,
 * capability words v2, xsave sizes/mask) into the cpuinfo image so a
 * later restore can validate compatibility. Returns 0 on success, -1
 * on error.
 */
int cpu_dump_cpuinfo(void)
{
	CpuinfoEntry cpu_info = CPUINFO_ENTRY__INIT;
	CpuinfoX86Entry cpu_x86_info = CPUINFO_X86_ENTRY__INIT;
	CpuinfoX86Entry *cpu_x86_info_ptr = &cpu_x86_info;
	struct cr_img *img;
	img = open_image(CR_FD_CPUINFO, O_DUMP);
	if (!img)
		return -1;
	cpu_info.x86_entry = &cpu_x86_info_ptr;
	cpu_info.n_x86_entry = 1;
	cpu_x86_info.vendor_id = (rt_cpu_info.x86_vendor == X86_VENDOR_INTEL) ? CPUINFO_X86_ENTRY__VENDOR__INTEL :
										CPUINFO_X86_ENTRY__VENDOR__AMD;
	cpu_x86_info.cpu_family = rt_cpu_info.x86_family;
	cpu_x86_info.model = rt_cpu_info.x86_model;
	cpu_x86_info.stepping = rt_cpu_info.x86_mask;
	/* Version 2 carries NCAPINTS_V2 capability words */
	cpu_x86_info.capability_ver = 2;
	cpu_x86_info.n_capability = ARRAY_SIZE(rt_cpu_info.x86_capability);
	cpu_x86_info.capability = (void *)rt_cpu_info.x86_capability;
	cpu_x86_info.has_xfeatures_mask = true;
	cpu_x86_info.xfeatures_mask = rt_cpu_info.xfeatures_mask;
	cpu_x86_info.has_xsave_size = true;
	cpu_x86_info.xsave_size = rt_cpu_info.xsave_size;
	cpu_x86_info.has_xsave_size_max = true;
	cpu_x86_info.xsave_size_max = rt_cpu_info.xsave_size_max;
	if (rt_cpu_info.x86_model_id[0])
		cpu_x86_info.model_id = rt_cpu_info.x86_model_id;
	if (pb_write_one(img, &cpu_info, PB_CPUINFO) < 0) {
		close_image(img);
		return -1;
	}
	close_image(img);
	return 0;
}
/*
 * Per-word masks of the capability bits that describe instruction-set
 * extensions (as opposed to non-ISA CPU features). Used by
 * cpu_validate_ins_features() for --cpu-cap=ins compatibility checks:
 * __ins_bit(word, feature) selects the feature's bit within its word.
 */
#define __ins_bit(__l, __v) (1u << ((__v)-32u * (__l)))
// clang-format off
static uint32_t x86_ins_capability_mask[NCAPINTS] = {
	[CPUID_1_EDX] =
		__ins_bit(CPUID_1_EDX, X86_FEATURE_FPU) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_TSC) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_CX8) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_SEP) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_CMOV) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_CLFLUSH) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_MMX) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_FXSR) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_XMM) |
		__ins_bit(CPUID_1_EDX, X86_FEATURE_XMM2),
	[CPUID_8000_0001_EDX] =
		__ins_bit(CPUID_8000_0001_EDX, X86_FEATURE_SYSCALL) |
		__ins_bit(CPUID_8000_0001_EDX, X86_FEATURE_MMXEXT) |
		__ins_bit(CPUID_8000_0001_EDX, X86_FEATURE_RDTSCP) |
		__ins_bit(CPUID_8000_0001_EDX, X86_FEATURE_3DNOWEXT) |
		__ins_bit(CPUID_8000_0001_EDX, X86_FEATURE_3DNOW),
	[CPUID_LNX_1] =
		__ins_bit(CPUID_LNX_1, X86_FEATURE_REP_GOOD) |
		__ins_bit(CPUID_LNX_1, X86_FEATURE_NOPL),
	[CPUID_1_ECX] =
		__ins_bit(CPUID_1_ECX, X86_FEATURE_XMM3) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_PCLMULQDQ) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_MWAIT) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_SSSE3) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_CX16) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_XMM4_1) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_XMM4_2) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_MOVBE) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_POPCNT) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_AES) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_XSAVE) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_OSXSAVE) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_AVX) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_F16C) |
		__ins_bit(CPUID_1_ECX, X86_FEATURE_RDRAND),
	[CPUID_8000_0001_ECX] =
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_ABM) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_SSE4A) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_MISALIGNSSE) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_3DNOWPREFETCH) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_XOP) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_FMA4) |
		__ins_bit(CPUID_8000_0001_ECX, X86_FEATURE_TBM),
	[CPUID_7_0_EBX] =
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_FSGSBASE) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_BMI1) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_HLE) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX2) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_BMI2) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_ERMS) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_RTM) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_MPX) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512F) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512DQ) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_RDSEED) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_ADX) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_CLFLUSHOPT) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512PF) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512ER) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512CD) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_SHA_NI) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512BW) |
		__ins_bit(CPUID_7_0_EBX, X86_FEATURE_AVX512VL),
	[CPUID_D_1_EAX] =
		__ins_bit(CPUID_D_1_EAX, X86_FEATURE_XSAVEOPT) |
		__ins_bit(CPUID_D_1_EAX, X86_FEATURE_XSAVEC) |
		__ins_bit(CPUID_D_1_EAX, X86_FEATURE_XGETBV1),
	[CPUID_7_0_ECX] =
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_AVX512VBMI) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_AVX512_VBMI2) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_GFNI) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_VAES) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_VPCLMULQDQ) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_AVX512_VNNI) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_AVX512_BITALG) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_TME) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_AVX512_VPOPCNTDQ) |
		__ins_bit(CPUID_7_0_ECX, X86_FEATURE_RDPID),
	[CPUID_8000_0008_EBX] =
		__ins_bit(CPUID_8000_0008_EBX, X86_FEATURE_CLZERO),
	[CPUID_7_0_EDX] =
		__ins_bit(CPUID_7_0_EDX, X86_FEATURE_AVX512_4VNNIW) |
		__ins_bit(CPUID_7_0_EDX, X86_FEATURE_AVX512_4FMAPS),
};
// clang-format on
#undef __ins_bit
/*
 * Verify that every instruction-set capability recorded in the image is
 * also available on the restore host; the host may offer more, never
 * less. Returns 0 when compatible, -1 otherwise.
 */
static int cpu_validate_ins_features(compel_cpuinfo_t *cpu_info)
{
	size_t word;

	for (word = 0; word < ARRAY_SIZE(cpu_info->x86_capability); word++) {
		uint32_t img_caps = cpu_info->x86_capability[word] & x86_ins_capability_mask[word];
		uint32_t host_caps = rt_cpu_info.x86_capability[word] & x86_ins_capability_mask[word];

		/*
		 * Destination might be more feature rich
		 * but not the reverse.
		 */
		if (img_caps & ~host_caps) {
			pr_err("CPU instruction capabilities do not match run time\n");
			return -1;
		}
	}
	return 0;
}
/*
 * Compare the CPU description taken from the image against the host
 * (rt_cpu_info), honouring the requested checking mode in opts.cpu_cap:
 * CPU_CAP_FPU checks FPU/xsave compatibility only, CPU_CAP_INS checks
 * instruction-set bits, CPU_CAP_CPU demands an exact capability match.
 * Returns 0 when the host can run the image, -1 otherwise.
 */
static int cpu_validate_features(compel_cpuinfo_t *cpu_info)
{
	if (cpu_has_unsupported_features())
		return -1;
	if (opts.cpu_cap & CPU_CAP_FPU) {
		uint64_t m;
		/*
		 * If we're requested to check FPU only ignore
		 * any other bit. It's up to a user if the
		 * rest of mismatches won't cause problems.
		 */
#define __mismatch_fpu_bit(__bit) (test_bit(__bit, (void *)cpu_info->x86_capability) && !compel_cpu_has_feature(__bit))
		if (__mismatch_fpu_bit(X86_FEATURE_FPU) || __mismatch_fpu_bit(X86_FEATURE_FXSR) ||
		    __mismatch_fpu_bit(X86_FEATURE_OSXSAVE) || __mismatch_fpu_bit(X86_FEATURE_XSAVES)) {
			pr_err("FPU feature required by image "
			       "is not supported on host "
			       "(fpu:%d fxsr:%d osxsave:%d xsaves:%d)\n",
			       __mismatch_fpu_bit(X86_FEATURE_FPU), __mismatch_fpu_bit(X86_FEATURE_FXSR),
			       __mismatch_fpu_bit(X86_FEATURE_OSXSAVE), __mismatch_fpu_bit(X86_FEATURE_XSAVES));
			return -1;
		}
#undef __mismatch_fpu_bit
		/*
		 * Make sure the xsave features are compatible. Check that on
		 * the destination there are all the features which were on the
		 * source.
		 */
		if ((m = cpu_info->xfeatures_mask & ~rt_cpu_info.xfeatures_mask)) {
			pr_err("CPU xfeatures has unsupported bits (%#" PRIx64 ")\n", m);
			return -1;
		}
		/*
		 * Make sure the xsave sizes are compatible. We already hit the
		 * issue with libc where we've checkpointed the container on
		 * old machine but restored on more modern one and libc fetched
		 * new xsave frame size directly by xsave instruction with
		 * greedy feature mask causing programs to misbehave.
		 */
		if (cpu_info->xsave_size != rt_cpu_info.xsave_size) {
			pr_err("CPU xsave size mismatch (%u/%u)\n", cpu_info->xsave_size, rt_cpu_info.xsave_size);
			return -1;
		}
		if (cpu_info->xsave_size_max != rt_cpu_info.xsave_size_max) {
			pr_err("CPU xsave max size mismatch (%u/%u)\n", cpu_info->xsave_size_max,
			       rt_cpu_info.xsave_size_max);
			return -1;
		}
	}
	/*
	 * Capability on instructions level only.
	 */
	if (opts.cpu_cap & CPU_CAP_INS) {
		if (cpu_validate_ins_features(cpu_info))
			return -1;
	}
	/*
	 * Strict capability mode. Everything must match.
	 */
	if (opts.cpu_cap & CPU_CAP_CPU) {
		if (memcmp(cpu_info->x86_capability, rt_cpu_info.x86_capability, sizeof(cpu_info->x86_capability))) {
			pr_err("CPU capabilities do not match run time\n");
			return -1;
		}
	}
	return 0;
}
/* Known cpuinfo image layout versions and their capability-word counts. */
static const struct {
	const uint32_t capability_ver;
	const uint32_t ncapints;
} ncapints[] = {
	{ .capability_ver = 1, .ncapints = NCAPINTS_V1 },
	{ .capability_ver = 2, .ncapints = NCAPINTS_V2 },
};
/*
 * Convert the protobuf cpuinfo entry read from an image into a
 * compel_cpuinfo_t. Fields the image does not carry (old image
 * versions, runtime-only data) are filled from the host's rt_cpu_info
 * so the later comparison logic can treat the result uniformly.
 * Returns a heap-allocated cpuinfo (caller frees) or NULL on error.
 */
static compel_cpuinfo_t *img_to_cpuinfo(CpuinfoX86Entry *img_x86_entry)
{
	compel_cpuinfo_t *cpu_info;
	size_t size, i;
	BUILD_BUG_ON(sizeof(img_x86_entry->capability[0]) != sizeof(cpu_info->x86_capability[0]));
	BUILD_BUG_ON(ARRAY_SIZE(rt_cpu_info.x86_capability) != NCAPINTS);
	if (img_x86_entry->vendor_id != CPUINFO_X86_ENTRY__VENDOR__INTEL &&
	    img_x86_entry->vendor_id != CPUINFO_X86_ENTRY__VENDOR__AMD) {
		pr_err("Image carries unknown vendor %u\n", (unsigned)img_x86_entry->vendor_id);
		return NULL;
	}
	/* The number of capability words must match the claimed layout version */
	for (i = 0; i < ARRAY_SIZE(ncapints); i++) {
		if (img_x86_entry->capability_ver == ncapints[i].capability_ver) {
			if (img_x86_entry->n_capability != ncapints[i].ncapints) {
				pr_err("Image carries %u words while %u expected\n",
				       (unsigned)img_x86_entry->n_capability, (unsigned)ncapints[i].ncapints);
				return NULL;
			}
			break;
		}
	}
	if (i >= ARRAY_SIZE(ncapints)) {
		/* Fixed: format used %d for an unsigned argument */
		pr_err("Image carries unknown capability version %u\n", (unsigned)img_x86_entry->capability_ver);
		return NULL;
	}
	cpu_info = xzalloc(sizeof(*cpu_info));
	if (!cpu_info)
		return NULL;
	/*
	 * Copy caps from image and fill the left ones from
	 * run-time information for easier compatibility testing.
	 */
	size = sizeof(img_x86_entry->capability[0]) * img_x86_entry->n_capability;
	memcpy(cpu_info->x86_capability, img_x86_entry->capability, size);
	if (img_x86_entry->capability_ver == 1) {
		memcpy(&cpu_info->x86_capability[NCAPINTS_V1], &rt_cpu_info.x86_capability[NCAPINTS_V1],
		       (NCAPINTS_V2 - NCAPINTS_V1) * sizeof(rt_cpu_info.x86_capability[0]));
	}
	if (img_x86_entry->vendor_id == CPUINFO_X86_ENTRY__VENDOR__INTEL)
		cpu_info->x86_vendor = X86_VENDOR_INTEL;
	else
		cpu_info->x86_vendor = X86_VENDOR_AMD;
	cpu_info->x86_family = img_x86_entry->cpu_family;
	cpu_info->x86_model = img_x86_entry->model;
	cpu_info->x86_mask = img_x86_entry->stepping;
	cpu_info->extended_cpuid_level = rt_cpu_info.extended_cpuid_level;
	cpu_info->cpuid_level = rt_cpu_info.cpuid_level;
	cpu_info->x86_power = rt_cpu_info.x86_power;
	/*
	 * Fixed: the vendor id string must be taken from the host's
	 * vendor id, not from its (differently sized) model id string.
	 */
	memcpy(cpu_info->x86_vendor_id, rt_cpu_info.x86_vendor_id, sizeof(cpu_info->x86_vendor_id));
	/* xzalloc'd destination + size-1 bound keeps this NUL-terminated */
	strncpy(cpu_info->x86_model_id, img_x86_entry->model_id, sizeof(cpu_info->x86_model_id) - 1);
	/*
	 * For old images where no xfeatures_mask present we
	 * simply fetch runtime cpu mask because later we will
	 * do either instruction capability check, either strict
	 * check for capabilities.
	 */
	if (!img_x86_entry->has_xfeatures_mask) {
		cpu_info->xfeatures_mask = rt_cpu_info.xfeatures_mask;
	} else
		cpu_info->xfeatures_mask = img_x86_entry->xfeatures_mask;
	/*
	 * Same for other fields.
	 */
	if (!img_x86_entry->has_xsave_size)
		cpu_info->xsave_size = rt_cpu_info.xsave_size;
	else
		cpu_info->xsave_size = img_x86_entry->xsave_size;
	if (!img_x86_entry->has_xsave_size_max)
		cpu_info->xsave_size_max = rt_cpu_info.xsave_size_max;
	else
		cpu_info->xsave_size_max = img_x86_entry->xsave_size_max;
	return cpu_info;
}
/*
 * Read the cpuinfo image and validate it against the host CPU per the
 * configured checking mode. Returns 0 when compatible, -1 otherwise.
 *
 * NOTE(review): img_cpu_info unpacked by pb_read_one() is never freed
 * here — looks like a one-shot leak; confirm whether that is intended.
 */
int cpu_validate_cpuinfo(void)
{
	compel_cpuinfo_t *cpu_info = NULL;
	CpuinfoX86Entry *img_x86_entry;
	CpuinfoEntry *img_cpu_info;
	struct cr_img *img;
	int ret = -1;
	img = open_image(CR_FD_CPUINFO, O_RSTR);
	if (!img)
		return -1;
	if (pb_read_one(img, &img_cpu_info, PB_CPUINFO) < 0)
		goto err;
	/* Exactly one x86 entry is written by cpu_dump_cpuinfo() */
	if (img_cpu_info->n_x86_entry != 1) {
		pr_err("No x86 related cpuinfo in image, "
		       "corruption (n_x86_entry = %zi)\n",
		       img_cpu_info->n_x86_entry);
		goto err;
	}
	img_x86_entry = img_cpu_info->x86_entry[0];
	if (img_x86_entry->vendor_id != CPUINFO_X86_ENTRY__VENDOR__INTEL &&
	    img_x86_entry->vendor_id != CPUINFO_X86_ENTRY__VENDOR__AMD) {
		pr_err("Unknown cpu vendor %d\n", img_x86_entry->vendor_id);
		goto err;
	}
	cpu_info = img_to_cpuinfo(img_x86_entry);
	if (cpu_info)
		ret = cpu_validate_features(cpu_info);
err:
	xfree(cpu_info);
	close_image(img);
	return ret;
}
/* "criu cpuinfo dump" entry point: probe the CPU and write the image. */
int cpuinfo_dump(void)
{
	if (cpu_init() || cpu_dump_cpuinfo())
		return -1;
	return 0;
}
/*
 * "criu cpuinfo check" entry point: probe the CPU and validate the
 * written image against it. Returns 0 on match, 1 otherwise.
 */
int cpuinfo_check(void)
{
	if (cpu_init())
		return 1;
	/*
	 * Force to check all caps if empty passed,
	 * still allow to check instructions only
	 * and etc.
	 */
	if (opts.cpu_cap == CPU_CAP_NONE)
		opts.cpu_cap = CPU_CAP_ALL;
	return cpu_validate_cpuinfo() ? 1 : 0;
}
| 14,140 | 29.542117 | 119 |
c
|
criu
|
criu-master/criu/arch/x86/crtools.c
|
#include "compel/asm/fpu.h"
#include "compel/infect.h"
#include "compel/plugins/std/syscall-codes.h"
#include "cpu.h"
#include "cr_options.h"
#include "images/core.pb-c.h"
#include "log.h"
#include "protobuf.h"
#include "types.h"
#include "asm/compat.h"
#undef LOG_PREFIX
#define LOG_PREFIX "x86: "
#define XSAVE_PB_NELEMS(__s, __obj, __member) (sizeof(__s) / sizeof(*(__obj)->__member))
/*
 * Copy the just-dumped register state of a task into its CoreEntry.
 *
 * @x:      CoreEntry of the task (void* to match the callback signature)
 * @regs:   GP registers as fetched via ptrace (native 64-bit or compat 32-bit)
 * @fpregs: FPU/xsave area, may be NULL when FP state was not fetched
 *
 * Returns 0 on success, -1 when an xsave component size disagrees with
 * the protobuf layout (image corruption guard).
 */
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{
	CoreEntry *core = x;
	UserX86RegsEntry *gpregs = core->thread_info->gpregs;
/* Copy one scalar register field, converting to the protobuf field type */
#define assign_reg(dst, src, e) \
	do { \
		dst->e = (__typeof__(dst->e))src.e; \
	} while (0)
#define assign_array(dst, src, e) memcpy(dst->e, &src.e, sizeof(src.e))
/* Copy one xsave component out of the raw area, verifying its size */
#define assign_xsave(feature, xsave, member, area) \
	do { \
		if (compel_fpu_has_feature(feature)) { \
			uint32_t off = compel_fpu_feature_offset(feature); \
			void *from = &area[off]; \
			size_t size = pb_repeated_size(xsave, member); \
			size_t xsize = (size_t)compel_fpu_feature_size(feature); \
			if (xsize != size) { \
				pr_err("%s reported %zu bytes (expecting %zu)\n", #feature, xsize, size); \
				return -1; \
			} \
			memcpy(xsave->member, from, size); \
		} \
	} while (0)
	if (user_regs_native(regs)) {
		assign_reg(gpregs, regs->native, r15);
		assign_reg(gpregs, regs->native, r14);
		assign_reg(gpregs, regs->native, r13);
		assign_reg(gpregs, regs->native, r12);
		assign_reg(gpregs, regs->native, bp);
		assign_reg(gpregs, regs->native, bx);
		assign_reg(gpregs, regs->native, r11);
		assign_reg(gpregs, regs->native, r10);
		assign_reg(gpregs, regs->native, r9);
		assign_reg(gpregs, regs->native, r8);
		assign_reg(gpregs, regs->native, ax);
		assign_reg(gpregs, regs->native, cx);
		assign_reg(gpregs, regs->native, dx);
		assign_reg(gpregs, regs->native, si);
		assign_reg(gpregs, regs->native, di);
		assign_reg(gpregs, regs->native, orig_ax);
		assign_reg(gpregs, regs->native, ip);
		assign_reg(gpregs, regs->native, cs);
		assign_reg(gpregs, regs->native, flags);
		assign_reg(gpregs, regs->native, sp);
		assign_reg(gpregs, regs->native, ss);
		assign_reg(gpregs, regs->native, fs_base);
		assign_reg(gpregs, regs->native, gs_base);
		assign_reg(gpregs, regs->native, ds);
		assign_reg(gpregs, regs->native, es);
		assign_reg(gpregs, regs->native, fs);
		assign_reg(gpregs, regs->native, gs);
		gpregs->mode = USER_X86_REGS_MODE__NATIVE;
	} else {
		assign_reg(gpregs, regs->compat, bx);
		assign_reg(gpregs, regs->compat, cx);
		assign_reg(gpregs, regs->compat, dx);
		assign_reg(gpregs, regs->compat, si);
		assign_reg(gpregs, regs->compat, di);
		assign_reg(gpregs, regs->compat, bp);
		assign_reg(gpregs, regs->compat, ax);
		assign_reg(gpregs, regs->compat, ds);
		assign_reg(gpregs, regs->compat, es);
		assign_reg(gpregs, regs->compat, fs);
		assign_reg(gpregs, regs->compat, gs);
		assign_reg(gpregs, regs->compat, orig_ax);
		assign_reg(gpregs, regs->compat, ip);
		assign_reg(gpregs, regs->compat, cs);
		assign_reg(gpregs, regs->compat, flags);
		assign_reg(gpregs, regs->compat, sp);
		assign_reg(gpregs, regs->compat, ss);
		gpregs->mode = USER_X86_REGS_MODE__COMPAT;
	}
	gpregs->has_mode = true;
	if (!fpregs)
		return 0;
	assign_reg(core->thread_info->fpregs, fpregs->i387, cwd);
	assign_reg(core->thread_info->fpregs, fpregs->i387, swd);
	assign_reg(core->thread_info->fpregs, fpregs->i387, twd);
	assign_reg(core->thread_info->fpregs, fpregs->i387, fop);
	assign_reg(core->thread_info->fpregs, fpregs->i387, rip);
	assign_reg(core->thread_info->fpregs, fpregs->i387, rdp);
	assign_reg(core->thread_info->fpregs, fpregs->i387, mxcsr);
	assign_reg(core->thread_info->fpregs, fpregs->i387, mxcsr_mask);
	/* Make sure we have enough space */
	BUG_ON(core->thread_info->fpregs->n_st_space != ARRAY_SIZE(fpregs->i387.st_space));
	BUG_ON(core->thread_info->fpregs->n_xmm_space != ARRAY_SIZE(fpregs->i387.xmm_space));
	assign_array(core->thread_info->fpregs, fpregs->i387, st_space);
	assign_array(core->thread_info->fpregs, fpregs->i387, xmm_space);
	if (compel_cpu_has_feature(X86_FEATURE_OSXSAVE)) {
		UserX86XsaveEntry *xsave = core->thread_info->fpregs->xsave;
		uint8_t *extended_state_area = (void *)fpregs;
		/*
		 * xcomp_bv is designated for compacted format but user
		 * space never use it, thus we can simply ignore.
		 */
		assign_reg(xsave, fpregs->xsave_hdr, xstate_bv);
		assign_xsave(XFEATURE_YMM, xsave, ymmh_space, extended_state_area);
		assign_xsave(XFEATURE_BNDREGS, xsave, bndreg_state, extended_state_area);
		assign_xsave(XFEATURE_BNDCSR, xsave, bndcsr_state, extended_state_area);
		assign_xsave(XFEATURE_OPMASK, xsave, opmask_reg, extended_state_area);
		assign_xsave(XFEATURE_ZMM_Hi256, xsave, zmm_upper, extended_state_area);
		assign_xsave(XFEATURE_Hi16_ZMM, xsave, hi16_zmm, extended_state_area);
		assign_xsave(XFEATURE_PKRU, xsave, pkru, extended_state_area);
	}
#undef assign_reg
#undef assign_array
#undef assign_xsave
	return 0;
}
/*
 * Carve GDT_ENTRY_TLS_NUM TLS descriptor entries (pointer array plus the
 * entries themselves) out of the pre-sized mempool and init each one.
 */
static void alloc_tls(ThreadInfoX86 *ti, void **mempool)
{
	int i;
	ti->tls = xptr_pull_s(mempool, GDT_ENTRY_TLS_NUM * sizeof(UserDescT *));
	ti->n_tls = GDT_ENTRY_TLS_NUM;
	for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
		ti->tls[i] = xptr_pull(mempool, UserDescT);
		user_desc_t__init(ti->tls[i]);
	}
}
/*
 * Allocate, for each xsave feature the host CPU exposes, the matching
 * repeated-field buffer in the xsave protobuf entry. Buffers for absent
 * features stay NULL. Returns 0 on success, -1 on allocation failure
 * (already-allocated buffers are freed later by arch_free_thread_info()).
 */
static int alloc_xsave_extends(UserX86XsaveEntry *xsave)
{
	if (compel_fpu_has_feature(XFEATURE_YMM)) {
		xsave->n_ymmh_space = XSAVE_PB_NELEMS(struct ymmh_struct, xsave, ymmh_space);
		xsave->ymmh_space = xzalloc(pb_repeated_size(xsave, ymmh_space));
		if (!xsave->ymmh_space)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_BNDREGS)) {
		xsave->n_bndreg_state = XSAVE_PB_NELEMS(struct mpx_bndreg_state, xsave, bndreg_state);
		xsave->bndreg_state = xzalloc(pb_repeated_size(xsave, bndreg_state));
		if (!xsave->bndreg_state)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_BNDCSR)) {
		xsave->n_bndcsr_state = XSAVE_PB_NELEMS(struct mpx_bndcsr_state, xsave, bndcsr_state);
		xsave->bndcsr_state = xzalloc(pb_repeated_size(xsave, bndcsr_state));
		if (!xsave->bndcsr_state)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_OPMASK)) {
		xsave->n_opmask_reg = XSAVE_PB_NELEMS(struct avx_512_opmask_state, xsave, opmask_reg);
		xsave->opmask_reg = xzalloc(pb_repeated_size(xsave, opmask_reg));
		if (!xsave->opmask_reg)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_ZMM_Hi256)) {
		xsave->n_zmm_upper = XSAVE_PB_NELEMS(struct avx_512_zmm_uppers_state, xsave, zmm_upper);
		xsave->zmm_upper = xzalloc(pb_repeated_size(xsave, zmm_upper));
		if (!xsave->zmm_upper)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_Hi16_ZMM)) {
		xsave->n_hi16_zmm = XSAVE_PB_NELEMS(struct avx_512_hi16_state, xsave, hi16_zmm);
		xsave->hi16_zmm = xzalloc(pb_repeated_size(xsave, hi16_zmm));
		if (!xsave->hi16_zmm)
			goto err;
	}
	if (compel_fpu_has_feature(XFEATURE_PKRU)) {
		xsave->n_pkru = XSAVE_PB_NELEMS(struct pkru_state, xsave, pkru);
		xsave->pkru = xzalloc(pb_repeated_size(xsave, pkru));
		if (!xsave->pkru)
			goto err;
	}
	return 0;
err:
	return -1;
}
/*
 * Allocate and wire up the per-thread x86 info of a CoreEntry:
 * gpregs and TLS always; fpregs only when the CPU has an FPU; the
 * xsave sub-entry only when OSXSAVE is available. Everything except
 * the repeated buffers comes from one sizing'd allocation consumed
 * via xptr_pull. Returns 0 on success, -1 on allocation failure.
 *
 * NOTE(review): on the err path the partially built structures are not
 * freed here — presumably the caller tears them down via
 * arch_free_thread_info(); confirm against callers.
 */
int arch_alloc_thread_info(CoreEntry *core)
{
	size_t sz;
	bool with_fpu, with_xsave = false;
	void *m;
	ThreadInfoX86 *ti = NULL;
	with_fpu = compel_cpu_has_feature(X86_FEATURE_FPU);
	sz = sizeof(ThreadInfoX86) + sizeof(UserX86RegsEntry) + GDT_ENTRY_TLS_NUM * sizeof(UserDescT) +
	     GDT_ENTRY_TLS_NUM * sizeof(UserDescT *);
	if (with_fpu) {
		sz += sizeof(UserX86FpregsEntry);
		with_xsave = compel_cpu_has_feature(X86_FEATURE_OSXSAVE);
		if (with_xsave)
			sz += sizeof(UserX86XsaveEntry);
	}
	m = xmalloc(sz);
	if (!m)
		return -1;
	ti = core->thread_info = xptr_pull(&m, ThreadInfoX86);
	thread_info_x86__init(ti);
	ti->gpregs = xptr_pull(&m, UserX86RegsEntry);
	user_x86_regs_entry__init(ti->gpregs);
	alloc_tls(ti, &m);
	if (with_fpu) {
		UserX86FpregsEntry *fpregs;
		fpregs = ti->fpregs = xptr_pull(&m, UserX86FpregsEntry);
		user_x86_fpregs_entry__init(fpregs);
		/* These are numbers from kernel */
		fpregs->n_st_space = 32;
		fpregs->n_xmm_space = 64;
		fpregs->st_space = xzalloc(pb_repeated_size(fpregs, st_space));
		fpregs->xmm_space = xzalloc(pb_repeated_size(fpregs, xmm_space));
		if (!fpregs->st_space || !fpregs->xmm_space)
			goto err;
		if (with_xsave) {
			UserX86XsaveEntry *xsave;
			xsave = fpregs->xsave = xptr_pull(&m, UserX86XsaveEntry);
			user_x86_xsave_entry__init(xsave);
			if (alloc_xsave_extends(xsave))
				goto err;
		}
	}
	return 0;
err:
	return -1;
}
/*
 * Free the per-thread info built by arch_alloc_thread_info().
 *
 * fpregs (and its xsave sub-entry) exist only when the dump host had an
 * FPU / OSXSAVE (see arch_alloc_thread_info), so both must be checked
 * before touching the nested buffers — the old code dereferenced
 * fpregs unconditionally and would crash on an FPU-less thread_info.
 */
void arch_free_thread_info(CoreEntry *core)
{
	if (!core->thread_info)
		return;
	if (core->thread_info->fpregs) {
		if (core->thread_info->fpregs->xsave) {
			xfree(core->thread_info->fpregs->xsave->ymmh_space);
			xfree(core->thread_info->fpregs->xsave->pkru);
			xfree(core->thread_info->fpregs->xsave->hi16_zmm);
			xfree(core->thread_info->fpregs->xsave->zmm_upper);
			xfree(core->thread_info->fpregs->xsave->opmask_reg);
			xfree(core->thread_info->fpregs->xsave->bndcsr_state);
			xfree(core->thread_info->fpregs->xsave->bndreg_state);
		}
		xfree(core->thread_info->fpregs->st_space);
		xfree(core->thread_info->fpregs->xmm_space);
	}
	xfree(core->thread_info);
}
/*
 * Sanity-check the FPU/xsave data of a CoreEntry before restore:
 * the fixed i387 areas must be at least kernel-sized, and each present
 * xsave component buffer must be at least as large as its protobuf
 * layout expects. Returns true when the frame is usable.
 */
static bool valid_xsave_frame(CoreEntry *core)
{
	UserX86XsaveEntry *xsave = core->thread_info->fpregs->xsave;
	struct xsave_struct *x = NULL;
	if (core->thread_info->fpregs->n_st_space < ARRAY_SIZE(x->i387.st_space)) {
		pr_err("Corruption in FPU st_space area "
		       "(got %li but %li expected)\n",
		       (long)core->thread_info->fpregs->n_st_space, (long)ARRAY_SIZE(x->i387.st_space));
		return false;
	}
	if (core->thread_info->fpregs->n_xmm_space < ARRAY_SIZE(x->i387.xmm_space)) {
		/* Fixed: this branch reported n_st_space instead of n_xmm_space */
		pr_err("Corruption in FPU xmm_space area "
		       "(got %li but %li expected)\n",
		       (long)core->thread_info->fpregs->n_xmm_space, (long)ARRAY_SIZE(x->i387.xmm_space));
		return false;
	}
	if (compel_cpu_has_feature(X86_FEATURE_OSXSAVE)) {
		if (xsave) {
			size_t i;
			/* Expected (from struct layout) vs obtained (from image) sizes */
			struct {
				const char *name;
				size_t expected;
				size_t obtained;
				void *ptr;
			} features[] = {
				{
					.name = __stringify_1(XFEATURE_YMM),
					.expected = XSAVE_PB_NELEMS(struct ymmh_struct, xsave, ymmh_space),
					.obtained = xsave->n_ymmh_space,
					.ptr = xsave->ymmh_space,
				},
				{
					.name = __stringify_1(XFEATURE_BNDREGS),
					.expected = XSAVE_PB_NELEMS(struct mpx_bndreg_state, xsave, bndreg_state),
					.obtained = xsave->n_bndreg_state,
					.ptr = xsave->bndreg_state,
				},
				{
					.name = __stringify_1(XFEATURE_BNDCSR),
					.expected = XSAVE_PB_NELEMS(struct mpx_bndcsr_state, xsave, bndcsr_state),
					.obtained = xsave->n_bndcsr_state,
					.ptr = xsave->bndcsr_state,
				},
				{
					.name = __stringify_1(XFEATURE_OPMASK),
					.expected = XSAVE_PB_NELEMS(struct avx_512_opmask_state, xsave, opmask_reg),
					.obtained = xsave->n_opmask_reg,
					.ptr = xsave->opmask_reg,
				},
				{
					.name = __stringify_1(XFEATURE_ZMM_Hi256),
					.expected = XSAVE_PB_NELEMS(struct avx_512_zmm_uppers_state, xsave, zmm_upper),
					.obtained = xsave->n_zmm_upper,
					.ptr = xsave->zmm_upper,
				},
				{
					.name = __stringify_1(XFEATURE_Hi16_ZMM),
					.expected = XSAVE_PB_NELEMS(struct avx_512_hi16_state, xsave, hi16_zmm),
					.obtained = xsave->n_hi16_zmm,
					.ptr = xsave->hi16_zmm,
				},
				{
					.name = __stringify_1(XFEATURE_PKRU),
					.expected = XSAVE_PB_NELEMS(struct pkru_state, xsave, pkru),
					.obtained = xsave->n_pkru,
					.ptr = xsave->pkru,
				},
			};
			for (i = 0; i < ARRAY_SIZE(features); i++) {
				/* Components absent from the image are simply skipped */
				if (!features[i].ptr)
					continue;
				if (features[i].expected > features[i].obtained) {
					pr_err("Corruption in %s area (expected %zu but %zu obtained)\n",
					       features[i].name, features[i].expected, features[i].obtained);
					return false;
				}
			}
		}
	} else {
		/*
		 * If the image has xsave area present then CPU we're restoring
		 * on must have X86_FEATURE_OSXSAVE feature until explicitly
		 * stated in options.
		 */
		if (xsave) {
			if (opts.cpu_cap & CPU_CAP_FPU) {
				pr_err("FPU xsave area present, "
				       "but host cpu doesn't support it\n");
				return false;
			} else
				pr_warn_once("FPU is about to restore ignoring xsave state!\n");
		}
	}
	return true;
}
/* Dump the runtime xsave frame header fields for debugging. */
static void show_rt_xsave_frame(struct xsave_struct *x)
{
	struct i387_fxsave_struct *fx = &x->i387;
	struct fpx_sw_bytes *sw = (void *)&fx->sw_reserved;
	struct xsave_hdr_struct *hdr = &x->xsave_hdr;

	pr_debug("xsave runtime structure\n");
	pr_debug("-----------------------\n");
	pr_debug("cwd:%#x swd:%#x twd:%#x fop:%#x mxcsr:%#x mxcsr_mask:%#x\n", (int)fx->cwd, (int)fx->swd,
		 (int)fx->twd, (int)fx->fop, (int)fx->mxcsr, (int)fx->mxcsr_mask);
	pr_debug("magic1:%#x extended_size:%u xstate_bv:%#lx xstate_size:%u\n", sw->magic1, sw->extended_size,
		 (long)sw->xstate_bv, sw->xstate_size);
	pr_debug("xstate_bv: %#lx\n", (long)hdr->xstate_bv);
	pr_debug("-----------------------\n");
}
/*
 * Fill the FPU area of the restorer sigframe from the image data:
 * copy the legacy fxsave register set, convert it for ia32 tasks,
 * and (when the CPU supports OSXSAVE) rebuild the extended xsave
 * frame plus its fpx_sw_bytes header and trailing magic marker.
 * Returns 0 on success, -1 on corrupted image data.
 */
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
	fpu_state_t *fpu_state = core_is_compat(core) ? &sigframe->compat.fpu_state : &sigframe->native.fpu_state;
	struct xsave_struct *x = core_is_compat(core) ? (void *)&fpu_state->fpu_state_ia32.xsave :
							(void *)&fpu_state->fpu_state_64.xsave;

	/*
	 * If no FPU information provided -- we're restoring
	 * old image which has no FPU support, or the dump simply
	 * has no FPU support at all.
	 */
	if (!core->thread_info->fpregs) {
		fpu_state->has_fpu = false;
		return 0;
	}

	if (!valid_xsave_frame(core))
		return -1;

	fpu_state->has_fpu = true;

#define assign_reg(dst, src, e) \
	do { \
		dst.e = (__typeof__(dst.e))src->e; \
	} while (0)
#define assign_array(dst, src, e) memcpy(dst.e, (src)->e, sizeof(dst.e))
/*
 * Copy one extended-state component from the image into the in-frame
 * area at the CPU-reported offset, growing xstate_size and setting the
 * feature's bit in xstate_bv (both locals of the caller).
 */
#define assign_xsave(feature, xsave, member, area) \
	do { \
		if (compel_fpu_has_feature(feature) && (xsave->xstate_bv & (1UL << feature))) { \
			uint32_t off = compel_fpu_feature_offset(feature); \
			void *to = &area[off]; \
			void *from = xsave->member; \
			size_t size = pb_repeated_size(xsave, member); \
			size_t xsize = (size_t)compel_fpu_feature_size(feature); \
			size_t xstate_size_next = off + xsize; \
			if (xsize != size) { \
				if (size) { \
					pr_err("%s reported %zu bytes (expecting %zu)\n", #feature, xsize, size); \
					return -1; \
				} else { \
					pr_debug("%s is not present in image, ignore\n", #feature); \
				} \
			} \
			xstate_bv |= (1UL << feature); \
			BUG_ON(xstate_size > xstate_size_next); \
			xstate_size = xstate_size_next; \
			memcpy(to, from, size); \
		} \
	} while (0)

	assign_reg(x->i387, core->thread_info->fpregs, cwd);
	assign_reg(x->i387, core->thread_info->fpregs, swd);
	assign_reg(x->i387, core->thread_info->fpregs, twd);
	assign_reg(x->i387, core->thread_info->fpregs, fop);
	assign_reg(x->i387, core->thread_info->fpregs, rip);
	assign_reg(x->i387, core->thread_info->fpregs, rdp);
	assign_reg(x->i387, core->thread_info->fpregs, mxcsr);
	assign_reg(x->i387, core->thread_info->fpregs, mxcsr_mask);

	assign_array(x->i387, core->thread_info->fpregs, st_space);
	assign_array(x->i387, core->thread_info->fpregs, xmm_space);

	/* ia32 tasks also need the legacy i387 state derived from the fxsr form. */
	if (core_is_compat(core))
		compel_convert_from_fxsr(&fpu_state->fpu_state_ia32.fregs_state.i387_ia32,
					 &fpu_state->fpu_state_ia32.xsave.i387);

	if (compel_cpu_has_feature(X86_FEATURE_OSXSAVE)) {
		struct fpx_sw_bytes *fpx_sw = (void *)&x->i387.sw_reserved;
		size_t xstate_size = XSAVE_YMM_OFFSET;
		uint32_t xstate_bv = 0;
		void *magic2;

		/* FP and SSE are always present in an xsave frame. */
		xstate_bv = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;

		/*
		 * fpregs->xsave pointer might not present on image so we
		 * simply clear out everything.
		 */
		if (core->thread_info->fpregs->xsave) {
			UserX86XsaveEntry *xsave = core->thread_info->fpregs->xsave;
			uint8_t *extended_state_area = (void *)x;

			/*
			 * Note the order does matter here and bound
			 * to the increasing offsets of XFEATURE_x
			 * inside memory layout (xstate_size calculation).
			 */
			assign_xsave(XFEATURE_YMM, xsave, ymmh_space, extended_state_area);
			assign_xsave(XFEATURE_BNDREGS, xsave, bndreg_state, extended_state_area);
			assign_xsave(XFEATURE_BNDCSR, xsave, bndcsr_state, extended_state_area);
			assign_xsave(XFEATURE_OPMASK, xsave, opmask_reg, extended_state_area);
			assign_xsave(XFEATURE_ZMM_Hi256, xsave, zmm_upper, extended_state_area);
			assign_xsave(XFEATURE_Hi16_ZMM, xsave, hi16_zmm, extended_state_area);
			assign_xsave(XFEATURE_PKRU, xsave, pkru, extended_state_area);
		}

		x->xsave_hdr.xstate_bv = xstate_bv;

		fpx_sw->magic1 = FP_XSTATE_MAGIC1;
		fpx_sw->xstate_bv = xstate_bv;
		fpx_sw->xstate_size = xstate_size;
		fpx_sw->extended_size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

		/*
		 * This should be at the end of xsave frame.
		 */
		magic2 = (void *)x + xstate_size;
		*(u32 *)magic2 = FP_XSTATE_MAGIC2;
	}

	show_rt_xsave_frame(x);

#undef assign_reg
#undef assign_array
#undef assign_xsave

	return 0;
}
/* Copy one 32-bit GP register from the image entry into the compat sigframe. */
#define CPREG32(d) f->compat.uc.uc_mcontext.d = r->d
/* Fill the compat (ia32) sigframe mcontext from the image GP registers. */
static void restore_compat_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{
	CPREG32(gs);
	CPREG32(fs);
	CPREG32(es);
	CPREG32(ds);

	CPREG32(di);
	CPREG32(si);
	CPREG32(bp);
	CPREG32(sp);
	CPREG32(bx);
	CPREG32(dx);
	CPREG32(cx);
	CPREG32(ip);
	CPREG32(ax);

	CPREG32(cs);
	CPREG32(ss);
	CPREG32(flags);

	f->is_native = false;
}
#undef CPREG32
/* Copy a GP register from image field 's' into native mcontext field 'd'. */
#define CPREG64(d, s) f->native.uc.uc_mcontext.d = r->s
/* Fill the native 64-bit sigframe mcontext from the image GP registers. */
static void restore_native_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{
	CPREG64(rdi, di);
	CPREG64(rsi, si);
	CPREG64(rbp, bp);
	CPREG64(rsp, sp);
	CPREG64(rbx, bx);
	CPREG64(rdx, dx);
	CPREG64(rcx, cx);
	CPREG64(rip, ip);
	CPREG64(rax, ax);

	CPREG64(r8, r8);
	CPREG64(r9, r9);
	CPREG64(r10, r10);
	CPREG64(r11, r11);
	CPREG64(r12, r12);
	CPREG64(r13, r13);
	CPREG64(r14, r14);
	CPREG64(r15, r15);

	CPREG64(cs, cs);
	CPREG64(eflags, flags);

	f->is_native = true;
}
#undef CPREG64
/*
 * Write the image GP registers into the sigframe, dispatching on the
 * task mode (native 64-bit vs ia32 compat). Returns 0, or -1 if the
 * mode field is corrupted.
 */
int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{
	if (r->mode == USER_X86_REGS_MODE__NATIVE) {
		restore_native_gpregs(f, r);
	} else if (r->mode == USER_X86_REGS_MODE__COMPAT) {
		restore_compat_gpregs(f, r);
	} else {
		pr_err("Can't prepare rt_sigframe: registers mode corrupted (%d)\n", r->mode);
		return -1;
	}
	return 0;
}
/* Issue the ia32 get_robust_list syscall for @pid via raw int $0x80. */
static int get_robust_list32(pid_t pid, uintptr_t head, uintptr_t len)
{
	struct syscall_args32 args = { 0 };

	args.nr = __NR32_get_robust_list;
	args.arg0 = pid;
	args.arg1 = (uint32_t)head;
	args.arg2 = (uint32_t)len;

	return do_full_int80(&args);
}
static int set_robust_list32(uint32_t head, uint32_t len)
{
struct syscall_args32 s = {
.nr = __NR32_set_robust_list,
.arg0 = head,
.arg1 = len,
};
return do_full_int80(&s);
}
/*
 * Fetch the ia32 robust-futex list address/length of @pid via the
 * compat get_robust_list syscall and store them into @info.
 * If the syscall is not implemented, record an empty list (mirrors
 * the native get_task_futex_robust_list()). Returns 0 on success.
 */
int get_task_futex_robust_list_compat(pid_t pid, ThreadCoreEntry *info)
{
	void *mmap32;
	int ret = -1;

	/* Scratch page below 4G so the kernel's 32-bit output pointers fit. */
	mmap32 = alloc_compat_syscall_stack();
	if (!mmap32)
		return -1;

	/* Kernel writes head into mmap32[0] and len into mmap32[1]. */
	ret = get_robust_list32(pid, (uintptr_t)mmap32, (uintptr_t)mmap32 + 4);

	if (ret == -ENOSYS) {
		/* Check native get_task_futex_robust_list() for details. */
		if (set_robust_list32(0, 0) == (uint32_t)-ENOSYS) {
			info->futex_rla = 0;
			info->futex_rla_len = 0;
			ret = 0;
		}
	} else if (ret == 0) {
		uint32_t *arg1 = (uint32_t *)mmap32;

		info->futex_rla = *arg1;
		info->futex_rla_len = *(arg1 + 1);
		ret = 0;
	}

	free_compat_syscall_stack(mmap32);
	return ret;
}
| 21,277 | 31.735385 | 115 |
c
|
criu
|
criu-master/criu/arch/x86/kerndat.c
|
#include <elf.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include "compel/asm/fpu.h"
#include "compel/plugins/std/syscall-codes.h"
#include "cpu.h"
#include "kerndat.h"
#include "log.h"
#include "types.h"
#include "asm/compat.h"
#include "asm/dump.h"
/*
 * Probe whether the kernel supports arch_prctl(ARCH_MAP_VDSO_*).
 * Returns 1 if supported, 0 if not, -1 on probe failure.
 */
int kdat_can_map_vdso(void)
{
	pid_t child;
	int stat;

	/*
	 * Running under fork so if vdso_64 is disabled - don't create
	 * it for criu accidentally.
	 */
	child = fork();
	if (child < 0) {
		pr_perror("%s(): failed to fork()", __func__);
		return -1;
	}

	if (child == 0) {
		int ret;

		ret = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_32, 0);
		/* Mapping succeeded => the API is supported. */
		if (ret == 0)
			exit(1);
		/*
		 * Mapping vDSO while have not unmap it yet:
		 * this is restricted by API if ARCH_MAP_VDSO_* is supported.
		 */
		if (ret == -1 && errno == EEXIST)
			exit(1);
		/* Any other failure => not supported. */
		exit(0);
	}

	if (waitpid(child, &stat, 0) != child) {
		pr_err("Failed to wait for arch_prctl() test\n");
		kill(child, SIGKILL);
		return -1;
	}

	if (!WIFEXITED(stat))
		return -1;

	return WEXITSTATUS(stat);
}
#ifdef CONFIG_COMPAT
/* mmap() through the ia32 mmap2 syscall; returns the raw 32-bit result. */
void *mmap_ia32(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
{
	struct syscall_args32 args = {
		.nr = __NR32_mmap2,
		.arg0 = (uint32_t)(uintptr_t)addr,
		.arg1 = (uint32_t)len,
		.arg2 = prot,
		.arg3 = flags,
		.arg4 = fildes,
		.arg5 = (uint32_t)off,
	};

	return (void *)(uintptr_t)do_full_int80(&args);
}
/*
* The idea of the test:
* From kernel's top-down allocator we assume here that
* 1. A = mmap(0, ...); munmap(A);
* 2. B = mmap(0, ...);
* results in A == B.
* ...but if we have 32-bit mmap() bug, then A will have only lower
* 4 bytes of 64-bit address allocated with mmap().
* That means, that the next mmap() will return B != A
* (as munmap(A) hasn't really unmapped A mapping).
*
* As mapping with lower 4 bytes of A may really exist, we run
* this test under fork().
*
* Another approach to test bug's presence would be to parse
* /proc/self/maps before and after 32-bit mmap(), but that would
* be soo slow.
*/
/* Child body for has_32bit_mmap_bug(): exits 1 if the bug shows, 0 if not. */
static void mmap_bug_test(void)
{
	void *map1, *map2;
	int err;

	map1 = mmap_ia32(0, PAGE_SIZE, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	/* 32-bit error, not sign-extended - can't use IS_ERR_VALUE() here */
	err = (uintptr_t)map1 % PAGE_SIZE;
	if (err) {
		pr_err("ia32 mmap() failed: %d\n", err);
		exit(1);
	}

	if (munmap(map1, PAGE_SIZE)) {
		pr_perror("Failed to unmap() 32-bit mapping");
		exit(1);
	}

	map2 = mmap_ia32(0, PAGE_SIZE, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	err = (uintptr_t)map2 % PAGE_SIZE;
	if (err) {
		pr_err("ia32 mmap() failed: %d\n", err);
		exit(1);
	}

	/* With the top-down allocator, remapping must reuse A (see comment above). */
	if (map1 != map2)
		exit(1);

	exit(0);
}
/*
* Pre v4.12 kernels have a bug: for a process started as 64-bit
* 32-bit mmap() may return 8 byte pointer.
* Which is fatal for us: after 32-bit C/R a task will map 64-bit
* addresses, cut upper 4 bytes and try to use lower 4 bytes.
* This is a check if the bug was fixed in the kernel.
*/
/*
 * Run mmap_bug_test() in a forked child; returns 1 when the pre-v4.12
 * 32-bit mmap() bug is present (or the child failed), 0 when absent,
 * -1 on fork/wait errors.
 */
static int has_32bit_mmap_bug(void)
{
	int stat;
	pid_t child = fork();

	if (child < 0) {
		pr_perror("%s(): failed to fork()", __func__);
		return -1;
	}

	if (!child)
		mmap_bug_test();

	if (waitpid(child, &stat, 0) != child) {
		pr_err("Failed to wait for mmap test\n");
		kill(child, SIGKILL);
		return -1;
	}

	return (WIFEXITED(stat) && WEXITSTATUS(stat) == 0) ? 0 : 1;
}
/*
 * Compat C/R is possible only when vDSO mapping is supported and the
 * kernel is free of the 32-bit mmap() bug.
 */
int kdat_compatible_cr(void)
{
	return kdat.can_map_vdso && !has_32bit_mmap_bug();
}
#else /* !CONFIG_COMPAT */
/* Without CONFIG_COMPAT, 32-bit tasks can never be restored by this build. */
int kdat_compatible_cr(void)
{
	return 0;
}
#endif
/*
 * Child helper for kdat_x86_has_ptrace_fpu_xsave_bug(): become traced
 * and stop itself so the parent can read its FPU state. Never returns.
 */
static int kdat_x86_has_ptrace_fpu_xsave_bug_child(void *arg)
{
	if (ptrace(PTRACE_TRACEME, 0, 0, 0)) {
		pr_perror("%d: ptrace(PTRACE_TRACEME) failed", getpid());
		_exit(1);
	}

	if (kill(getpid(), SIGSTOP))
		pr_perror("%d: failed to kill myself", getpid());

	/* Only reached when the SIGSTOP failed to stop us. */
	pr_err("Continue after SIGSTOP.. Urr what?\n");
	_exit(1);
}
/*
* Pre v4.14 kernels have a bug on Skylake CPUs:
* copyout_from_xsaves() creates fpu state for
* ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov)
* without MXCSR and MXCSR_FLAGS if there is SSE/YMM state, but no FP state.
* That is xfeatures had either/both XFEATURE_MASK_{SSE,YMM} set, but not
* XFEATURE_MASK_FP.
* But we *really* need to C/R MXCSR & MXCSR_FLAGS if SSE/YMM active,
* as mxcsr store part of the state.
*/
/*
 * Detect the pre-v4.14 Skylake bug described above by reading the
 * xstate of a freshly stopped child: returns 1 when the bug is
 * present (mxcsr reads back as 0), 0 when absent, -1 on error.
 */
int kdat_x86_has_ptrace_fpu_xsave_bug(void)
{
	user_fpregs_struct_t xsave = {};
	struct iovec iov;
	char stack[PAGE_SIZE];
	int flags = CLONE_VM | CLONE_FILES | CLONE_UNTRACED | SIGCHLD;
	int ret = -1;
	pid_t child;
	int stat;

	/* OSXSAVE can't be changed during boot. */
	if (!compel_cpu_has_feature(X86_FEATURE_OSXSAVE))
		return 0;

	child = clone(kdat_x86_has_ptrace_fpu_xsave_bug_child, stack + ARRAY_SIZE(stack), flags, 0);
	if (child < 0) {
		pr_perror("%s(): failed to clone()", __func__);
		return -1;
	}

	if (waitpid(child, &stat, WUNTRACED) != child) {
		/*
		 * waitpid() may end with ECHILD if SIGCHLD == SIG_IGN,
		 * and the child has stopped already.
		 */
		pr_perror("Failed to wait for %s() test", __func__);
		goto out_kill;
	}

	if (!WIFSTOPPED(stat)) {
		pr_err("Born child is unstoppable! (might be dead)\n");
		goto out_kill;
	}

	iov.iov_base = &xsave;
	iov.iov_len = sizeof(xsave);

	if (ptrace(PTRACE_GETREGSET, child, (unsigned)NT_X86_XSTATE, &iov) < 0) {
		pr_perror("Can't obtain FPU registers for %d", child);
		goto out_kill;
	}

	/*
	 * MXCSR should be never 0x0: e.g., it should contain either:
	 * R+/R-/RZ/RN to determine rounding model.
	 */
	ret = !xsave.i387.mxcsr;

out_kill:
	if (kill(child, SIGKILL))
		pr_perror("Failed to kill my own child");
	if (waitpid(child, &stat, 0) < 0)
		pr_perror("Failed wait for a dead child");

	return ret;
}
| 5,813 | 21.889764 | 93 |
c
|
criu
|
criu-master/criu/arch/x86/restorer.c
|
#include <asm/prctl.h>
#include <unistd.h>
#include "types.h"
#include "restorer.h"
#include "asm/compat.h"
#include "asm/restorer.h"
#include <compel/asm/fpu.h>
#include <compel/plugins/std/syscall-codes.h>
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include "log.h"
#include "cpu.h"
/* Map a vDSO blob at @map_at via arch_prctl, picking the 32/64-bit variant. */
int arch_map_vdso(unsigned long map_at, bool compatible)
{
	pr_debug("Mapping %s vDSO at %lx\n", compatible ? "compatible" : "native", map_at);

	return sys_arch_prctl(compatible ? ARCH_MAP_VDSO_32 : ARCH_MAP_VDSO_64, map_at);
}
/*
 * Restore registers that sigreturn does not cover: the FS and GS
 * segment bases, set via arch_prctl. Returns 0 on success.
 */
int restore_nonsigframe_gpregs(UserX86RegsEntry *r)
{
	long ret;

	ret = sys_arch_prctl(ARCH_SET_FS, r->fs_base);
	if (ret) {
		pr_info("SET_FS fail %ld\n", ret);
		return -1;
	}

	ret = sys_arch_prctl(ARCH_SET_GS, r->gs_base);
	if (ret) {
		pr_info("SET_GS fail %ld\n", ret);
		return -1;
	}

	return 0;
}
#ifdef CONFIG_COMPAT
int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
struct syscall_args32 s = {
.nr = __NR32_set_robust_list,
.arg0 = head_ptr,
.arg1 = len,
};
return do_full_int80(&s);
}
/* Lazily allocate the below-4G scratch page; no-op if already present. */
static int prepare_stack32(void **stack32)
{
	if (*stack32 == NULL) {
		*stack32 = alloc_compat_syscall_stack();
		if (*stack32 == NULL) {
			pr_err("Failed to allocate stack for 32-bit TLS restore\n");
			return -1;
		}
	}
	return 0;
}
/*
 * Re-install the task's GDT TLS descriptors with the ia32
 * set_thread_area syscall, skipping not-present entries.
 */
void restore_tls(tls_t *ptls)
{
	/*
	 * We need here compatible stack, because 32-bit syscalls get
	 * 4-byte pointer and _usally_ restorer is also under 4Gb, but
	 * it can be upper and then pointers are messed up.
	 * (we lose high 4 bytes and... BANG!)
	 * Nothing serious, but syscall will return -EFAULT - or if we're
	 * lucky and lower 4 bytes points on some writeable VMA - corruption).
	 */
	void *stack32 = NULL;
	unsigned i;

	for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
		user_desc_t *desc = &ptls->desc[i];
		int ret;

		if (desc->seg_not_present)
			continue;

		/* Allocate the below-4G scratch page lazily, on first use. */
		if (prepare_stack32(&stack32) < 0)
			return;

		/* Copy the descriptor below 4G so the ia32 syscall can address it. */
		memcpy(stack32, desc, sizeof(user_desc_t));

		/* int $0x80 set_thread_area: eax = nr, ebx = descriptor pointer. */
		asm volatile(" mov %1,%%eax \n"
			     " mov %2,%%ebx \n"
			     " int $0x80 \n"
			     " mov %%eax,%0 \n"
			     : "=g"(ret)
			     : "r"(__NR32_set_thread_area), "r"((uint32_t)(uintptr_t)stack32)
			     : "eax", "ebx", "r8", "r9", "r10", "r11", "memory");
		if (ret)
			pr_err("Failed to restore TLS descriptor %u in GDT: %d\n", desc->entry_number, ret);
	}

	if (stack32)
		free_compat_syscall_stack(stack32);
}
#endif
| 2,525 | 21.353982 | 87 |
c
|
criu
|
criu-master/criu/arch/x86/sigaction_compat.c
|
#include "log.h"
#include "asm/restorer.h"
#include <compel/asm/fpu.h>
#include "asm/compat.h"
#include <compel/plugins/std/syscall-codes.h>
#ifdef CR_NOGLIBC
#include <compel/plugins/std/string.h>
#endif
#include "cpu.h"
/*
 * 32-bit trampoline symbol: shuffles the arguments into the ia32
 * syscall registers, zeroes %edx (oldact) and issues rt_sigaction
 * through int $0x80 (see arch_compat_rt_sigaction() below).
 */
asm(" .pushsection .text \n"
    " .global restore_rt_sigaction \n"
    " .code32 \n"
    "restore_rt_sigaction: \n"
    " mov %edx, %esi \n"
    " mov $0, %edx \n"
    " movl $" __stringify(__NR32_rt_sigaction) ",%eax \n"
    " int $0x80 \n"
    " ret \n"
    " .popsection \n"
    " .code64");
extern char restore_rt_sigaction;
/*
* Call raw rt_sigaction syscall through int80 - so the ABI kernel choses
* to deliver this signal would be i386.
*/
/*
 * @stack32 - caller-allocated scratch page below 4G
 * @sig     - signal number to (re)register
 * @act     - compat sigaction to install
 * Returns the raw syscall result: 0 on success, negative errno value
 * on failure, or -1 when no 32-bit stack was provided.
 */
int arch_compat_rt_sigaction(void *stack32, int sig, rt_sigaction_t_compat *act)
{
	struct syscall_args32 arg = {};
	unsigned long act_stack = (unsigned long)stack32;

	/* To make sure the 32-bit stack was allocated in caller */
	if (act_stack >= (uint32_t)-1) {
		pr_err("compat rt_sigaction without 32-bit stack\n");
		return -1;
	}

	/*
	 * To be sure, that sigaction pointer lies under 4G,
	 * coping it on the bottom of the stack.
	 */
	memcpy(stack32, act, sizeof(rt_sigaction_t_compat));

	arg.nr = __NR32_rt_sigaction;
	arg.arg0 = sig;
	arg.arg1 = (uint32_t)act_stack;		      /* act */
	arg.arg2 = 0;				      /* oldact */
	arg.arg3 = (uint32_t)sizeof(act->rt_sa_mask); /* sigsetsize */

	return do_full_int80(&arg);
}
| 1,450 | 26.377358 | 80 |
c
|
criu
|
criu-master/criu/arch/x86/sigframe.c
|
#include <stdlib.h>
#include <stdint.h>
#include "asm/sigframe.h"
#include "asm/types.h"
#include "log.h"
/*
 * Point the sigframe's mcontext fpstate at the FPU area of the
 * restorer-side frame, checking the 64-byte xsave alignment the
 * kernel requires. Returns 0 on success, -1 on misalignment.
 */
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe)
{
	fpu_state_t *fpu_state;
	unsigned long addr;

	/*
	 * Use local sigframe to check native/compat type,
	 * but set address for rsigframe.
	 */
	fpu_state = sigframe->is_native ? &rsigframe->native.fpu_state : &rsigframe->compat.fpu_state;

	if (sigframe->is_native) {
		addr = (unsigned long)(void *)&fpu_state->fpu_state_64.xsave;
		if (addr % 64ul) {
			pr_err("Unaligned address passed: %lx (native %d)\n", addr, sigframe->is_native);
			return -1;
		}
		sigframe->native.uc.uc_mcontext.fpstate = (uint64_t)addr;
	} else {
		addr = (unsigned long)(void *)&fpu_state->fpu_state_ia32.xsave;
		sigframe->compat.uc.uc_mcontext.fpstate = (uint32_t)(unsigned long)(void *)&fpu_state->fpu_state_ia32;
		if (addr % 64ul) {
			pr_err("Unaligned address passed: %lx (native %d)\n", addr, sigframe->is_native);
			return -1;
		}
	}

	return 0;
}
| 1,067 | 27.864865 | 110 |
c
|
criu
|
criu-master/criu/arch/x86/sys-exec-tbl.c
|
/* Generated table of executable syscalls for native 64-bit tasks. */
static struct syscall_exec_desc sc_exec_table_64[] = {
#include "sys-exec-tbl-64.c"
	{}, /* terminator */
};

#ifdef CONFIG_COMPAT
/* Generated table of executable syscalls for ia32 (compat) tasks. */
static struct syscall_exec_desc sc_exec_table_32[] = {
#include "sys-exec-tbl-32.c"
	{}, /* terminator */
};
#endif

struct syscall_exec_desc;
static inline struct syscall_exec_desc *find_syscall_table(char *name, struct syscall_exec_desc *tbl)
{
int i;
for (i = 0; tbl[i].name != NULL; i++)
if (!strcmp(tbl[i].name, name))
return &tbl[i];
return NULL;
}
#define ARCH_HAS_FIND_SYSCALL
/* overwrite default to search in two tables above */
#ifdef CONFIG_COMPAT
/* Look up the syscall in the table matching the tracee's mode. */
struct syscall_exec_desc *find_syscall(char *name, struct parasite_ctl *ctl)
{
	if (compel_mode_native(ctl))
		return find_syscall_table(name, sc_exec_table_64);
	else
		return find_syscall_table(name, sc_exec_table_32);
}
#else
/* Without CONFIG_COMPAT only the 64-bit table exists. */
struct syscall_exec_desc *find_syscall(char *name, __always_unused struct parasite_ctl *ctl)
{
	return find_syscall_table(name, sc_exec_table_64);
}
#endif
| 988 | 22.547619 | 101 |
c
|
criu
|
criu-master/criu/arch/x86/include/asm/compat.h
|
#ifndef __CR_ASM_COMPAT_H__
#define __CR_ASM_COMPAT_H__
#ifdef CR_NOGLIBC
#include <compel/plugins/std/syscall.h>
#include <compel/plugins/std/syscall-codes.h>
#else
#define sys_mmap mmap
#define sys_munmap munmap
#endif
#include <sys/mman.h>
/*
 * Map one anonymous page below 4G (MAP_32BIT) to serve as scratch
 * area for ia32 syscalls. Returns the page or 0 on failure.
 */
static inline void *alloc_compat_syscall_stack(void)
{
	void *mem = (void *)sys_mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
				     -1, 0);

	if ((uintptr_t)mem % PAGE_SIZE) {
		/* Raw mmap returns -errno; recover errno from the low 32 bits. */
		int err = (~(uint32_t)(uintptr_t)mem) + 1;

		pr_err("mmap() of compat syscall stack failed with %d\n", err);
		return 0;
	}
	return mem;
}
/* Release a page obtained from alloc_compat_syscall_stack(). */
static inline void free_compat_syscall_stack(void *mem)
{
	long int err = sys_munmap(mem, PAGE_SIZE);

	if (err)
		pr_err("munmap() of compat addr %p failed with %ld\n", mem, err);
}
/* Argument pack for a raw ia32 int $0x80 syscall (all values 32-bit). */
struct syscall_args32 {
	uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
};
/*
 * Perform a full 6-argument ia32 syscall via int $0x80 and return the
 * raw 32-bit eax result (success value or negated errno).
 */
static inline uint32_t do_full_int80(struct syscall_args32 *args)
{
	/*
	 * Kernel older than v4.4 do not preserve r8-r15 registers when
	 * invoking int80, so we need to preserve them.
	 *
	 * Additionally, %rbp is used as the 6th syscall argument, and we need
	 * to preserve its value when returning from the syscall to avoid
	 * upsetting GCC. However, we can't use %rbp in the GCC asm clobbers
	 * due to a GCC limitation. Instead, we explicitly save %rbp on the
	 * stack before invoking the syscall and restore its value afterward.
	 *
	 * Further, GCC may not adjust the %rsp pointer when allocating the
	 * args and ret variables because 1) do_full_int80() is a leaf
	 * function, and 2) the local variables (args and ret) are in the
	 * 128-byte red-zone as defined in the x86_64 ABI. To use the stack
	 * when preserving %rbp, we must either tell GCC to a) mark the
	 * function as non-leaf, or b) move away from the red-zone when using
	 * the stack. It seems that there is no easy way to do a), so we'll go
	 * with b).
	 * Note 1: Another workaround would have been to add %rsp in the list
	 * of clobbers, but this was deprecated in GCC 9.
	 * Note 2: This red-zone bug only manifests when compiling CRIU with
	 * DEBUG=1.
	 */
	uint32_t ret;
	asm volatile("sub $128, %%rsp\n\t"
		     "pushq %%rbp\n\t"
		     "mov %7, %%ebp\n\t"
		     "int $0x80\n\t"
		     "popq %%rbp\n\t"
		     "add $128, %%rsp\n\t"
		     : "=a"(ret)
		     : "a"(args->nr), "b"(args->arg0), "c"(args->arg1), "d"(args->arg2), "S"(args->arg3),
		       "D"(args->arg4), "g"(args->arg5)
		     : "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15");
	return ret;
}
#ifndef CR_NOGLIBC
#undef sys_mmap
#undef sys_munmap
#endif
#endif
| 2,597 | 29.209302 | 111 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/dump.h
|
#ifndef __CR_ASM_DUMP_H__
#define __CR_ASM_DUMP_H__
extern int save_task_regs(void *, user_regs_struct_t *, user_fpregs_struct_t *);
extern int arch_alloc_thread_info(CoreEntry *core);
extern void arch_free_thread_info(CoreEntry *core);
extern int get_task_futex_robust_list_compat(pid_t pid, ThreadCoreEntry *info);
/*
 * Copy a dumped task's GDT TLS descriptors into the protobuf core
 * entry, field by field.
 */
static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
	ThreadInfoX86 *ti = core->thread_info;
	int i;
	for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
		user_desc_t *from = &tls.desc[i];
		UserDescT *to = ti->tls[i];
#define COPY_TLS(field) to->field = from->field
		COPY_TLS(entry_number);
		COPY_TLS(base_addr);
		COPY_TLS(limit);
		COPY_TLS(seg_32bit);
		/* The 2-bit 'contents' field is stored as two separate pb fields. */
		to->contents_h = from->contents & 0x2;
		to->contents_l = from->contents & 0x1;
		COPY_TLS(read_exec_only);
		COPY_TLS(limit_in_pages);
		COPY_TLS(seg_not_present);
		COPY_TLS(usable);
#undef COPY_TLS
	}
}
#endif
| 895 | 25.352941 | 80 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/restore.h
|
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
"movq %0, %%rbx \n" \
"movq %1, %%rax \n" \
"movq %2, %%rdi \n" \
"movq %%rbx, %%rsp \n" \
"callq *%%rax \n" \
: \
: "g"(new_sp), \
"g"(restore_task_exec_start), \
"g"(task_args) \
: "rdi", "rsi", "rbx", "rax", "memory")
/* clang-format on */
/*
 * Rebuild the task's GDT TLS descriptors from the protobuf core
 * entry (inverse of core_put_tls()).
 */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
	ThreadInfoX86 *ti = pcore->thread_info;
	size_t i;

	for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
		user_desc_t *to = &ptls->desc[i];
		UserDescT *from;
		/*
		 * If proto image has lesser TLS entries,
		 * mark them as not present (and thus skip restore).
		 */
		if (i >= ti->n_tls) {
			to->seg_not_present = 1;
			continue;
		}
		from = ti->tls[i];
#define COPY_TLS(field) to->field = from->field
		COPY_TLS(entry_number);
		COPY_TLS(base_addr);
		COPY_TLS(limit);
		COPY_TLS(seg_32bit);
		/* Recombine the two pb fields into the 2-bit 'contents' value. */
		to->contents = ((u32)from->contents_h << 1) | from->contents_l;
		COPY_TLS(read_exec_only);
		COPY_TLS(limit_in_pages);
		COPY_TLS(seg_not_present);
		COPY_TLS(usable);
#undef COPY_TLS
	}
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif
| 1,445 | 23.1 | 65 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/restorer.h
|
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__
#include "asm/types.h"
#include <compel/asm/fpu.h>
#include <compel/asm/infect-types.h>
#include "images/core.pb-c.h"
#include <compel/plugins/std/syscall-codes.h>
#include <compel/asm/sigframe.h>
#include "asm/compat.h"
#ifdef CONFIG_COMPAT
extern void restore_tls(tls_t *ptls);
extern int arch_compat_rt_sigaction(void *stack32, int sig, rt_sigaction_t_compat *act);
extern int set_compat_robust_list(uint32_t head_ptr, uint32_t len);
#else /* CONFIG_COMPAT */
/* No-op stub: without CONFIG_COMPAT there are no ia32 TLS entries to restore. */
static inline void restore_tls(tls_t *ptls)
{
}
/* Compat sigaction is unsupported without CONFIG_COMPAT. */
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
	return -1;
}
/* Compat robust-list restore is unsupported without CONFIG_COMPAT. */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
	return -1;
}
#endif /* !CONFIG_COMPAT */
/*
* Documentation copied from glibc sysdeps/unix/sysv/linux/x86_64/clone.S
* The kernel expects:
* rax: system call number
* rdi: flags
* rsi: child_stack
* rdx: TID field in parent
* r10: TID field in child
* r8: thread pointer
*
* int clone(unsigned long clone_flags, unsigned long newsp,
* int *parent_tidptr, int *child_tidptr,
* unsigned long tls);
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
"clone_emul: \n" \
"movq %2, %%rsi \n" \
"subq $16, %%rsi \n" \
"movq %6, %%rdi \n" \
"movq %%rdi, 8(%%rsi) \n" \
"movq %5, %%rdi \n" \
"movq %%rdi, 0(%%rsi) \n" \
"movq %1, %%rdi \n" \
"movq %3, %%rdx \n" \
"movq %4, %%r10 \n" \
"movl $"__stringify(__NR_clone)", %%eax \n" \
"syscall \n" \
\
"testq %%rax,%%rax \n" \
"jz thread_run \n" \
\
"movq %%rax, %0 \n" \
"jmp clone_end \n" \
\
"thread_run: \n" \
"xorq %%rbp, %%rbp \n" \
"popq %%rax \n" \
"popq %%rdi \n" \
"callq *%%rax \n" \
\
"clone_end: \n" \
: "=r"(ret) \
: "g"(clone_flags), \
"g"(new_sp), \
"g"(&parent_tid), \
"g"(&thread_args[i].pid), \
"g"(clone_restore_fn), \
"g"(&thread_args[i]) \
: "rax", "rcx", "rdi", "rsi", "rdx", "r10", "r11", "memory")
/* int clone3(struct clone_args *args, size_t size) */
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
clone_restore_fn) \
asm volatile( \
"clone3_emul: \n" \
/*
* Prepare stack pointer for child process. The kernel does
* stack + stack_size before passing the stack pointer to the
* child process. As we have to put the function and the
* arguments for the new process on that stack we have handle
* the kernel's implicit stack + stack_size.
*/ \
"movq (%3), %%rsi /* new stack pointer */ \n" \
/* Move the stack_size to %rax to use later as the offset */ \
"movq %4, %%rax \n" \
/* 16 bytes are needed on the stack for function and args */ \
"subq $16, (%%rsi, %%rax) \n" \
"movq %6, %%rdi /* thread args */ \n" \
"movq %%rdi, 8(%%rsi, %%rax) \n" \
"movq %5, %%rdi /* thread function */ \n" \
"movq %%rdi, 0(%%rsi, %%rax) \n" \
/*
* The stack address has been modified for the two
* elements above (child function, child arguments).
* This modified stack needs to be stored back into the
* clone_args structure.
*/ \
"movq (%%rsi), %3 \n" \
/*
* Do the actual clone3() syscall. First argument (%rdi) is
* the clone_args structure, second argument is the size
* of clone_args.
*/ \
"movq %1, %%rdi /* clone_args */ \n" \
"movq %2, %%rsi /* size */ \n" \
"movl $"__stringify(__NR_clone3)", %%eax \n" \
"syscall \n" \
/*
* If clone3() was successful and if we are in the child
* '0' is returned. Jump to the child function handler.
*/ \
"testq %%rax,%%rax \n" \
"jz thread3_run \n" \
/* Return the PID to the parent process. */ \
"movq %%rax, %0 \n" \
"jmp clone3_end \n" \
\
"thread3_run: /* Child process */ \n" \
/* Clear the frame pointer */ \
"xorq %%rbp, %%rbp \n" \
/* Pop the child function from the stack */ \
"popq %%rax \n" \
/* Pop the child function arguments from the stack */ \
"popq %%rdi \n" \
/* Run the child function */ \
"callq *%%rax \n" \
/*
* If the child function is expected to return, this
* would be the place to handle the return code. In CRIU's
* case the child function is expected to not return
* and do exit() itself.
*/ \
\
"clone3_end: \n" \
: "=r"(ret) \
/*
* This uses the "r" modifier for all parameters
* as clang complained if using "g".
*/ \
: "r"(&clone_args), \
"r"(size), \
"r"(&clone_args.stack), \
"r"(clone_args.stack_size), \
"r"(clone_restore_fn), \
"r"(args) \
: "rax", "rcx", "rdi", "rsi", "rdx", "r10", "r11", "memory")
#define ARCH_FAIL_CORE_RESTORE \
asm volatile( \
"movq %0, %%rsp \n" \
"movq 0, %%rax \n" \
"jmp *%%rax \n" \
: \
: "r"(ret) \
: "memory")
/* clang-format on */
/* Fill the compat ucontext's sigaltstack fields from the image entry. */
static inline void __setup_sas_compat(struct ucontext_ia32 *uc, ThreadSasEntry *sas)
{
	uc->uc_stack.ss_sp = (compat_uptr_t)sas->ss_sp;
	uc->uc_stack.ss_flags = (int)sas->ss_flags;
	uc->uc_stack.ss_size = (compat_size_t)sas->ss_size;
}
/* Fill the sigframe's sigaltstack, dispatching on native vs compat layout. */
static inline void __setup_sas(struct rt_sigframe *sigframe, ThreadSasEntry *sas)
{
	if (!sigframe->is_native) {
		__setup_sas_compat(&sigframe->compat.uc, sas);
		return;
	}

	sigframe->native.uc.uc_stack.ss_sp = (void *)decode_pointer(sas->ss_sp);
	sigframe->native.uc.uc_stack.ss_flags = (int)sas->ss_flags;
	sigframe->native.uc.uc_stack.ss_size = (size_t)sas->ss_size;
}
/* Set up sigaltstack only when the image actually carries one. */
static inline void _setup_sas(struct rt_sigframe *sigframe, ThreadSasEntry *sas)
{
	if (sas != NULL)
		__setup_sas(sigframe, sas);
}
#define setup_sas _setup_sas
int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r);
int restore_nonsigframe_gpregs(UserX86RegsEntry *r);
int ptrace_set_breakpoint(pid_t pid, void *addr);
int ptrace_flush_breakpoints(pid_t pid);
extern int arch_map_vdso(unsigned long map_at, bool compatible);
#endif
| 6,534 | 30.570048 | 88 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/syscall32.h
|
#ifndef __CR_SYSCALL32_H__
#define __CR_SYSCALL32_H__
extern long sys_socket(int domain, int type, int protocol);
extern long sys_connect(int sockfd, struct sockaddr *addr, int addrlen);
extern long sys_sendto(int sockfd, void *buff, size_t len, unsigned int flags, struct sockaddr *addr, int addr_len);
extern long sys_recvfrom(int sockfd, void *ubuf, size_t size, unsigned int flags, struct sockaddr *addr, int *addr_len);
extern long sys_sendmsg(int sockfd, const struct msghdr *msg, int flags);
extern long sys_recvmsg(int sockfd, struct msghdr *msg, int flags);
extern long sys_shutdown(int sockfd, int how);
extern long sys_bind(int sockfd, const struct sockaddr *addr, int addrlen);
extern long sys_setsockopt(int sockfd, int level, int optname, const void *optval, unsigned int optlen);
extern long sys_getsockopt(int sockfd, int level, int optname, const void *optval, unsigned int *optlen);
extern long sys_shmat(int shmid, void *shmaddr, int shmflag);
extern long sys_pread(unsigned int fd, char *ubuf, u32 count, u64 pos);
#endif /* __CR_SYSCALL32_H__ */
| 1,069 | 58.444444 | 120 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/thread_pointer.h
|
/* __thread_pointer definition. x86 version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H
/* Return the calling thread's thread pointer (TLS base). */
static inline void *__criu_thread_pointer(void)
{
#if __GNUC_PREREQ(11, 1)
	/* GCC >= 11.1 provides this builtin on x86. */
	return __builtin_thread_pointer();
#else
	void *__result;
#ifdef __x86_64__
	/* On x86_64 the thread pointer lives in %fs; slot 0 of the TCB
	 * holds a self-pointer, so a single load yields the TLS base. */
	__asm__("mov %%fs:0, %0" : "=r"(__result));
#else
	/* 32-bit x86 uses %gs for TLS instead of %fs. */
	__asm__("mov %%gs:0, %0" : "=r"(__result));
#endif
	return __result;
#endif /* !GCC 11 */
}
| 1,224 | 32.108108 | 71 |
h
|
criu
|
criu-master/criu/arch/x86/include/asm/vdso.h
|
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__
#include "asm/int.h"
#include "asm-generic/vdso.h"
/* This definition is used in pie/util-vdso.c to initialize the vdso symbol
* name string table 'vdso_symbols'
*/
/*
* This is a minimal amount of symbols
* we should support at the moment.
*/
#define VDSO_SYMBOL_MAX 6
#define VDSO_SYMBOL_GTOD 2
/*
* XXX: we don't patch __kernel_vsyscall as it's too small:
*
* byte *before* *after*
* 0x0 push %ecx mov $[rt-vdso],%eax
* 0x1 push %edx ^
* 0x2 push %ebp ^
* 0x3 mov %esp,%ebp ^
* 0x5 sysenter jmp *%eax
* 0x7 int $0x80 int3
* 0x9 pop %ebp int3
* 0xa pop %edx int3
* 0xb pop %ecx pop %ecx
* 0xc ret ret
*
* As restarting a syscall is quite likely after restore,
* the patched version quitly crashes.
* vsyscall will be patched again when addressing:
* https://github.com/checkpoint-restore/criu/issues/512
*/
#define ARCH_VDSO_SYMBOLS_LIST \
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_getcpu"; \
const char *aarch_vdso_symbol3 = "__vdso_gettimeofday"; \
const char *aarch_vdso_symbol4 = "__vdso_time"; \
const char *aarch_vdso_symbol5 = "__kernel_sigreturn"; \
const char *aarch_vdso_symbol6 = "__kernel_rt_sigreturn";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4, aarch_vdso_symbol5, \
aarch_vdso_symbol6
/* "__kernel_vsyscall", */
#ifndef ARCH_MAP_VDSO_32
#define ARCH_MAP_VDSO_32 0x2002
#endif
#ifndef ARCH_MAP_VDSO_64
#define ARCH_MAP_VDSO_64 0x2003
#endif
#if defined(CONFIG_COMPAT) && !defined(__ASSEMBLY__)
struct vdso_symtable;
extern int vdso_fill_symtable(uintptr_t mem, size_t size, struct vdso_symtable *t);
extern int vdso_fill_symtable_compat(uintptr_t mem, size_t size, struct vdso_symtable *t);
/*
 * Parse the vdso image at @mem/@size into the symbol table @t,
 * selecting the compat (32-bit) or native parser via @compat_vdso.
 */
static inline int __vdso_fill_symtable(uintptr_t mem, size_t size, struct vdso_symtable *t, bool compat_vdso)
{
	return compat_vdso ? vdso_fill_symtable_compat(mem, size, t) : vdso_fill_symtable(mem, size, t);
}
#endif
#endif /* __CR_ASM_VDSO_H__ */
| 2,297 | 29.236842 | 109 |
h
|
criu
|
criu-master/criu/include/aio.h
|
#ifndef __CR_AIO_H__
#define __CR_AIO_H__
#include "linux/aio_abi.h"
#include "images/mm.pb-c.h"
unsigned int aio_estimate_nr_reqs(unsigned int size);
int dump_aio_ring(MmEntry *mme, struct vma_area *vma);
void free_aios(MmEntry *mme);
struct parasite_ctl;
int parasite_collect_aios(struct parasite_ctl *, struct vm_area_list *);
unsigned long aio_rings_args_size(struct vm_area_list *);
struct task_restore_args;
int prepare_aios(struct pstree_item *t, struct task_restore_args *ta);
struct aio_ring {
unsigned id; /* kernel internal index number */
unsigned nr; /* number of io_events */
unsigned head; /* Written to by userland or under ring_lock
* mutex by aio_read_events_ring(). */
unsigned tail;
unsigned magic;
unsigned compat_features;
unsigned incompat_features;
unsigned header_length; /* size of aio_ring */
struct io_event io_events[0];
};
struct rst_aio_ring {
unsigned long addr;
unsigned long len;
unsigned int nr_req;
};
#endif /* __CR_AIO_H__ */
| 990 | 26.527778 | 72 |
h
|
criu
|
criu-master/criu/include/autofs.h
|
#ifndef __CR_AUTOFS_H__
#define __CR_AUTOFS_H__
#ifndef AUTOFS_MINOR
#define AUTOFS_MINOR 235
#endif
#include <stdbool.h>
bool is_autofs_pipe(unsigned long inode);
struct mount_info;
int autofs_parse(struct mount_info *pm);
int autofs_dump(struct mount_info *pm);
int autofs_mount(struct mount_info *mi, const char *source, const char *filesystemtype, unsigned long mountflags);
#include <linux/limits.h>
#include <linux/auto_fs.h>
#include <string.h>
#define AUTOFS_DEVICE_NAME "autofs"
#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
#define AUTOFS_DEVID_LEN 16
#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
/*
* An ioctl interface for autofs mount point control.
*/
struct args_protover {
__u32 version;
};
struct args_protosubver {
__u32 sub_version;
};
struct args_openmount {
__u32 devid;
};
struct args_ready {
__u32 token;
};
struct args_fail {
__u32 token;
__s32 status;
};
struct args_setpipefd {
__s32 pipefd;
};
struct args_timeout {
__u64 timeout;
};
struct args_requester {
__u32 uid;
__u32 gid;
};
struct args_expire {
__u32 how;
};
struct args_askumount {
__u32 may_umount;
};
struct args_ismountpoint {
union {
struct args_in {
__u32 type;
} in;
struct args_out {
__u32 devid;
__u32 magic;
} out;
};
};
/*
* All the ioctls use this structure.
* When sending a path size must account for the total length
* of the chunk of memory otherwise is is the size of the
* structure.
*/
struct autofs_dev_ioctl {
__u32 ver_major;
__u32 ver_minor;
__u32 size; /* total size of data passed in
* including this struct */
__s32 ioctlfd; /* automount command fd */
/* Command parameters */
union {
struct args_protover protover;
struct args_protosubver protosubver;
struct args_openmount openmount;
struct args_ready ready;
struct args_fail fail;
struct args_setpipefd setpipefd;
struct args_timeout timeout;
struct args_requester requester;
struct args_expire expire;
struct args_askumount askumount;
struct args_ismountpoint ismountpoint;
};
char path[0];
};
/*
 * Reset @in to a known state: zeroed body, the ioctl ABI version this
 * code was built against, and an invalid (-1) automount control fd.
 */
static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
{
	memset(in, 0, sizeof(*in));

	in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
	in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
	in->size = sizeof(*in);
	in->ioctlfd = -1;
}
/*
* If you change this make sure you make the corresponding change
* to autofs-dev-ioctl.c:lookup_ioctl()
*/
enum {
/* Get various version info */
AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
AUTOFS_DEV_IOCTL_PROTOVER_CMD,
AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
/* Open mount ioctl fd */
AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
/* Close mount ioctl fd */
AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
/* Mount/expire status returns */
AUTOFS_DEV_IOCTL_READY_CMD,
AUTOFS_DEV_IOCTL_FAIL_CMD,
/* Activate/deactivate autofs mount */
AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
AUTOFS_DEV_IOCTL_CATATONIC_CMD,
/* Expiry timeout */
AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
/* Get mount last requesting uid and gid */
AUTOFS_DEV_IOCTL_REQUESTER_CMD,
/* Check for eligible expire candidates */
AUTOFS_DEV_IOCTL_EXPIRE_CMD,
/* Request busy status */
AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
/* Check if path is a mountpoint */
AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
};
#define AUTOFS_IOCTL 0x93
#define AUTOFS_DEV_IOCTL_VERSION _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOVER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOSUBVER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_OPENMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CLOSEMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_READY _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_FAIL _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_SETPIPEFD _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CATATONIC _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_TIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_REQUESTER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_EXPIRE _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ASKUMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
#endif
| 4,934 | 23.310345 | 117 |
h
|
criu
|
criu-master/criu/include/bpfmap.h
|
#ifndef __CR_BPFMAP_H__
#define __CR_BPFMAP_H__
#include "files.h"
#include "bpfmap-file.pb-c.h"
#include "bpfmap-data.pb-c.h"
struct bpfmap_file_info {
BpfmapFileEntry *bpfe;
struct file_desc d;
};
struct bpfmap_data_rst {
BpfmapDataEntry *bde;
void *data;
struct bpfmap_data_rst *next;
};
#define BPFMAP_DATA_HASH_BITS 5
#define BPFMAP_DATA_TABLE_SIZE (1 << BPFMAP_DATA_HASH_BITS)
#define BPFMAP_DATA_HASH_MASK (BPFMAP_DATA_TABLE_SIZE - 1)
extern int is_bpfmap_link(char *link);
extern int dump_one_bpfmap_data(BpfmapFileEntry *bpf, int lfd, const struct fd_parms *p);
extern int do_collect_bpfmap_data(struct bpfmap_data_rst *, ProtobufCMessage *, struct cr_img *,
struct bpfmap_data_rst **);
extern int restore_bpfmap_data(int, uint32_t, struct bpfmap_data_rst **);
extern const struct fdtype_ops bpfmap_dump_ops;
extern struct collect_image_info bpfmap_cinfo;
extern struct collect_image_info bpfmap_data_cinfo;
#endif /* __CR_BPFMAP_H__ */
| 966 | 27.441176 | 96 |
h
|
criu
|
criu-master/criu/include/cgroup-props.h
|
#ifndef __CR_CGROUP_PROPS_H__
#define __CR_CGROUP_PROPS_H__
#include <stdbool.h>
typedef struct {
const char *name;
size_t nr_props;
const char **props;
} cgp_t;
extern cgp_t cgp_global;
extern cgp_t cgp_global_v2;
extern const cgp_t *cgp_get_props(const char *name);
extern bool cgp_should_skip_controller(const char *name);
extern bool cgp_add_dump_controller(const char *name);
extern int cgp_init(char *stream, size_t len, char *path);
extern void cgp_fini(void);
#endif /* __CR_CGROUP_PROPS_H__ */
| 511 | 22.272727 | 58 |
h
|
criu
|
criu-master/criu/include/cgroup.h
|
#ifndef __CR_CGROUP_H__
#define __CR_CGROUP_H__
#include "int.h"
#include "images/core.pb-c.h"
struct pstree_item;
struct parasite_dump_cgroup_args;
extern u32 root_cg_set;
int dump_thread_cgroup(const struct pstree_item *, u32 *, struct parasite_dump_cgroup_args *args, int id);
int dump_cgroups(void);
int prepare_task_cgroup(struct pstree_item *);
int prepare_cgroup(void);
/* Restore things like cpu_limit in known cgroups. */
int prepare_cgroup_properties(void);
int restore_freezer_state(void);
void fini_cgroup(void);
struct cg_controller;
struct cgroup_prop {
char *name;
char *value;
mode_t mode;
uid_t uid;
gid_t gid;
struct list_head list;
};
/* This describes a particular cgroup path, e.g. the '/lxc/u1' part of
* 'blkio/lxc/u1' and any properties it has.
*/
struct cgroup_dir {
char *path;
mode_t mode;
uid_t uid;
gid_t gid;
struct list_head properties;
unsigned int n_properties;
/* this is how children are linked together */
struct list_head siblings;
/* more cgroup_dirs */
struct list_head children;
unsigned int n_children;
};
/* This describes a particular cgroup controller, e.g. blkio or cpuset.
* The heads are subdirectories organized in their tree format.
*/
struct cg_controller {
unsigned int n_controllers;
char **controllers;
/* cgroup_dirs */
struct list_head heads;
unsigned int n_heads;
/* for cgroup list in cgroup.c */
struct list_head l;
/* controller is a threaded cgroup or not */
int is_threaded;
};
struct cg_controller *new_controller(const char *name);
/* parse all global cgroup information into structures */
int parse_cg_info(void);
int new_cg_root_add(char *controller, char *newroot);
extern struct ns_desc cgroup_ns_desc;
/*
* This struct describes a group controlled by one controller.
* The @name is the controller name or 'name=...' for named cgroups.
* The @path is the path from the hierarchy root.
*/
struct cg_ctl {
struct list_head l;
char *name;
char *path;
u32 cgns_prefix;
};
/*
* Returns the list of cg_ctl-s sorted by name
*/
struct list_head;
struct parasite_dump_cgroup_args;
extern int parse_thread_cgroup(int pid, int tid, struct parasite_dump_cgroup_args *args, struct list_head *l,
unsigned int *n);
extern void put_ctls(struct list_head *);
int collect_controllers(struct list_head *cgroups, unsigned int *n_cgroups);
int stop_cgroupd(void);
#endif /* __CR_CGROUP_H__ */
| 2,409 | 22.627451 | 109 |
h
|
criu
|
criu-master/criu/include/cr_options.h
|
#ifndef __CR_OPTIONS_H__
#define __CR_OPTIONS_H__
#include <stdbool.h>
#include <sys/capability.h>
#include "common/config.h"
#include "common/list.h"
#include "int.h"
#include "image.h"
/* Configuration and CLI parsing order defines */
#define PARSING_GLOBAL_CONF 1
#define PARSING_USER_CONF 2
#define PARSING_ENV_CONF 3
#define PARSING_CMDLINE_CONF 4
#define PARSING_ARGV 5
#define PARSING_RPC_CONF 6
#define PARSING_LAST 7
#define SET_CHAR_OPTS(__dest, __src) \
do { \
char *__src_dup = xstrdup(__src); \
if (!__src_dup) \
abort(); \
xfree(opts.__dest); \
opts.__dest = __src_dup; \
} while (0)
/*
* CPU capability options.
*/
#define CPU_CAP_NONE (0u << 0) /* Don't check capability at all */
#define CPU_CAP_FPU (1u << 0) /* Only FPU capability required */
#define CPU_CAP_CPU (1u << 1) /* Strict CPU capability required */
#define CPU_CAP_INS (1u << 2) /* Instructions CPU capability */
#define CPU_CAP_IMAGE (1u << 3) /* Write capability on dump and read on restore*/
#define CPU_CAP_ALL (CPU_CAP_FPU | CPU_CAP_CPU | CPU_CAP_INS)
#define CPU_CAP_DEFAULT (CPU_CAP_FPU | CPU_CAP_INS)
struct cg_root_opt {
struct list_head node;
char *controller;
char *newroot;
};
/*
* Pre-dump variants
*/
#define PRE_DUMP_SPLICE 1 /* Pre-dump using parasite */
#define PRE_DUMP_READ 2 /* Pre-dump using process_vm_readv syscall */
/*
* Cgroup management options.
*/
#define CG_MODE_IGNORE (0u << 0) /* Zero is important here */
#define CG_MODE_NONE (1u << 0)
#define CG_MODE_PROPS (1u << 1)
#define CG_MODE_SOFT (1u << 2)
#define CG_MODE_FULL (1u << 3)
#define CG_MODE_STRICT (1u << 4)
#define CG_MODE_DEFAULT (CG_MODE_SOFT)
/*
* Network locking method
*/
enum NETWORK_LOCK_METHOD {
NETWORK_LOCK_IPTABLES,
NETWORK_LOCK_NFTABLES,
};
#define NETWORK_LOCK_DEFAULT NETWORK_LOCK_IPTABLES
/*
* Ghost file size we allow to carry by default.
*/
#define DEFAULT_GHOST_LIMIT (1 << 20)
#define DEFAULT_TIMEOUT 10
enum FILE_VALIDATION_OPTIONS {
/*
* This constant indicates that the file validation should be tried with the
* file size method by default.
*/
FILE_VALIDATION_FILE_SIZE,
/*
* This constant indicates that the file validation should be tried with the
* build-ID method by default.
*/
FILE_VALIDATION_BUILD_ID
};
/* This constant dictates which file validation method should be tried by default. */
#define FILE_VALIDATION_DEFAULT FILE_VALIDATION_BUILD_ID
/* This constant dictates that criu use fiemap to copy ghost file by default.*/
#define FIEMAP_DEFAULT 1
struct irmap;
struct irmap_path_opt {
struct list_head node;
struct irmap *ir;
};
enum criu_mode {
CR_UNSET = 0,
CR_DUMP,
CR_PRE_DUMP,
CR_RESTORE,
CR_LAZY_PAGES,
CR_CHECK,
CR_PAGE_SERVER,
CR_SERVICE,
CR_SWRK,
CR_DEDUP,
CR_CPUINFO,
CR_EXEC_DEPRECATED,
CR_SHOW_DEPRECATED,
};
struct cr_options {
int final_state;
int check_extra_features;
int check_experimental_features;
union {
int restore_detach;
bool daemon_mode;
};
int restore_sibling;
bool ext_unix_sk;
int shell_job;
int handle_file_locks;
int tcp_established_ok;
int tcp_close;
int evasive_devices;
int link_remap_ok;
int log_file_per_pid;
int pre_dump_mode;
bool swrk_restore;
char *output;
char *root;
char *pidfile;
char *freeze_cgroup;
struct list_head ext_mounts;
struct list_head inherit_fds;
struct list_head external;
struct list_head join_ns;
char *libdir;
int use_page_server;
unsigned short port;
char *addr;
int ps_socket;
int track_mem;
char *img_parent;
int auto_dedup;
unsigned int cpu_cap;
int force_irmap;
char **exec_cmd;
unsigned int manage_cgroups;
char *new_global_cg_root;
char *cgroup_props;
char *cgroup_props_file;
struct list_head new_cgroup_roots;
char *cgroup_yard;
bool autodetect_ext_mounts;
int enable_external_sharing;
int enable_external_masters;
bool aufs; /* auto-detected, not via cli */
bool overlayfs;
int ghost_fiemap;
#ifdef CONFIG_BINFMT_MISC_VIRTUALIZED
bool has_binfmt_misc; /* auto-detected */
#endif
size_t ghost_limit;
struct list_head irmap_scan_paths;
bool lsm_supplied;
char *lsm_profile;
char *lsm_mount_context;
unsigned int timeout;
unsigned int empty_ns;
int tcp_skip_in_flight;
bool lazy_pages;
char *work_dir;
int network_lock_method;
int skip_file_rwx_check;
/*
* When we scheduler for removal some functionality we first
* deprecate it and it sits in criu for some time. By default
* the deprecated stuff is not working, but it's still possible
* to turn one ON while the code is in.
*/
int deprecated_ok;
int display_stats;
int weak_sysctls;
int status_fd;
bool orphan_pts_master;
int stream;
pid_t tree_id;
int log_level;
char *imgs_dir;
char *tls_cacert;
char *tls_cacrl;
char *tls_cert;
char *tls_key;
int tls;
int tls_no_cn_verify;
/* This stores which method to use for file validation. */
int file_validation_method;
/* Shows the mode criu is running at the moment: dump/pre-dump/restore/... */
enum criu_mode mode;
int mntns_compat_mode;
/* Remember the program name passed to main() so we can use it in
* error messages elsewhere.
*/
char *argv_0;
/*
* This contains the eUID of the current CRIU user. It
* will only be set to a non-zero value if CRIU has
* the necessary capabilities to run as non root.
* CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN
*/
uid_t uid;
/* This contains the value from capget()->effective */
u32 cap_eff[_LINUX_CAPABILITY_U32S_3];
/*
* If CRIU should be running as non-root with the help of
* CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN the user should
* explicitly request it as it comes with many limitations.
*/
int unprivileged;
};
extern struct cr_options opts;
extern char *rpc_cfg_file;
extern int parse_options(int argc, char **argv, bool *usage_error, bool *has_exec_cmd, int state);
extern int check_options(void);
extern void init_opts(void);
#endif /* __CR_OPTIONS_H__ */
| 6,027 | 23.208835 | 98 |
h
|
criu
|
criu-master/criu/include/criu-log.h
|
/*
This file defines types and macros for CRIU plugins.
Copyright (C) 2013 Parallels, Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __CRIU_LOG_H__
#define __CRIU_LOG_H__
#include "log.h"
#include <sys/types.h>
extern int log_init(const char *output);
extern void log_fini(void);
extern int log_init_by_pid(pid_t pid);
extern void log_closedir(void);
extern int log_keep_err(void);
extern char *log_first_err(void);
extern void log_set_fd(int fd);
extern int log_get_fd(void);
extern void log_set_loglevel(unsigned int loglevel);
extern unsigned int log_get_loglevel(void);
struct timeval;
extern void log_get_logstart(struct timeval *);
extern int write_pidfile(int pid);
#define DEFAULT_LOG_FILENAME "criu.log"
/*
 * Report whether a message of @loglevel would be suppressed under the
 * current log level; LOG_MSG-level output is never quelled.
 */
static inline int pr_quelled(unsigned int loglevel)
{
	unsigned int cur = log_get_loglevel();

	return cur < loglevel && loglevel != LOG_MSG;
}
#endif /* __CR_LOG_LEVELS_H__ */
| 1,571 | 30.44 | 78 |
h
|
criu
|
criu-master/criu/include/criu-plugin.h
|
/*
* This file defines types and macros for CRIU plugins.
* Copyright (C) 2013-2014 Parallels, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __CRIU_PLUGIN_H__
#define __CRIU_PLUGIN_H__
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/stat.h>
#define CRIU_PLUGIN_GEN_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define CRIU_PLUGIN_VERSION_MAJOR 0
#define CRIU_PLUGIN_VERSION_MINOR 2
#define CRIU_PLUGIN_VERSION_SUBLEVEL 0
#define CRIU_PLUGIN_VERSION_OLD CRIU_PLUGIN_GEN_VERSION(0, 1, 0)
#define CRIU_PLUGIN_VERSION \
CRIU_PLUGIN_GEN_VERSION(CRIU_PLUGIN_VERSION_MAJOR, CRIU_PLUGIN_VERSION_MINOR, CRIU_PLUGIN_VERSION_SUBLEVEL)
/*
* Plugin hook points and their arguments in hooks.
*/
enum {
CR_PLUGIN_HOOK__DUMP_UNIX_SK = 0,
CR_PLUGIN_HOOK__RESTORE_UNIX_SK = 1,
CR_PLUGIN_HOOK__DUMP_EXT_FILE = 2,
CR_PLUGIN_HOOK__RESTORE_EXT_FILE = 3,
CR_PLUGIN_HOOK__DUMP_EXT_MOUNT = 4,
CR_PLUGIN_HOOK__RESTORE_EXT_MOUNT = 5,
CR_PLUGIN_HOOK__DUMP_EXT_LINK = 6,
CR_PLUGIN_HOOK__HANDLE_DEVICE_VMA = 7,
CR_PLUGIN_HOOK__UPDATE_VMA_MAP = 8,
CR_PLUGIN_HOOK__RESUME_DEVICES_LATE = 9,
CR_PLUGIN_HOOK__MAX
};
#define DECLARE_PLUGIN_HOOK_ARGS(__hook, ...) typedef int(__hook##_t)(__VA_ARGS__)
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__DUMP_UNIX_SK, int fd, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__RESTORE_UNIX_SK, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__DUMP_EXT_FILE, int fd, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__RESTORE_EXT_FILE, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__DUMP_EXT_MOUNT, char *mountpoint, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__RESTORE_EXT_MOUNT, int id, char *mountpoint, char *old_root, int *is_file);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__DUMP_EXT_LINK, int index, int type, char *kind);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__HANDLE_DEVICE_VMA, int fd, const struct stat *stat);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__UPDATE_VMA_MAP, const char *path, const uint64_t addr,
const uint64_t old_pgoff, uint64_t *new_pgoff, int *plugin_fd);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__RESUME_DEVICES_LATE, int pid);
enum {
CR_PLUGIN_STAGE__DUMP,
CR_PLUGIN_STAGE__PRE_DUMP,
CR_PLUGIN_STAGE__RESTORE,
CR_PLUGIN_STAGE_MAX
};
/*
* Plugin descriptor.
*/
typedef struct {
const char *name;
int (*init)(int stage);
void (*exit)(int stage, int ret);
unsigned int version;
unsigned int max_hooks;
void *hooks[CR_PLUGIN_HOOK__MAX];
} cr_plugin_desc_t;
extern cr_plugin_desc_t CR_PLUGIN_DESC;
#define CR_PLUGIN_REGISTER(___name, ___init, ___exit) \
cr_plugin_desc_t CR_PLUGIN_DESC = { \
.name = ___name, \
.init = ___init, \
.exit = ___exit, \
.version = CRIU_PLUGIN_VERSION, \
.max_hooks = CR_PLUGIN_HOOK__MAX, \
};
/* Default plugin init hook: accepts any @stage and reports success. */
static inline int cr_plugin_dummy_init(int stage)
{
	(void)stage; /* intentionally unused by the no-op default */
	return 0;
}
/* Default plugin exit hook: nothing to tear down. */
static inline void cr_plugin_dummy_exit(int stage, int ret)
{
	(void)stage; /* intentionally unused */
	(void)ret;
}
#define CR_PLUGIN_REGISTER_DUMMY(___name) \
cr_plugin_desc_t CR_PLUGIN_DESC = { \
.name = ___name, \
.init = cr_plugin_dummy_init, \
.exit = cr_plugin_dummy_exit, \
.version = CRIU_PLUGIN_VERSION, \
.max_hooks = CR_PLUGIN_HOOK__MAX, \
};
#define CR_PLUGIN_REGISTER_HOOK(__hook, __func) \
static void __attribute__((constructor)) cr_plugin_register_hook_##__func(void) \
{ \
CR_PLUGIN_DESC.hooks[__hook] = (void *)__func; \
}
/* Public API */
extern int criu_get_image_dir(void);
/*
* Deprecated, will be removed in next version.
*/
typedef int(cr_plugin_init_t)(void);
typedef void(cr_plugin_fini_t)(void);
typedef int(cr_plugin_dump_unix_sk_t)(int fd, int id);
typedef int(cr_plugin_restore_unix_sk_t)(int id);
typedef int(cr_plugin_dump_file_t)(int fd, int id);
typedef int(cr_plugin_restore_file_t)(int id);
typedef int(cr_plugin_dump_ext_mount_t)(char *mountpoint, int id);
typedef int(cr_plugin_restore_ext_mount_t)(int id, char *mountpoint, char *old_root, int *is_file);
typedef int(cr_plugin_dump_ext_link_t)(int index, int type, char *kind);
typedef int(cr_plugin_handle_device_vma_t)(int fd, const struct stat *stat);
typedef int(cr_plugin_update_vma_map_t)(const char *path, const uint64_t addr, const uint64_t old_pgoff,
uint64_t *new_pgoff, int *plugin_fd);
typedef int(cr_plugin_resume_devices_late_t)(int pid);
#endif /* __CRIU_PLUGIN_H__ */
| 5,233 | 33.662252 | 116 |
h
|
criu
|
criu-master/criu/include/crtools.h
|
#ifndef __CR_CRTOOLS_H__
#define __CR_CRTOOLS_H__
#include <sys/types.h>
#include "common/list.h"
#include "servicefd.h"
#include "images/inventory.pb-c.h"
#define CR_FD_PERM (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)
extern int check_img_inventory(bool restore);
extern int write_img_inventory(InventoryEntry *he);
extern int inventory_save_uptime(InventoryEntry *he);
extern InventoryEntry *get_parent_inventory(void);
extern int prepare_inventory(InventoryEntry *he);
struct pprep_head {
int (*actor)(struct pprep_head *);
struct pprep_head *next;
};
extern void add_post_prepare_cb(struct pprep_head *);
extern bool deprecated_ok(char *what);
extern int cr_dump_tasks(pid_t pid);
extern int cr_pre_dump_tasks(pid_t pid);
extern int cr_restore_tasks(void);
extern int convert_to_elf(char *elf_path, int fd_core);
extern int cr_check(void);
extern int check_caps(void);
extern int cr_dedup(void);
extern int cr_lazy_pages(bool daemon);
extern int check_add_feature(char *arg);
extern void pr_check_features(const char *offset, const char *sep, int width);
#define PPREP_HEAD_INACTIVE ((struct pprep_head *)-1)
#define add_post_prepare_cb_once(phead) \
do { \
if ((phead)->next == PPREP_HEAD_INACTIVE) \
add_post_prepare_cb(phead); \
} while (0)
#define MAKE_PPREP_HEAD(name) \
struct pprep_head name = { \
.next = PPREP_HEAD_INACTIVE, \
.actor = name##_cb, \
}
#endif /* __CR_CRTOOLS_H__ */
| 1,518 | 28.784314 | 78 |
h
|
criu
|
criu-master/criu/include/file-lock.h
|
#ifndef __FILE_LOCK_H__
#define __FILE_LOCK_H__
#include "common/list.h"
#include "protobuf.h"
#include "images/file-lock.pb-c.h"
#define FL_UNKNOWN -1
#define FL_POSIX 1
#define FL_FLOCK 2
#define FL_OFD 4
#define FL_LEASE 8
/* for posix fcntl() and lockf() */
#ifndef F_RDLCK
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
#endif
/* for OFD locks fcntl() */
#ifndef F_OFD_GETLK
#define F_OFD_GETLK 36
#define F_OFD_SETLK 37
#define F_OFD_SETLKW 38
#endif
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB \
4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
#define LOCK_RW 192 /* which allows concurrent read & write ops */
/* for leases */
#define LEASE_BREAKING 4
/* In-memory representation of one dumped file lock. */
struct file_lock {
	long long fl_id; /* unique id of the lock entry */
	int fl_kind; /* FL_POSIX/FL_FLOCK/FL_OFD/FL_LEASE (or FL_UNKNOWN) */
	int fl_ltype; /* lock type; presumably F_RDLCK/F_WRLCK/LOCK_* per kind — confirm against parser */
	pid_t fl_owner; /* process, which created the lock */
	pid_t fl_holder; /* pid of fd on whose the lock is found */
	int maj, min; /* NOTE(review): looks like the device of the locked file — confirm */
	unsigned long i_no; /* inode number of the locked file */
	long long start; /* start offset of the locked range */
	char end[32]; /* end offset kept as text (may be "EOF") — TODO confirm format */
	struct list_head list; /* list of all file locks */
	int real_owner;
	int owners_fd;
};
extern struct list_head file_lock_list;
extern struct file_lock *alloc_file_lock(void);
extern void free_file_locks(void);
extern int prepare_file_locks(int pid);
extern struct collect_image_info file_locks_cinfo;
struct pid;
struct fd_parms;
extern void discard_dup_locks_tail(pid_t pid, int fd);
extern int correct_file_leases_type(struct pid *, int fd, int lfd);
extern int note_file_lock(struct pid *, int fd, int lfd, struct fd_parms *);
extern int dump_file_locks(void);
#define OPT_FILE_LOCKS "file-locks"
#endif /* __FILE_LOCK_H__ */
| 1,953 | 23.123457 | 76 |
h
|
criu
|
criu-master/criu/include/files-reg.h
|
#ifndef __CR_FILES_REG_H__
#define __CR_FILES_REG_H__
#include "files.h"
#include "util.h"
#include "images/regfile.pb-c.h"
#include "images/ghost-file.pb-c.h"
struct cr_imgset;
struct fd_parms;
struct file_remap {
char *rpath;
bool is_dir;
int rmnt_id;
uid_t uid;
gid_t gid;
};
struct reg_file_info {
struct file_desc d;
RegFileEntry *rfe;
struct file_remap *remap;
bool size_mode_checked;
bool is_dir;
char *path;
};
extern int open_reg_by_id(u32 id);
extern int open_reg_fd(struct file_desc *);
extern int open_path(struct file_desc *, int (*open_cb)(int ns_root_fd, struct reg_file_info *, void *), void *arg);
extern const struct fdtype_ops regfile_dump_ops;
extern int do_open_reg_noseek_flags(int ns_root_fd, struct reg_file_info *rfi, void *arg);
extern int dump_one_reg_file(int lfd, u32 id, const struct fd_parms *p);
extern struct file_remap *lookup_ghost_remap(u32 dev, u32 ino);
extern struct file_desc *try_collect_special_file(u32 id, int optional);
#define collect_special_file(id) try_collect_special_file(id, 0)
extern int collect_filemap(struct vma_area *);
extern void filemap_ctx_init(bool auto_close);
extern void filemap_ctx_fini(void);
extern struct collect_image_info reg_file_cinfo;
extern int collect_remaps_and_regfiles(void);
extern void delete_link_remaps(void);
extern void free_link_remaps(void);
extern int prepare_remaps(void);
extern int try_clean_remaps(bool only_ghosts);
static inline int link_strip_deleted(struct fd_link *link)
{
return strip_deleted(link->name, link->len);
}
extern int dead_pid_conflict(void);
extern int rm_parent_dirs(int mntns_root, char *path, int count);
extern int make_parent_dirs_if_need(int mntns_root, char *path);
#endif /* __CR_FILES_REG_H__ */
| 1,743 | 25.830769 | 116 |
h
|
criu
|
criu-master/criu/include/files.h
|
#ifndef __CR_FILES_H__
#define __CR_FILES_H__
#include <sys/stat.h>
#include "int.h"
#include "common/compiler.h"
#include "fcntl.h"
#include "common/lock.h"
#include "common/list.h"
#include "pid.h"
#include "rst_info.h"
#include "images/fdinfo.pb-c.h"
#include "images/fown.pb-c.h"
#include "images/vma.pb-c.h"
struct parasite_drain_fd;
struct pstree_item;
struct file_desc;
struct cr_imgset;
struct rst_info;
struct parasite_ctl;
struct fd_link {
union {
/* Link info for generic file (path) */
struct {
char name[PATH_MAX];
size_t len;
};
/* Link info for proc-ns file */
struct {
struct ns_desc *ns_d;
unsigned int ns_kid;
};
};
};
struct fd_parms {
int fd;
off_t pos;
unsigned int flags;
char fd_flags;
struct stat stat;
pid_t pid;
FownEntry fown;
struct fd_link *link;
long fs_type;
int mnt_id;
struct parasite_ctl *fd_ctl;
struct parasite_drain_fd *dfds;
};
#define FD_PARMS_INIT \
(struct fd_parms) \
{ \
.fd = FD_DESC_INVALID, .fown = FOWN_ENTRY__INIT, .link = NULL, .mnt_id = -1, \
}
extern int fill_fdlink(int lfd, const struct fd_parms *p, struct fd_link *link);
extern uint32_t make_gen_id(uint32_t st_dev, uint32_t st_ino, uint64_t pos);
struct file_desc;
enum {
FLE_INITIALIZED,
/*
* FLE is open (via open() or socket() or etc syscalls), and
* common file setting are set up (type-specific are not yet).
* Most possible, the master was already served out.
*/
FLE_OPEN,
/*
* File-type specific settings and preparations are finished,
* and FLE is completely restored.
*/
FLE_RESTORED,
};
struct fdinfo_list_entry {
	struct list_head desc_list; /* To chain on @fd_info_head */
	struct file_desc *desc;	    /* Associated file descriptor */
	struct list_head ps_list;   /* To chain per-task files */
	struct pstree_item *task;   /* Owning task -- NOTE(review): presumably the restorer of @fe; confirm */
	FdinfoEntry *fe;	    /* Per-fd attributes from the fdinfo image */
	int pid;		    /* Restorer pid, used for master election (see fdinfo_rst_prio) */
	u8 received : 1;	    /* fd already handed over to this task -- TODO confirm */
	u8 stage : 3;		    /* Restore progress: one of FLE_INITIALIZED/FLE_OPEN/FLE_RESTORED */
	u8 fake : 1;		    /* Entry added artificially (see the @fake arg of collect_fd_to) */
};
extern int inh_fd_max;
/*
 * Does @fd_a win the file-master election over @fd_b?
 * Task priority (pid_rst_prio) decides first; between equal pids,
 * the lower fd number takes priority.
 */
static inline int fdinfo_rst_prio(struct fdinfo_list_entry *fd_a, struct fdinfo_list_entry *fd_b)
{
	if (pid_rst_prio(fd_a->pid, fd_b->pid))
		return 1;
	return (fd_a->pid == fd_b->pid) && (fd_a->fe->fd < fd_b->fe->fd);
}
struct file_desc_ops {
/* fd_types from images/fdinfo.proto */
unsigned int type;
/*
* Opens a file by whatever syscall is required for that.
* The returned descriptor may be closed (dup2-ed to another)
* so it shouldn't be saved for any post-actions.
*/
int (*open)(struct file_desc *d, int *new_fd);
char *(*name)(struct file_desc *, char *b, size_t s);
};
int collect_fd(int pid, FdinfoEntry *e, struct rst_info *rst_info, bool ghost);
struct fdinfo_list_entry *collect_fd_to(int pid, FdinfoEntry *e, struct rst_info *rst_info, struct file_desc *fdesc,
bool fake, bool force_master);
u32 find_unused_file_desc_id(void);
unsigned int find_unused_fd(struct pstree_item *, int hint_fd);
struct fdinfo_list_entry *find_used_fd(struct pstree_item *, int fd);
struct file_desc {
u32 id; /* File id, unique */
struct hlist_node hash; /* Descriptor hashing and lookup */
struct list_head fd_info_head; /* Chain of fdinfo_list_entry-s with same ID and type but different pids */
struct file_desc_ops *ops; /* Associated operations */
struct list_head fake_master_list; /* To chain in the list of file_desc, which don't
* have a fle in a task, that having permissions */
};
struct fdtype_ops {
unsigned int type;
int (*dump)(int lfd, u32 id, const struct fd_parms *p);
int (*pre_dump)(int pid, int lfd);
};
struct cr_img;
extern int dump_my_file(int lfd, u32 *, int *type);
extern int do_dump_gen_file(struct fd_parms *p, int lfd, const struct fdtype_ops *ops, FdinfoEntry *e);
struct parasite_drain_fd;
int dump_task_files_seized(struct parasite_ctl *ctl, struct pstree_item *item, struct parasite_drain_fd *dfds);
int predump_task_files(int pid);
extern void file_desc_init(struct file_desc *d, u32 id, struct file_desc_ops *ops);
extern int file_desc_add(struct file_desc *d, u32 id, struct file_desc_ops *ops);
extern struct fdinfo_list_entry *try_file_master(struct file_desc *d);
extern struct fdinfo_list_entry *file_master(struct file_desc *d);
extern struct file_desc *find_file_desc_raw(int type, u32 id);
extern int setup_and_serve_out(struct fdinfo_list_entry *fle, int new_fd);
extern int recv_desc_from_peer(struct file_desc *d, int *fd);
extern int send_desc_to_peer(int fd, struct file_desc *d);
extern int restore_fown(int fd, FownEntry *fown);
extern int rst_file_params(int fd, FownEntry *fown, int flags);
extern void show_saved_files(void);
extern int prepare_fds(struct pstree_item *me);
extern int prepare_fd_pid(struct pstree_item *me);
extern int prepare_files(void);
extern int restore_fs(struct pstree_item *);
extern int prepare_fs_pid(struct pstree_item *);
extern int set_fd_flags(int fd, int flags);
extern struct collect_image_info files_cinfo;
#define files_collected() (files_cinfo.flags & COLLECT_HAPPENED)
extern int close_old_fds(void);
#ifndef AT_EMPTY_PATH
#define AT_EMPTY_PATH 0x1000
#endif
#define LREMAP_PARAM "link-remap"
extern int shared_fdt_prepare(struct pstree_item *item);
extern struct collect_image_info ext_file_cinfo;
extern int dump_unsupp_fd(struct fd_parms *p, int lfd, char *more, char *info, FdinfoEntry *);
extern int inherit_fd_parse(char *optarg);
extern int inherit_fd_add(int fd, char *key);
extern void inherit_fd_log(void);
extern int inherit_fd_move_to_fdstore(void);
extern int inherit_fd_lookup_id(char *id);
extern bool inherited_fd(struct file_desc *, int *fdp);
extern FdinfoEntry *dup_fdinfo(FdinfoEntry *old, int fd, unsigned flags);
int dup_fle(struct pstree_item *task, struct fdinfo_list_entry *ple, int fd, unsigned flags);
extern int open_transport_socket(void);
extern int set_fds_event(pid_t virt);
extern void wait_fds_event(void);
int find_unused_fd_pid(pid_t pid);
#endif /* __CR_FILES_H__ */
| 6,140 | 29.705 | 116 |
h
|
criu
|
criu-master/criu/include/filesystems.h
|
#ifndef __CR_FILESYSTEMS_H__
#define __CR_FILESYSTEMS_H__
extern struct fstype *find_fstype_by_name(char *fst);
extern struct fstype *decode_fstype(u32 fst);
extern bool add_fsname_auto(const char *names);
struct mount_info;
typedef int (*mount_fn_t)(struct mount_info *mi, const char *src, const char *fstype, unsigned long mountflags);
struct fstype {
char *name;
int code;
int (*dump)(struct mount_info *pm);
int (*restore)(struct mount_info *pm);
int (*check_bindmount)(struct mount_info *pm);
int (*parse)(struct mount_info *pm);
int (*collect)(struct mount_info *pm);
bool (*sb_equal)(struct mount_info *a, struct mount_info *b);
mount_fn_t mount;
};
extern struct fstype *fstype_auto(void);
/* callback for AUFS support */
extern int aufs_parse(struct mount_info *mi);
/* callback for OverlayFS support */
extern int overlayfs_parse(struct mount_info *mi);
/* FIXME -- remove */
extern struct list_head binfmt_misc_list;
#endif
| 950 | 27.818182 | 112 |
h
|
criu
|
criu-master/criu/include/fsnotify.h
|
#ifndef __CR_FSNOTIFY_H__
#define __CR_FSNOTIFY_H__
#include "files.h"
#include "protobuf.h"
#include "images/fsnotify.pb-c.h"
#define KERNEL_FS_EVENT_ON_CHILD 0x08000000
#ifndef INOTIFY_IOC_SETNEXTWD
#define INOTIFY_IOC_SETNEXTWD _IOW('I', 0, __s32)
#endif
extern int is_inotify_link(char *link);
extern int is_fanotify_link(char *link);
extern const struct fdtype_ops inotify_dump_ops;
extern const struct fdtype_ops fanotify_dump_ops;
extern struct collect_image_info inotify_cinfo;
extern struct collect_image_info inotify_mark_cinfo;
extern struct collect_image_info fanotify_cinfo;
extern struct collect_image_info fanotify_mark_cinfo;
#endif /* __CR_FSNOTIFY_H__ */
| 679 | 26.2 | 53 |
h
|
criu
|
criu-master/criu/include/hugetlb.h
|
#ifndef __CR_HUGETLB_H_
#define __CR_HUGETLB_H_
#include <sys/types.h>
#include <stddef.h>
#include "vma.h"
#define ANON_HUGEPAGE_PREFIX "/anon_hugepage"
#define ANON_HUGEPAGE_PREFIX_LEN (sizeof(ANON_HUGEPAGE_PREFIX) - 1)
enum hugepage_size {
HUGETLB_16KB,
HUGETLB_64KB,
HUGETLB_512KB,
HUGETLB_1MB,
HUGETLB_2MB,
HUGETLB_8MB,
HUGETLB_16MB,
HUGETLB_32MB,
HUGETLB_256MB,
HUGETLB_512MB,
HUGETLB_1GB,
HUGETLB_2GB,
HUGETLB_16GB,
HUGETLB_MAX
};
#define MAP_HUGETLB_SHIFT 26
#define MAP_HUGETLB_SIZE_MASK (0x3f << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_16KB (14 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_64KB (16 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_512KB (19 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_1MB (20 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_2MB (21 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_8MB (23 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_16MB (24 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_32MB (25 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_256MB (28 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_512MB (29 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_1GB (30 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_2GB (31 << MAP_HUGETLB_SHIFT)
#define MAP_HUGETLB_16GB (34 << MAP_HUGETLB_SHIFT)
struct htlb_info {
unsigned long long size;
int flag;
};
extern struct htlb_info hugetlb_info[HUGETLB_MAX];
int is_hugetlb_dev(dev_t dev, int *hugetlb_size_flag);
int can_dump_with_memfd_hugetlb(dev_t dev, int *hugetlb_size_flag, const char *file_path, struct vma_area *vma);
unsigned long get_size_from_hugetlb_flag(int flag);
#ifndef MFD_HUGETLB
#define MFD_HUGETLB 4
#endif
#endif
| 1,617 | 25.096774 | 112 |
h
|
criu
|
criu-master/criu/include/image.h
|
#ifndef __CR_IMAGE_H__
#define __CR_IMAGE_H__
#include <stdbool.h>
#include "common/compiler.h"
#include "servicefd.h"
#include "image-desc.h"
#include "fcntl.h"
#include "magic.h"
#include "bfd.h"
#include "log.h"
#include "common/bug.h"
#define PAGE_RSS 1
#define PAGE_ANON 2
/*
* Top bit set in the tgt id means we've remapped
* to a ghost file.
*/
#define REMAP_GHOST (1 << 31)
/*
* VMA_AREA status:
*
* - none
* VmaEntry is just allocated and has not been used
* for anything yet
* - regular
* VmaEntry represent some memory area which should be
* dumped and restored; this is a general sign that we
* should not skip the area content from processing in
* compare with special areas such as vsyscall
* - stack
* the memory area is used in application stack so we
* should be careful about guard page here
* - vsyscall
* special memory area injected into the task memory
* space by the kernel itself, represent virtual syscall
* implementation and it is specific to every kernel version,
* its contents should not be dumped ever
* - vdso,vvar
* the vDSO area, it might reqire additional memory
* contents modification especially when tasks are
* migrating between different kernel versions
* - heap
* "heap" area in application, currently for information only
* - file private
* stands for privately memory mapped files
* - file shared
* stands for shared memory mapped files
* - anon shared
* represent shared anonymous memory areas
* - anon private
* represent private anonymous memory areas
* - SysV IPC
* IPC shared memory area
* - socket
* memory map for socket
* - AIO ring
* memory area serves AIO buffers
* - unsupported
* stands for any unknown memory areas, usually means
* we don't know how to work with it and should stop
* processing exiting with error; while the rest of bits
* are part of image ABI, this particular one must never
* be used in image.
*/
#define VMA_AREA_NONE (0 << 0)
#define VMA_AREA_REGULAR (1 << 0)
#define VMA_AREA_STACK (1 << 1)
#define VMA_AREA_VSYSCALL (1 << 2)
#define VMA_AREA_VDSO (1 << 3)
#define VMA_AREA_HEAP (1 << 5)
#define VMA_FILE_PRIVATE (1 << 6)
#define VMA_FILE_SHARED (1 << 7)
#define VMA_ANON_SHARED (1 << 8)
#define VMA_ANON_PRIVATE (1 << 9)
#define VMA_AREA_SYSVIPC (1 << 10)
#define VMA_AREA_SOCKET (1 << 11)
#define VMA_AREA_VVAR (1 << 12)
#define VMA_AREA_AIORING (1 << 13)
#define VMA_AREA_MEMFD (1 << 14)
#define VMA_EXT_PLUGIN (1 << 27)
#define VMA_CLOSE (1 << 28)
#define VMA_NO_PROT_WRITE (1 << 29)
#define VMA_PREMMAPED (1 << 30)
#define VMA_UNSUPP (1 << 31)
#define CR_CAP_SIZE 2
#define TASK_COMM_LEN 16
#define CR_PARENT_LINK "parent"
extern bool ns_per_id;
extern bool img_common_magic;
#define O_NOBUF (O_DIRECT)
#define O_SERVICE (O_DIRECTORY)
#define O_DUMP (O_WRONLY | O_CREAT | O_TRUNC)
#define O_RSTR (O_RDONLY)
#define O_FORCE_LOCAL (O_SYNC)
struct cr_img {
union {
struct bfd _x;
struct {
int fd; /* should be first to coincide with _x.fd */
int type;
unsigned long oflags;
char *path;
};
};
};
#define EMPTY_IMG_FD (-404)
#define LAZY_IMG_FD (-505)
static inline bool empty_image(struct cr_img *img)
{
return img && img->_x.fd == EMPTY_IMG_FD;
}
/*
 * A lazy image has not been opened yet; its fd holds the LAZY marker.
 * Caller must pass a non-NULL image (matches the original contract).
 */
static inline bool lazy_image(struct cr_img *img)
{
	const int fd = img->_x.fd;

	return fd == LAZY_IMG_FD;
}
extern int open_image_lazy(struct cr_img *img);
/*
 * Return the raw (unbuffered) fd behind @img, opening a lazy image on
 * demand. Returns -1 for a NULL image or when the lazy open fails.
 */
static inline int img_raw_fd(struct cr_img *img)
{
	if (!img)
		return -1;
	/* Lazy images carry no real fd until first use; non-zero means open failed */
	if (lazy_image(img) && open_image_lazy(img))
		return -1;
	/* Handing out the raw fd of a buffered bfd would bypass its buffer */
	BUG_ON(bfd_buffered(&img->_x));
	return img->_x.fd;
}
extern off_t img_raw_size(struct cr_img *img);
extern int open_image_dir(char *dir, int mode);
extern void close_image_dir(void);
/*
* Return -1 -- parent symlink points to invalid target
* Return 0 && pfd < 0 -- parent symlink does not exist
* Return 0 && pfd >= 0 -- opened
*/
extern int open_parent(int dfd, int *pfd);
extern struct cr_img *open_image_at(int dfd, int type, unsigned long flags, ...);
#define open_image(typ, flags, ...) open_image_at(-1, typ, flags, ##__VA_ARGS__)
extern int open_image_lazy(struct cr_img *img);
extern struct cr_img *open_pages_image(unsigned long flags, struct cr_img *pmi, u32 *pages_id);
extern struct cr_img *open_pages_image_at(int dfd, unsigned long flags, struct cr_img *pmi, u32 *pages_id);
extern void up_page_ids_base(void);
extern struct cr_img *img_from_fd(int fd); /* for cr-show mostly */
extern int write_img_buf(struct cr_img *, const void *ptr, int size);
#define write_img(img, ptr) write_img_buf((img), (ptr), sizeof(*(ptr)))
extern int read_img_buf_eof(struct cr_img *, void *ptr, int size);
#define read_img_eof(img, ptr) read_img_buf_eof((img), (ptr), sizeof(*(ptr)))
extern int read_img_buf(struct cr_img *, void *ptr, int size);
#define read_img(img, ptr) read_img_buf((img), (ptr), sizeof(*(ptr)))
extern int read_img_str(struct cr_img *, char **pstr, int size);
extern void close_image(struct cr_img *);
#endif /* __CR_IMAGE_H__ */
| 5,062 | 27.44382 | 107 |
h
|
criu
|
criu-master/criu/include/imgset.h
|
#ifndef __CR_IMGSET_H__
#define __CR_IMGSET_H__
#include "image-desc.h"
#include "log.h"
#include "common/bug.h"
#include "image.h"
struct cr_imgset {
int fd_off;
int fd_nr;
struct cr_img **_imgs;
};
/*
 * Look up the image of @type inside @imgset. Images of a set are stored
 * contiguously in @_imgs, with @fd_off being the type of slot 0.
 */
static inline struct cr_img *img_from_set(const struct cr_imgset *imgset, int type)
{
	int idx;
	idx = type - imgset->fd_off;
	/*
	 * Valid slots are 0 .. fd_nr - 1. The previous check (idx > fd_nr)
	 * was off by one -- it let idx == fd_nr through -- and also missed
	 * negative indices for a @type below the set's range; either case
	 * would read past the _imgs array.
	 */
	BUG_ON(idx < 0 || idx >= imgset->fd_nr);
	return imgset->_imgs[idx];
}
extern struct cr_imgset *glob_imgset;
extern struct cr_fd_desc_tmpl imgset_template[CR_FD_MAX];
extern struct cr_imgset *cr_task_imgset_open(int pid, int mode);
extern struct cr_imgset *cr_imgset_open_range(int pid, int from, int to, unsigned long flags);
#define cr_imgset_open(pid, type, flags) cr_imgset_open_range(pid, _CR_FD_##type##_FROM, _CR_FD_##type##_TO, flags)
extern struct cr_imgset *cr_glob_imgset_open(int mode);
extern void close_cr_imgset(struct cr_imgset **cr_imgset);
#endif /* __CR_IMGSET_H__ */
| 915 | 23.756757 | 115 |
h
|
criu
|
criu-master/criu/include/kerndat.h
|
#ifndef __CR_KERNDAT_H__
#define __CR_KERNDAT_H__
#include <stdbool.h>
#include "int.h"
#include "common/config.h"
#include "asm/kerndat.h"
#include "util-vdso.h"
#include "hugetlb.h"
#include <compel/ptrace.h>
struct stat;
/*
* kerndat stands for "kernel data" and is a collection
* of run-time information about current kernel
*/
extern int kerndat_init(void);
enum pagemap_func {
PM_UNKNOWN,
PM_DISABLED, /* /proc/pid/pagemap doesn't open (user mode) */
PM_FLAGS_ONLY, /* pagemap zeroes pfn part (user mode) */
PM_FULL,
};
enum loginuid_func {
LUID_NONE,
LUID_READ,
LUID_FULL,
};
struct kerndat_s {
u32 magic1, magic2;
dev_t shmem_dev;
int last_cap;
u64 zero_page_pfn;
bool has_dirty_track;
bool has_memfd;
bool has_memfd_hugetlb;
bool has_fdinfo_lock;
unsigned long task_size;
bool ipv6;
enum loginuid_func luid;
bool compat_cr;
bool sk_ns;
bool sk_unix_file;
bool tun_ns;
enum pagemap_func pmap;
unsigned int has_xtlocks;
unsigned long mmap_min_addr;
bool has_tcp_half_closed;
bool stack_guard_gap_hidden;
int lsm;
bool apparmor_ns_dumping_enabled;
bool has_uffd;
unsigned long uffd_features;
bool has_thp_disable;
bool can_map_vdso;
bool vdso_hint_reliable;
struct vdso_symtable vdso_sym;
#ifdef CONFIG_COMPAT
struct vdso_symtable vdso_sym_compat;
#endif
bool has_nsid;
bool has_link_nsid;
unsigned int sysctl_nr_open;
bool x86_has_ptrace_fpu_xsave_bug;
bool has_inotify_setnextwd;
bool has_kcmp_epoll_tfd;
bool has_fsopen;
bool has_clone3_set_tid;
bool has_timens;
bool has_newifindex;
bool has_pidfd_open;
bool has_pidfd_getfd;
bool has_nspid;
bool has_nftables_concat;
bool has_sockopt_buf_lock;
dev_t hugetlb_dev[HUGETLB_MAX];
bool has_move_mount_set_group;
bool has_openat2;
bool has_rseq;
bool has_ptrace_get_rseq_conf;
struct __ptrace_rseq_configuration libc_rseq_conf;
bool has_ipv6_freebind;
};
extern struct kerndat_s kdat;
enum {
KERNDAT_FS_STAT_DEVPTS,
KERNDAT_FS_STAT_DEVTMPFS,
KERNDAT_FS_STAT_BINFMT_MISC,
KERNDAT_FS_STAT_MAX
};
/*
* Check whether the fs @which with kdevice @kdev
* is the same as host's. If yes, this means that
* the fs mount is shared with host, if no -- it's
* a new (likely virtuzlized) fs instance.
*/
extern int kerndat_fs_virtualized(unsigned int which, u32 kdev);
extern int kerndat_has_nspid(void);
#endif /* __CR_KERNDAT_H__ */
| 2,361 | 20.279279 | 64 |
h
|
criu
|
criu-master/criu/include/libnetlink.h
|
#ifndef __CR_LIBNETLINK_H__
#define __CR_LIBNETLINK_H__
#define CR_NLMSG_SEQ 24680 /* arbitrary chosen */
struct ns_id;
extern int do_rtnl_req(int nl, void *req, int size,
int (*receive_callback)(struct nlmsghdr *h, struct ns_id *ns, void *),
int (*error_callback)(int err, struct ns_id *ns, void *), struct ns_id *ns, void *);
extern int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen);
extern int32_t nla_get_s32(const struct nlattr *nla);
#define NLMSG_TAIL(nmsg) ((struct rtattr *)(((void *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
#ifndef NETNS_RTA
#define NETNS_RTA(r) ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))))
#endif
#endif /* __CR_LIBNETLINK_H__ */
| 748 | 33.045455 | 95 |
h
|
criu
|
criu-master/criu/include/log.h
|
#ifndef __CR_LOG_H__
#define __CR_LOG_H__
#include <inttypes.h>
#ifndef CR_NOGLIBC
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#endif /* CR_NOGLIBC */
#define LOG_UNSET (-1)
#define LOG_MSG (0) /* Print message regardless of log level */
#define LOG_ERROR (1) /* Errors only, when we're in trouble */
#define LOG_WARN (2) /* Warnings, dazen and confused but trying to continue */
#define LOG_INFO (3) /* Informative, everything is fine */
#define LOG_DEBUG (4) /* Debug only */
#define DEFAULT_LOGLEVEL LOG_WARN
/*
* This is low-level printing helper, try hard not to use it directly
* and use the pr_foo() helpers below.
*/
extern void print_on_level(unsigned int loglevel, const char *format, ...)
__attribute__((__format__(__printf__, 2, 3)));
#ifndef LOG_PREFIX
#define LOG_PREFIX
#endif
void flush_early_log_buffer(int fd);
#define print_once(loglevel, fmt, ...) \
do { \
static bool __printed; \
if (!__printed) { \
print_on_level(loglevel, fmt, ##__VA_ARGS__); \
__printed = 1; \
} \
} while (0)
#define pr_msg(fmt, ...) print_on_level(LOG_MSG, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) print_on_level(LOG_INFO, LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) print_on_level(LOG_ERROR, "Error (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_err_once(fmt, ...) print_once(LOG_ERROR, fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...) print_on_level(LOG_WARN, "Warn (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_warn_once(fmt, ...) print_once(LOG_WARN, "Warn (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_debug(fmt, ...) print_on_level(LOG_DEBUG, LOG_PREFIX fmt, ##__VA_ARGS__)
#ifndef CR_NOGLIBC
#define pr_perror(fmt, ...) pr_err(fmt ": %s\n", ##__VA_ARGS__, strerror(errno))
#endif /* CR_NOGLIBC */
#endif /* __CR_LOG_H__ */
| 2,117 | 31.090909 | 120 |
h
|
criu
|
criu-master/criu/include/lsm.h
|
#ifndef __CR_LSM_H__
#define __CR_LSM_H__
#include "images/inventory.pb-c.h"
#include "images/creds.pb-c.h"
#include "images/fdinfo.pb-c.h"
#define AA_SECURITYFS_PATH "/sys/kernel/security/apparmor"
/*
* Get the Lsmtype for the current host.
*/
extern Lsmtype host_lsm_type(void);
/*
* Initialize the Lsmtype for the current host
*/
extern void kerndat_lsm(void);
int collect_and_suspend_lsm(void);
int unsuspend_lsm(void);
/*
* Validate that the LSM profiles can be correctly applied (must happen after
* pstree is set up).
*/
int validate_lsm(char *profile);
/*
* Render the profile name in the way that the LSM wants it written to
* /proc/<pid>/attr/current, according to whatever is in the images and
* specified by --lsm-profile.
*/
int render_lsm_profile(char *profile, char **val);
extern int lsm_check_opts(void);
#ifdef CONFIG_HAS_SELINUX
int dump_xattr_security_selinux(int fd, FdinfoEntry *e);
int run_setsockcreatecon(FdinfoEntry *e);
int reset_setsockcreatecon(void);
#else
/* SELinux support compiled out: every hook degrades to a successful no-op. */
static inline int dump_xattr_security_selinux(int fd, FdinfoEntry *e)
{
	return 0;
}
/* No-op: nothing to set when SELinux support is absent. */
static inline int run_setsockcreatecon(FdinfoEntry *e)
{
	return 0;
}
/* No-op counterpart of run_setsockcreatecon() for the non-SELinux build. */
static inline int reset_setsockcreatecon(void)
{
	return 0;
}
#endif
#endif /* __CR_LSM_H__ */
| 1,258 | 20.706897 | 77 |
h
|
criu
|
criu-master/criu/include/magic.h
|
#ifndef __CR_MAGIC_H__
#define __CR_MAGIC_H__
/*
* Basic multi-file images
*/
#define CRTOOLS_IMAGES_V1 1
/*
* v1.1 has common magic in the head of each image file,
* except for inventory
*/
#define CRTOOLS_IMAGES_V1_1 2
/*
* Raw images are images in which data is stored in some
* non-crtool format (ip tool dumps, tarballs, etc.)
*/
#define RAW_IMAGE_MAGIC 0x0
/*
* Images have the IMG_COMMON_MAGIC in the head. Service files
* such as stats and irmap-cache have the IMG_SERVICE_MAGIC.
*/
#define IMG_COMMON_MAGIC 0x54564319 /* Sarov (a.k.a. Arzamas-16) */
#define IMG_SERVICE_MAGIC 0x55105940 /* Zlatoust */
/*
* The magic-s below correspond to coordinates
* of various Russian towns in the NNNNEEEE form.
*/
#define INVENTORY_MAGIC 0x58313116 /* Veliky Novgorod */
#define PSTREE_MAGIC 0x50273030 /* Kyiv */
#define FDINFO_MAGIC 0x56213732 /* Dmitrov */
#define PAGEMAP_MAGIC 0x56084025 /* Vladimir */
#define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
#define PAGES_MAGIC RAW_IMAGE_MAGIC
#define CORE_MAGIC 0x55053847 /* Kolomna */
#define IDS_MAGIC 0x54432030 /* Konigsberg */
#define VMAS_MAGIC 0x54123737 /* Tula */
#define PIPES_MAGIC 0x56513555 /* Tver */
#define PIPES_DATA_MAGIC 0x56453709 /* Dubna */
#define FIFO_MAGIC 0x58364939 /* Kirov */
#define FIFO_DATA_MAGIC 0x59333054 /* Tosno */
#define SIGACT_MAGIC 0x55344201 /* Murom */
#define UNIXSK_MAGIC 0x54373943 /* Ryazan */
#define INETSK_MAGIC 0x56443851 /* Pereslavl */
#define PACKETSK_MAGIC 0x60454618 /* Veliky Ustyug */
#define ITIMERS_MAGIC 0x57464056 /* Kostroma */
#define POSIX_TIMERS_MAGIC 0x52603957 /* Lipetsk */
#define SK_QUEUES_MAGIC 0x56264026 /* Suzdal */
#define UTSNS_MAGIC 0x54473203 /* Smolensk */
#define CREDS_MAGIC 0x54023547 /* Kozelsk */
#define IPC_VAR_MAGIC 0x53115007 /* Samara */
#define IPCNS_SHM_MAGIC 0x46283044 /* Odessa */
#define IPCNS_MSG_MAGIC 0x55453737 /* Moscow */
#define IPCNS_SEM_MAGIC 0x59573019 /* St. Petersburg */
#define REG_FILES_MAGIC 0x50363636 /* Belgorod */
#define EXT_FILES_MAGIC 0x59255641 /* Usolye */
#define FS_MAGIC 0x51403912 /* Voronezh */
#define MM_MAGIC 0x57492820 /* Pskov */
#define REMAP_FPATH_MAGIC 0x59133954 /* Vologda */
#define GHOST_FILE_MAGIC 0x52583605 /* Oryol */
#define TCP_STREAM_MAGIC 0x51465506 /* Orenburg */
#define EVENTFD_FILE_MAGIC 0x44523722 /* Anapa */
#define EVENTPOLL_FILE_MAGIC 0x45023858 /* Krasnodar */
#define EVENTPOLL_TFD_MAGIC 0x44433746 /* Novorossiysk */
#define SIGNALFD_MAGIC 0x57323820 /* Uglich */
#define INOTIFY_FILE_MAGIC 0x48424431 /* Volgograd */
#define INOTIFY_WD_MAGIC 0x54562009 /* Svetlogorsk (Rauschen) */
#define MNTS_MAGIC 0x55563928 /* Petushki */
#define NETDEV_MAGIC 0x57373951 /* Yaroslavl */
#define NETNS_MAGIC 0x55933752 /* Dolgoprudny */
#define TTY_FILES_MAGIC 0x59433025 /* Pushkin */
#define TTY_INFO_MAGIC 0x59453036 /* Kolpino */
#define TTY_DATA_MAGIC 0x59413026 /* Pavlovsk */
#define FILE_LOCKS_MAGIC 0x54323616 /* Kaluga */
#define RLIMIT_MAGIC 0x57113925 /* Rostov */
#define FANOTIFY_FILE_MAGIC 0x55096122 /* Chelyabinsk */
#define FANOTIFY_MARK_MAGIC 0x56506035 /* Yekaterinburg */
#define SIGNAL_MAGIC 0x59255647 /* Berezniki */
#define PSIGNAL_MAGIC SIGNAL_MAGIC
#define NETLINK_SK_MAGIC 0x58005614 /* Perm */
#define NS_FILES_MAGIC 0x61394011 /* Nyandoma */
#define TUNFILE_MAGIC 0x57143751 /* Kalyazin */
#define CGROUP_MAGIC 0x59383330 /* Tikhvin */
#define TIMERFD_MAGIC 0x50493712 /* Korocha */
#define CPUINFO_MAGIC 0x61404013 /* Nyandoma */
#define USERNS_MAGIC 0x55474906 /* Kazan */
#define SECCOMP_MAGIC 0x64413049 /* Kostomuksha */
#define BINFMT_MISC_MAGIC 0x67343323 /* Apatity */
#define AUTOFS_MAGIC 0x49353943 /* Sochi */
#define FILES_MAGIC 0x56303138 /* Toropets */
#define MEMFD_INODE_MAGIC 0x48453499 /* Dnipro */
#define TIMENS_MAGIC 0x43114433 /* Beslan */
#define PIDNS_MAGIC 0x61157326 /* Surgut */
#define BPFMAP_FILE_MAGIC 0x57506142 /* Alapayevsk */
#define BPFMAP_DATA_MAGIC 0x64324033 /* Arkhangelsk */
#define APPARMOR_MAGIC 0x59423047 /* Nikolskoye */
#define IFADDR_MAGIC RAW_IMAGE_MAGIC
#define ROUTE_MAGIC RAW_IMAGE_MAGIC
#define ROUTE6_MAGIC RAW_IMAGE_MAGIC
#define RULE_MAGIC RAW_IMAGE_MAGIC
#define TMPFS_IMG_MAGIC RAW_IMAGE_MAGIC
#define TMPFS_DEV_MAGIC RAW_IMAGE_MAGIC
#define IPTABLES_MAGIC RAW_IMAGE_MAGIC
#define IP6TABLES_MAGIC RAW_IMAGE_MAGIC
#define NFTABLES_MAGIC RAW_IMAGE_MAGIC
#define NETNF_CT_MAGIC RAW_IMAGE_MAGIC
#define NETNF_EXP_MAGIC RAW_IMAGE_MAGIC
#define PAGES_OLD_MAGIC PAGEMAP_MAGIC
#define SHM_PAGES_OLD_MAGIC PAGEMAP_MAGIC
#define BINFMT_MISC_OLD_MAGIC BINFMT_MISC_MAGIC
/*
* These are special files, not exactly images
*/
#define STATS_MAGIC 0x57093306 /* Ostashkov */
#define IRMAP_CACHE_MAGIC 0x57004059 /* Ivanovo */
/*
* Main magic for kerndat_s structure.
*/
#define KDAT_MAGIC 0x57023458 /* Torzhok */
#endif /* __CR_MAGIC_H__ */
| 5,137 | 37.631579 | 68 |
h
|
criu
|
criu-master/criu/include/mem.h
|
#ifndef __CR_MEM_H__
#define __CR_MEM_H__
#include <stdbool.h>
#include "int.h"
#include "vma.pb-c.h"
#include "pid.h"
#include "proc_parse.h"
#include "inventory.pb-c.h"
struct parasite_ctl;
struct vm_area_list;
struct page_pipe;
struct pstree_item;
struct vma_area;
struct mem_dump_ctl {
bool pre_dump;
bool lazy;
struct proc_pid_stat *stat;
InventoryEntry *parent_ie;
};
extern bool vma_has_guard_gap_hidden(struct vma_area *vma);
extern bool page_is_zero(u64 pme);
extern bool page_in_parent(bool dirty);
extern int prepare_mm_pid(struct pstree_item *i);
extern void prepare_cow_vmas(void);
extern int do_task_reset_dirty_track(int pid);
extern unsigned long dump_pages_args_size(struct vm_area_list *vmas);
extern int parasite_dump_pages_seized(struct pstree_item *item, struct vm_area_list *vma_area_list,
struct mem_dump_ctl *mdc, struct parasite_ctl *ctl);
#define PME_PRESENT (1ULL << 63)
#define PME_SWAP (1ULL << 62)
#define PME_FILE (1ULL << 61)
#define PME_SOFT_DIRTY (1ULL << 55)
#define PME_PSHIFT_BITS (6)
#define PME_STATUS_BITS (3)
#define PME_STATUS_OFFSET (64 - PME_STATUS_BITS)
#define PME_PSHIFT_OFFSET (PME_STATUS_OFFSET - PME_PSHIFT_BITS)
#define PME_PFRAME_MASK ((1ULL << PME_PSHIFT_OFFSET) - 1)
#define PME_PFRAME(x) ((x)&PME_PFRAME_MASK)
struct task_restore_args;
int open_vmas(struct pstree_item *t);
int prepare_vmas(struct pstree_item *t, struct task_restore_args *ta);
int unmap_guard_pages(struct pstree_item *t);
int prepare_mappings(struct pstree_item *t);
bool should_dump_page(VmaEntry *vmae, u64 pme);
#endif /* __CR_MEM_H__ */
| 1,601 | 29.807692 | 99 |
h
|
criu
|
criu-master/criu/include/memfd.h
|
#ifndef __CR_MEMFD_H__
#define __CR_MEMFD_H__
#include <sys/stat.h>
#include "int.h"
#include "common/config.h"
struct fd_parms;
struct file_desc;
extern int is_memfd(dev_t dev);
extern int dump_one_memfd_cond(int lfd, u32 *id, struct fd_parms *parms);
extern const struct fdtype_ops memfd_dump_ops;
extern int memfd_open(struct file_desc *d, u32 *fdflags);
extern struct collect_image_info memfd_cinfo;
extern struct file_desc *collect_memfd(u32 id);
extern int apply_memfd_seals(void);
extern int prepare_memfd_inodes(void);
#ifdef CONFIG_HAS_MEMFD_CREATE
#include <sys/mman.h>
#else
#include <sys/syscall.h>
#include <linux/memfd.h>
/*
 * Fallback for a libc that does not ship memfd_create(2):
 * issue the system call directly.
 */
static inline int memfd_create(const char *name, unsigned int flags)
{
	long ret;

	ret = syscall(SYS_memfd_create, name, flags);
	return (int)ret;
}
#endif /* CONFIG_HAS_MEMFD_CREATE */
#endif /* __CR_MEMFD_H__ */
| 829 | 23.411765 | 73 |
h
|
criu
|
criu-master/criu/include/mount-v2.h
|
#ifndef __CR_MOUNT_V2_H__
#define __CR_MOUNT_V2_H__
#include "linux/mount.h"
#include "linux/openat2.h"
#include "common/list.h"
#include <compel/plugins/std/syscall-codes.h>
#ifndef MOVE_MOUNT_SET_GROUP
#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
#endif
#ifndef MOVE_MOUNT_F_EMPTY_PATH
#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */
#endif
#ifndef MOVE_MOUNT_T_EMPTY_PATH
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
#endif
/*
 * Raw move_mount(2) wrapper -- glibc may not provide one; the syscall
 * number comes from compel's syscall-codes header included above.
 */
static inline int sys_move_mount(int from_dirfd, const char *from_pathname, int to_dirfd, const char *to_pathname,
				 unsigned int flags)
{
	return syscall(__NR_move_mount, from_dirfd, from_pathname, to_dirfd, to_pathname, flags);
}
#ifndef OPEN_TREE_CLONE
#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
#endif
#ifndef OPEN_TREE_CLOEXEC
#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
#endif
#ifndef AT_SYMLINK_NOFOLLOW
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
#endif
#ifndef AT_NO_AUTOMOUNT
#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
#endif
#ifndef AT_EMPTY_PATH
#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
#endif
/* Raw open_tree(2) wrapper for libc versions that lack it. */
static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
{
	return syscall(__NR_open_tree, dfd, filename, flags);
}
#ifndef RESOLVE_NO_XDEV
#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings (includes bind-mounts). */
#endif
/*
 * Raw openat2(2) wrapper. @size is sizeof(*how) so the kernel can
 * handle struct open_how extensions.
 */
static inline long sys_openat2(int dirfd, const char *pathname, struct open_how *how, size_t size)
{
	return syscall(__NR_openat2, dirfd, pathname, how, size);
}
extern int check_mount_v2(void);
struct sharing_group {
/* This pair identifies the group */
int shared_id;
int master_id;
/* List of shared groups */
struct list_head list;
/* List of mounts in this group */
struct list_head mnt_list;
/*
* List of dependent shared groups:
* - all siblings have equal master_id
* - the parent has shared_id equal to children's master_id
*
* This is a bit tricky: parent pointer indicates if there is one
* parent sharing_group in list or only siblings.
* So for traversal if parent pointer is set we can do:
* list_for_each_entry(t, &sg->parent->children, siblings)
* and otherwise we can do:
* list_for_each_entry(t, &sg->siblings, siblings)
*/
struct list_head children;
struct list_head siblings;
struct sharing_group *parent;
char *source;
};
extern int resolve_shared_mounts_v2(void);
extern int prepare_mnt_ns_v2(void);
#endif /* __CR_MOUNT_V2_H__ */
| 2,714 | 27.28125 | 114 |
h
|
criu
|
criu-master/criu/include/mount.h
|
#ifndef __CR_MOUNT_H__
#define __CR_MOUNT_H__
#include <sys/types.h>
#include "common/list.h"
struct proc_mountinfo;
struct pstree_item;
struct fstype;
struct ns_id;
#define MS_PROPAGATE (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE | MS_SLAVE)
/*
* Here are a set of flags which we know how to handle for the one mount call.
* All of them except MS_RDONLY are set only as mnt flags.
* MS_RDONLY is set for both mnt and sb flags, so we can restore it for one
* mount call only if it set for both masks.
*/
#define MS_MNT_KNOWN_FLAGS (MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_RDONLY)
#define BINFMT_MISC_HOME "proc/sys/fs/binfmt_misc"
#define HELPER_MNT_ID 0
#define MOUNT_INVALID_DEV (0)
#define MNT_UNREACHABLE INT_MIN
/*
* We have remounted these mount writable temporary, and we
* should return it back to readonly at the end of file restore.
*/
#define REMOUNTED_RW 1
/*
* We have remounted these mount writable in service mount namespace,
* thus we shouldn't return it back to readonly, as service mntns
* will be destroyed anyway.
*/
#define REMOUNTED_RW_SERVICE 2
struct rst_mount_info {
int remounted_rw;
};
struct mount_info {
int mnt_id;
int parent_mnt_id;
unsigned int s_dev;
unsigned int s_dev_rt;
char *root;
/*
* During dump mountpoint contains path with dot at the
* beginning. It allows to use openat, statat, etc without
* creating a temporary copy of the path.
*
* On restore mountpoint is prepended with so called ns
* root path -- it's a place in fs where the namespace
* mount tree is constructed. Check mnt_roots for details.
* The ns_mountpoint contains path w/o this prefix.
*/
char *mountpoint;
char *ns_mountpoint;
/* Mount-v2 specific */
char *plain_mountpoint;
int is_dir;
int mp_fd_id;
int mnt_fd_id;
struct sharing_group *sg;
struct list_head mnt_sharing;
int fd;
unsigned flags;
unsigned sb_flags;
int master_id;
int shared_id;
struct fstype *fstype;
char *source;
char *options;
char *fsname;
union {
bool mounted;
bool dumped;
};
bool need_plugin;
bool is_ns_root;
bool deleted;
int deleted_level;
struct list_head deleted_list;
struct mount_info *next;
struct ns_id *nsid;
char *external;
bool internal_sharing;
/* tree linkage */
struct mount_info *parent;
struct mount_info *bind;
struct list_head children;
struct list_head siblings;
struct list_head mnt_bind; /* circular list of derivatives of one real mount */
bool mnt_bind_is_populated; /* indicate that mnt_bind list is ready to use */
struct list_head mnt_share; /* circular list of shared mounts */
struct list_head mnt_slave_list; /* list of slave mounts */
struct list_head mnt_slave; /* slave list entry */
struct list_head mnt_ext_slave; /* external slave list entry */
struct mount_info *mnt_master; /* slave is on master->mnt_slave_list */
struct list_head mnt_propagate; /* circular list of mounts which propagate from each other */
struct list_head mnt_notprop; /* temporary list used in can_mount_now */
struct list_head mnt_unbindable; /* list of mounts with delayed unbindable */
struct list_head postpone;
int is_overmounted;
struct rst_mount_info *rmi;
void *private; /* associated filesystem data */
};
extern struct mount_info *mntinfo;
extern void mntinfo_add_list_before(struct mount_info **head, struct mount_info *new);
/*
* Put a : in here since those are invalid on
* the cli, so we know it's autogenerated in
* debugging.
*/
#define AUTODETECTED_MOUNT "CRIU:AUTOGENERATED"
#define EXTERNAL_DEV_MOUNT "CRIU:EXTERNAL_DEV"
#define NO_ROOT_MOUNT "CRIU:NO_ROOT"
/* True only for the special "external dev" mounts (EXTERNAL_DEV_MOUNT key). */
static inline bool mnt_is_dev_external(struct mount_info *mi)
{
	if (!mi->external)
		return false;

	return strcmp(mi->external, EXTERNAL_DEV_MOUNT) == 0;
}
/* True for external mounts other than the special "external dev" one. */
static inline bool mnt_is_nodev_external(struct mount_info *mi)
{
	if (!mi->external)
		return false;

	return strcmp(mi->external, EXTERNAL_DEV_MOUNT) != 0;
}
extern struct ns_desc mnt_ns_desc;
#ifdef CONFIG_BINFMT_MISC_VIRTUALIZED
extern int collect_binfmt_misc(void);
#else
/*
 * Built without CONFIG_BINFMT_MISC_VIRTUALIZED (see the #ifdef above):
 * there is nothing to collect, so report success.
 */
static inline int collect_binfmt_misc(void)
{
	return 0;
}
#endif
extern struct mount_info *mnt_entry_alloc(bool rst);
extern void mnt_entry_free(struct mount_info *mi);
extern int __mntns_get_root_fd(pid_t pid);
extern int mntns_get_root_fd(struct ns_id *ns);
extern int mntns_get_root_by_mnt_id(int mnt_id);
extern struct ns_id *lookup_nsid_by_mnt_id(int mnt_id);
extern int open_mount(unsigned int s_dev);
extern int __check_mountpoint_fd(struct mount_info *pm, int mnt_fd, bool parse_mountinfo);
extern int check_mountpoint_fd(struct mount_info *pm, int mnt_fd);
extern int __open_mountpoint(struct mount_info *pm);
extern int mnt_is_dir(struct mount_info *pm);
extern int open_mountpoint(struct mount_info *pm);
extern struct mount_info *collect_mntinfo(struct ns_id *ns, bool for_dump);
extern int prepare_mnt_ns(void);
extern int pivot_root(const char *new_root, const char *put_old);
extern struct mount_info *lookup_overlayfs(char *rpath, unsigned int s_dev, unsigned int st_ino, unsigned int mnt_id);
extern struct mount_info *lookup_mnt_id(unsigned int id);
extern struct mount_info *lookup_mnt_sdev(unsigned int s_dev);
extern dev_t phys_stat_resolve_dev(struct ns_id *, dev_t st_dev, const char *path);
extern bool phys_stat_dev_match(dev_t st_dev, dev_t phys_dev, struct ns_id *, const char *path);
extern int restore_task_mnt_ns(struct pstree_item *current);
extern void fini_restore_mntns(void);
extern int depopulate_roots_yard(int mntns_root, bool clean_remaps);
extern int rst_get_mnt_root(int mnt_id, char *path, int plen);
extern int ext_mount_add(char *key, char *val);
extern int ext_mount_parse_auto(char *key);
extern int mntns_maybe_create_roots(void);
extern int read_mnt_ns_img(void);
extern void cleanup_mnt_ns(void);
extern void clean_cr_time_mounts(void);
extern char *get_plain_mountpoint(int mnt_id, char *name);
extern bool add_skip_mount(const char *mountpoint);
extern int get_sdev_from_fd(int fd, unsigned int *sdev, bool parse_mountinfo);
extern struct mount_info *parse_mountinfo(pid_t pid, struct ns_id *nsid, bool for_dump);
extern int check_mnt_id(void);
extern int remount_readonly_mounts(void);
extern int try_remount_writable(struct mount_info *mi, bool ns);
extern bool mnt_is_overmounted(struct mount_info *mi);
extern struct mount_info *mnt_get_external_bind(struct mount_info *mi);
extern bool mnt_is_external_bind(struct mount_info *mi);
extern bool has_mounted_external_bind(struct mount_info *mi);
extern bool rst_mnt_is_root(struct mount_info *mi);
extern struct mount_info *mnt_get_root_bind(struct mount_info *mi);
extern bool mnt_is_root_bind(struct mount_info *mi);
extern struct mount_info *mnt_get_external_bind_nodev(struct mount_info *mi);
extern struct mount_info *mnt_bind_pick(struct mount_info *mi,
bool (*pick)(struct mount_info *mi, struct mount_info *bind));
extern int mnt_tree_for_each(struct mount_info *start, int (*fn)(struct mount_info *));
extern char *service_mountpoint(const struct mount_info *mi);
extern int validate_mounts(struct mount_info *info, bool for_dump);
extern __maybe_unused struct mount_info *add_cr_time_mount(struct mount_info *root, char *fsname, const char *path,
unsigned int s_dev, bool rst);
extern char *resolve_source(struct mount_info *mi);
extern int fetch_rt_stat(struct mount_info *m, const char *where);
extern int do_simple_mount(struct mount_info *mi, const char *src, const char *fstype, unsigned long mountflags);
extern char *mnt_fsname(struct mount_info *mi);
extern int apply_sb_flags(void *args, int fd, pid_t pid);
extern int mount_root(void *args, int fd, pid_t pid);
extern int restore_ext_mount(struct mount_info *mi);
extern int cr_pivot_root(char *root);
extern int print_ns_root(struct ns_id *ns, int remap_id, char *buf, int bs);
extern struct mount_info *root_yard_mp;
extern char *mnt_roots;
#endif /* __CR_MOUNT_H__ */
| 7,903 | 31.933333 | 118 |
h
|
criu
|
criu-master/criu/include/namespaces.h
|
#ifndef __CR_NS_H__
#define __CR_NS_H__
#include <sys/socket.h>
#include "common/compiler.h"
#include "files.h"
#include "common/list.h"
#include "images/netdev.pb-c.h"
#ifndef CLONE_NEWNS
#define CLONE_NEWNS 0x00020000
#endif
#ifndef CLONE_NEWPID
#define CLONE_NEWPID 0x20000000
#endif
#ifndef CLONE_NEWUTS
#define CLONE_NEWUTS 0x04000000
#endif
#ifndef CLONE_NEWIPC
#define CLONE_NEWIPC 0x08000000
#endif
#ifndef CLONE_NEWNET
#define CLONE_NEWNET 0x40000000
#endif
#ifndef CLONE_NEWUSER
#define CLONE_NEWUSER 0x10000000
#endif
#ifndef CLONE_NEWCGROUP
#define CLONE_NEWCGROUP 0x02000000
#endif
#ifndef CLONE_NEWTIME
#define CLONE_NEWTIME 0x00000080
#endif
#define CLONE_ALLNS \
(CLONE_NEWPID | CLONE_NEWNET | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWCGROUP | \
CLONE_NEWTIME)
/* Nested namespaces are supported only for these types */
#define CLONE_SUBNS (CLONE_NEWNS | CLONE_NEWNET)
#define EXTRA_SIZE 20
struct ns_desc {
unsigned int cflag;
char *str;
size_t len;
};
struct user_ns_extra {
char *uid;
char *gid;
};
/* struct join_ns is used for storing parameters specified by --join-ns */
struct join_ns {
struct list_head list;
char *ns_file;
struct ns_desc *nd; /* namespace descriptor */
int ns_fd;
/* extra options of --join-ns, like uid&gid in user namespace */
union {
struct user_ns_extra user_extra;
char *common_extra;
} extra_opts;
};
enum ns_type {
NS_UNKNOWN = 0,
NS_CRIU,
NS_ROOT,
NS_OTHER,
};
struct netns_id {
unsigned target_ns_id;
unsigned netnsid_value;
struct list_head node;
};
struct net_link {
NetDeviceEntry *nde;
bool created;
struct list_head node;
};
struct ns_id {
unsigned int kid;
unsigned int id;
pid_t ns_pid;
struct ns_desc *nd;
struct ns_id *next;
enum ns_type type;
char *ext_key;
/*
* For mount namespaces on restore -- indicates that
* the namespace in question is created (all mounts
* are mounted) and other tasks may do setns on it
* and proceed.
*/
bool ns_populated;
union {
struct {
struct mount_info *mntinfo_list;
struct mount_info *mntinfo_tree;
int nsfd_id;
int root_fd_id;
} mnt;
struct {
/*
* ns_fd is used when network namespaces are being
* restored. On this stage we access these file
* descriptors many times and it is more efficient to
* have them opened rather than to get them from fdstore.
*
* nsfd_id is used to restore sockets. On this stage we
* can't use random file descriptors to not conflict
* with restored file descriptors.
*/
union {
int nsfd_id; /* a namespace descriptor id in fdstore */
int ns_fd; /* a namespace file descriptor */
};
int nlsk; /* for sockets collection */
int seqsk; /* to talk to parasite daemons */
struct list_head ids;
struct list_head links;
NetnsEntry *netns;
} net;
};
};
extern struct ns_id *ns_ids;
#define NS_DESC_ENTRY(_cflag, _str) \
{ \
.cflag = _cflag, .str = _str, .len = sizeof(_str) - 1, \
}
extern bool check_ns_proc(struct fd_link *link);
extern struct ns_desc pid_ns_desc;
extern struct ns_desc user_ns_desc;
extern struct ns_desc time_ns_desc;
extern unsigned long root_ns_mask;
extern const struct fdtype_ops nsfile_dump_ops;
extern struct collect_image_info nsfile_cinfo;
extern int walk_namespaces(struct ns_desc *nd, int (*cb)(struct ns_id *, void *), void *oarg);
extern int collect_namespaces(bool for_dump);
extern int collect_mnt_namespaces(bool for_dump);
extern int dump_mnt_namespaces(void);
extern int dump_namespaces(struct pstree_item *item, unsigned int ns_flags);
extern int prepare_namespace_before_tasks(void);
extern int prepare_namespace(struct pstree_item *item, unsigned long clone_flags);
extern int prepare_userns_creds(void);
extern int switch_ns(int pid, struct ns_desc *nd, int *rst);
extern int switch_mnt_ns(int pid, int *rst, int *cwd_fd);
extern int switch_ns_by_fd(int nsfd, struct ns_desc *nd, int *rst);
extern int restore_ns(int rst, struct ns_desc *nd);
extern int restore_mnt_ns(int rst, int *cwd_fd);
extern int dump_task_ns_ids(struct pstree_item *);
extern int predump_task_ns_ids(struct pstree_item *);
extern int rst_add_ns_id(unsigned int id, struct pstree_item *, struct ns_desc *nd);
extern struct ns_id *lookup_ns_by_id(unsigned int id, struct ns_desc *nd);
extern int collect_user_namespaces(bool for_dump);
extern int prepare_userns(struct pstree_item *item);
extern int stop_usernsd(void);
extern uid_t userns_uid(uid_t uid);
extern gid_t userns_gid(gid_t gid);
extern int dump_user_ns(pid_t pid, int ns_id);
extern void free_userns_maps(void);
extern int join_ns_add(const char *type, char *ns_file, char *extra_opts);
extern int check_namespace_opts(void);
extern int join_namespaces(void);
typedef int (*uns_call_t)(void *arg, int fd, pid_t pid);
/*
* Async call -- The call is guaranteed to be done till the
* CR_STATE_COMPLETE happens. The function may return even
* before the call starts.
* W/o flag the call is synchronous -- this function returns
* strictly after the call finishes.
*/
#define UNS_ASYNC 0x1
/*
* The call returns an FD which should be sent back. Conflicts
* with UNS_ASYNC.
*/
#define UNS_FDOUT 0x2
#define MAX_UNSFD_MSG_SIZE 8192
/*
* When we're restoring inside user namespace, some things are
* not allowed to be done there due to insufficient capabilities.
* If the operation in question can be offloaded to another process,
* this call allows to do that.
*
* In case we're not in userns, just call the callback immediately
* in the context of calling task.
*/
extern int __userns_call(const char *func_name, uns_call_t call, int flags, void *arg, size_t arg_size, int fd);
#define userns_call(__call, __flags, __arg, __arg_size, __fd) \
__userns_call(__stringify(__call), __call, __flags, __arg, __arg_size, __fd)
extern int add_ns_shared_cb(int (*actor)(void *data), void *data);
extern struct ns_id *get_socket_ns(int lfd);
extern struct ns_id *lookup_ns_by_kid(unsigned int kid, struct ns_desc *nd);
struct unsc_msg {
struct msghdr h;
/*
* 0th is the call address
* 1st is the flags
* 2nd is the optional (NULL in response) arguments
*/
struct iovec iov[3];
char c[CMSG_SPACE(sizeof(struct ucred)) + CMSG_SPACE(sizeof(int))];
};
extern void unsc_msg_init(struct unsc_msg *m, uns_call_t *c, int *x, void *arg, size_t asize, int fd, pid_t *pid);
extern void unsc_msg_pid_fd(struct unsc_msg *um, pid_t *pid, int *fd);
extern int start_unix_cred_daemon(pid_t *pid, int (*daemon_func)(int sk));
#endif /* __CR_NS_H__ */
| 6,748 | 26.546939 | 118 |
h
|
criu
|
criu-master/criu/include/net.h
|
#ifndef __CR_NET_H__
#define __CR_NET_H__
#include <linux/netlink.h>
#include "common/list.h"
#include "external.h"
#ifndef RTM_GETNSID
#define RTM_GETNSID 90
#endif
struct cr_imgset;
struct ns_id;
extern int dump_net_ns(struct ns_id *ns);
extern int prepare_net_namespaces(void);
extern void fini_net_namespaces(void);
extern int netns_keep_nsfd(void);
struct pstree_item;
extern int restore_task_net_ns(struct pstree_item *current);
struct veth_pair {
struct list_head node;
char *inside;
char *outside;
char *bridge;
};
extern int collect_net_namespaces(bool for_dump);
extern int network_lock(void);
extern void network_unlock(void);
extern int network_lock_internal(void);
extern struct ns_desc net_ns_desc;
#include "images/netdev.pb-c.h"
extern int write_netdev_img(NetDeviceEntry *nde, struct cr_imgset *fds, struct nlattr **info);
extern int read_ns_sys_file(char *path, char *buf, int len);
struct net_link;
extern int restore_link_parms(struct net_link *link, int nlsk);
extern int veth_pair_add(char *in, char *out);
extern int macvlan_ext_add(struct external *ext);
extern int move_veth_to_bridge(void);
extern int kerndat_has_newifindex(void);
extern int kerndat_link_nsid(void);
extern int net_get_nsid(int rtsk, int fd, int *nsid);
extern struct ns_id *net_get_root_ns(void);
extern int kerndat_nsid(void);
extern void check_has_netns_ioc(int fd, bool *kdat_val, const char *name);
extern int net_set_ext(struct ns_id *ns);
extern struct ns_id *get_root_netns(void);
extern int read_net_ns_img(void);
#endif /* __CR_NET_H__ */
| 1,561 | 25.033333 | 94 |
h
|
criu
|
criu-master/criu/include/netfilter.h
|
#ifndef __CR_NETFILTER_H__
#define __CR_NETFILTER_H__
struct inet_sk_desc;
extern int iptables_lock_connection(struct inet_sk_desc *);
extern int iptables_unlock_connection(struct inet_sk_desc *);
struct inet_sk_info;
extern int iptables_unlock_connection_info(struct inet_sk_info *);
extern void preload_netfilter_modules(void);
extern int nftables_init_connection_lock(void);
extern int nftables_lock_connection(struct inet_sk_desc *);
extern int nftables_get_table(char *table, int n);
#if defined(CONFIG_HAS_NFTABLES_LIB_API_0)
#define NFT_RUN_CMD(nft, cmd) nft_run_cmd_from_buffer(nft, cmd, strlen(cmd))
#elif defined(CONFIG_HAS_NFTABLES_LIB_API_1)
#define NFT_RUN_CMD(nft, cmd) nft_run_cmd_from_buffer(nft, cmd)
#else
#define NFT_RUN_CMD(nft, cmd) BUILD_BUG_ON(1)
#endif
#endif /* __CR_NETFILTER_H__ */
| 815 | 30.384615 | 76 |
h
|
criu
|
criu-master/criu/include/page-pipe.h
|
#ifndef __CR_PAGE_PIPE_H__
#define __CR_PAGE_PIPE_H__
#include <sys/uio.h>
#include "common/list.h"
#define PAGE_ALLOC_COSTLY_ORDER 3 /* from the kernel source code */
struct kernel_pipe_buffer {
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
unsigned long private;
};
/*
* The kernel allocates the linear chunk of memory for pipe buffers.
* Allocation of chunks with size more than PAGE_ALLOC_COSTLY_ORDER
* fails very often, so we need to restrict the pipe capacity to not
* allocate big chunks.
*/
#define PIPE_MAX_SIZE ((1 << PAGE_ALLOC_COSTLY_ORDER) * PAGE_SIZE / sizeof(struct kernel_pipe_buffer))
/* The number of pipes for one chunk */
#define NR_PIPES_PER_CHUNK 8
/*
* page_pipe is a descriptor of task's virtual memory
* with pipes, containing pages.
*
* A page-pipe may contain holes -- these are pagemap
* entries without pages. Holes are stored in separate
* array to optimize paged iovs feed into vmsplice --
* they will be sent there in one go.
*
* A hole is a pagemap entry that doesn't have pages
* in it, since they are present in previous (parent)
* snapshot.
*
*
* This page-pipe vs holes vs task vmem vs image layout
* is described below.
*
* Task memory: (+ present, - not present pages)
* 0 0 0 0 1 1 1
* 0 3 6 B 1 8 C
* ---+++-----++++++-------++++----
*
* Page-pipe iovs:
*
* bufs = 03:3,0B:6,18:4
* holes = <empty>
*
* The pagemap.img would purely contain page-pipe bufs.
*
* Pages image will contain pages at
*
* 03,04,05,0B,0C,0D,0E,0F,10,18,19,1A,1B
*
* stored one by one.
*
* Not let's imagine task touches some pages and its mem
* looks like: (+ present, = old present, - non present)
*
* 0 0 0 0 11 11 1
* 0 3 6 B 12 78 C
* ---==+-----====+++-----++===----
*
 * (note the new pages at 11 and 17 vaddrs)
*
* The new --snapshot'ed page-pipe would look like
*
* bufs = 05:1,0F:3,17:2
* holes = 03:2,0B:4,19:3
*
* So the pagemap.img would look like
*
* 03:2:P,05:1,0B:4:P,0F:3,17:2,19:3:P
*
* (the page_xfer_dump_pages generates one)
*
* where P means "in parent", i.e. respective pages should
* be looked up in the parent pagemap (not pages.img, but
* the pagemap, and then the offset in previous pages.img
* should be calculated, see the read_pagemap_page routine).
*
* New pages.img file would contain only pages for
*
* 05,0F,10,11,17,18
*/
struct page_pipe_buf {
int p[2]; /* pipe with pages */
unsigned int pipe_size; /* how many pages can be fit into pipe */
unsigned int pipe_off; /* where this buf is started in a pipe */
unsigned int pages_in; /* how many pages are there */
unsigned int nr_segs; /* how many iov-s are busy */
#define PPB_LAZY (1 << 0)
unsigned int flags;
struct iovec *iov; /* vaddr:len map */
struct list_head l; /* links into page_pipe->bufs */
};
/*
* Page pipe buffers with different flags cannot share the same pipe.
* We track the last ppb that was used for each type separately in the
* prev[] array in the struct page_pipe (below).
* Currently we have 2 types: the buffers that are always stored in
* the images and the buffers that are lazily migrated
*/
#define PP_PIPE_TYPES 2
#define PP_HOLE_PARENT (1 << 0)
struct page_pipe {
unsigned int nr_pipes; /* how many page_pipe_bufs in there */
struct list_head bufs; /* list of bufs */
struct list_head free_bufs; /* list of bufs */
struct page_pipe_buf *prev[PP_PIPE_TYPES]; /* last ppb of each type for pipe sharing */
unsigned int nr_iovs; /* number of iovs */
unsigned int free_iov; /* first free iov */
struct iovec *iovs; /* iovs. They are provided into create_page_pipe
and all bufs have their iov-s in there */
unsigned int nr_holes; /* number of holes allocated */
unsigned int free_hole; /* number of holes in use */
struct iovec *holes; /* holes */
unsigned int *hole_flags;
unsigned int flags; /* PP_FOO flags below */
};
#define PP_CHUNK_MODE 0x1 /* Restrict the maximum buffer size of pipes and dump memory for a few iterations */
#define PP_OWN_IOVS 0x4 /* create_page_pipe allocated IOVs memory */
struct page_pipe *create_page_pipe(unsigned int nr_segs, struct iovec *iovs, unsigned flags);
extern void destroy_page_pipe(struct page_pipe *p);
extern int page_pipe_add_page(struct page_pipe *p, unsigned long addr, unsigned int flags);
extern int page_pipe_add_hole(struct page_pipe *pp, unsigned long addr, unsigned int flags);
extern void debug_show_page_pipe(struct page_pipe *pp);
void page_pipe_reinit(struct page_pipe *pp);
extern void page_pipe_destroy_ppb(struct page_pipe_buf *ppb);
struct pipe_read_dest {
int p[2];
int sink_fd;
};
extern int pipe_read_dest_init(struct pipe_read_dest *prd);
extern int page_pipe_read(struct page_pipe *pp, struct pipe_read_dest *prd, unsigned long addr, unsigned int *nr_pages,
unsigned int ppb_flags);
#endif /* __CR_PAGE_PIPE_H__ */
| 5,020 | 31.185897 | 119 |
h
|
criu
|
criu-master/criu/include/page-xfer.h
|
#ifndef __CR_PAGE_XFER__H__
#define __CR_PAGE_XFER__H__
#include "pagemap.h"
struct ps_info {
int pid;
unsigned short port;
};
extern int cr_page_server(bool daemon_mode, bool lazy_dump, int cfd);
/* User buffer for read-mode pre-dump*/
#define PIPE_MAX_BUFFER_SIZE (PIPE_MAX_SIZE << PAGE_SHIFT)
/*
* page_xfer -- transfer pages into image file.
* Two images backends are implemented -- local image file
* and page-server image file.
*/
struct page_xfer {
/* transfers one vaddr:len entry */
int (*write_pagemap)(struct page_xfer *self, struct iovec *iov, u32 flags);
/* transfers pages related to previous pagemap */
int (*write_pages)(struct page_xfer *self, int pipe, unsigned long len);
void (*close)(struct page_xfer *self);
/*
* In case we need to dump pagemaps not as-is, but
* relative to some address. Used, e.g. by shmem.
*/
unsigned long offset;
bool transfer_lazy;
/* private data for every page-xfer engine */
union {
struct /* local */ {
struct cr_img *pmi; /* pagemaps */
struct cr_img *pi; /* pages */
};
struct /* page-server */ {
int sk;
u64 dst_id;
};
};
struct page_read *parent;
};
extern int open_page_xfer(struct page_xfer *xfer, int fd_type, unsigned long id);
struct page_pipe;
extern int page_xfer_dump_pages(struct page_xfer *, struct page_pipe *);
extern int page_xfer_predump_pages(int pid, struct page_xfer *, struct page_pipe *);
extern int connect_to_page_server_to_send(void);
extern int connect_to_page_server_to_recv(int epfd);
extern int disconnect_from_page_server(void);
extern int check_parent_page_xfer(int fd_type, unsigned long id);
/*
* The post-copy migration makes it necessary to receive pages from
* remote dump. The protocol we use for that is quite simple:
* - lazy-pages sends request containing PS_IOV_GET(nr_pages, vaddr, pid)
* - dump-side page server responds with PS_IOV_ADD(nr_pages, vaddr,
pid) or PS_IOV_ADD(0, 0, 0) if it failed to locate the required
pages
* - dump-side page server sends the raw page data
*/
/* async request/receive of remote pages */
extern int request_remote_pages(unsigned long img_id, unsigned long addr, int nr_pages);
typedef int (*ps_async_read_complete)(unsigned long img_id, unsigned long vaddr, int nr_pages, void *);
extern int page_server_start_read(void *buf, int nr_pages, ps_async_read_complete complete, void *priv, unsigned flags);
#endif /* __CR_PAGE_XFER__H__ */
| 2,437 | 30.25641 | 120 |
h
|
criu
|
criu-master/criu/include/pagemap-cache.h
|
#ifndef __CR_PAGEMAP_H__
#define __CR_PAGEMAP_H__
#include <sys/types.h>
#include "int.h"
#include "common/list.h"
struct vma_area;
#define PAGEMAP_PFN_OFF(addr) (PAGE_PFN(addr) * sizeof(u64))
typedef struct {
pid_t pid; /* which process it belongs */
unsigned long start; /* start of area */
unsigned long end; /* end of area */
const struct list_head *vma_head; /* list head of VMAs we're serving */
u64 *map; /* local buffer */
size_t map_len; /* length of a buffer */
int fd; /* file to read PMs from */
} pmc_t;
#define PMC_INIT \
(pmc_t) \
{ \
}
extern int pmc_init(pmc_t *pmc, pid_t pid, const struct list_head *vma_head, size_t size);
extern u64 *pmc_get_map(pmc_t *pmc, const struct vma_area *vma);
extern void pmc_fini(pmc_t *pmc);
#endif /* __CR_PAGEMAP_H__ */
| 821 | 23.909091 | 90 |
h
|
criu
|
criu-master/criu/include/pagemap.h
|
#ifndef __CR_PAGE_READ_H__
#define __CR_PAGE_READ_H__
#include "common/list.h"
#include "images/pagemap.pb-c.h"
#include "page.h"
/*
* page_read -- engine, that reads pages from image file(s)
*
* Several page-read's can be arranged in a chain to read
* pages from a series of snapshot.
*
* A task's address space vs pagemaps+page image pairs can
* look like this (taken from comment in page-pipe.h):
*
* task:
*
* 0 0 0 0 1 1 1
* 0 3 6 B 2 7 C
* ---+++-----+++++++-----+++++----
* pm1: ---+++-----++++++-------++++----
* pm2: ---==+-----====+++-----++===----
*
 * Here + is a present page, - is non-present, = is present,
* but is not modified from last snapshot.
*
* Thus pagemap.img and pages.img entries are
*
* pm1: 03:3,0B:6,18:4
* pm2: 03:2:P,05:1,0B:4:P,0F:3,17:2,19:3:P
*
* where P means "page is in parent pagemap".
*
* pg1: 03,04,05,0B,0C,0D,0E,0F,10,18,19,1A,1B
* pg2: 05,0F,10,11,17,18
*
* When trying to restore from these 4 files we'd have
* to carefully scan pagemap.img's one by one and read or
* skip pages from pages.img where appropriate.
*
* All this is implemented in read_pagemap_page.
*/
struct page_read {
/* reads page from current pagemap */
int (*read_pages)(struct page_read *, unsigned long vaddr, int nr, void *, unsigned flags);
/* Advance page_read to the next entry */
int (*advance)(struct page_read *pr);
void (*close)(struct page_read *);
void (*skip_pages)(struct page_read *, unsigned long len);
int (*sync)(struct page_read *pr);
int (*seek_pagemap)(struct page_read *pr, unsigned long vaddr);
void (*reset)(struct page_read *pr);
int (*io_complete)(struct page_read *, unsigned long vaddr, int nr);
int (*maybe_read_page)(struct page_read *pr, unsigned long vaddr, int nr, void *buf, unsigned flags);
/* Whether or not pages can be read in PIE code */
bool pieok;
/* Private data of reader */
struct cr_img *pmi;
struct cr_img *pi;
u32 pages_img_id;
PagemapEntry *pe; /* current pagemap we are on */
struct page_read *parent; /* parent pagemap (if ->in_parent pagemap is met in image,
* then go to this guy for page, see read_pagemap_page */
unsigned long cvaddr; /* vaddr we are on */
off_t pi_off; /* current offset in pages file */
struct iovec bunch; /* record consequent neighbour iovecs to punch together */
unsigned id; /* for logging */
unsigned long img_id; /* pagemap image file ID */
PagemapEntry **pmes;
int nr_pmes;
int curr_pme;
struct list_head async;
};
/* flags for ->read_pages */
#define PR_ASYNC 0x1 /* may exit w/o data in the buffer */
#define PR_ASAP 0x2 /* PR_ASYNC, but start the IO right now */
/* flags for open_page_read */
#define PR_SHMEM 0x1
#define PR_TASK 0x2
#define PR_TYPE_MASK 0x3
#define PR_MOD 0x4 /* Will need to modify */
#define PR_REMOTE 0x8
/*
* -1 -- error
* 0 -- no images
* 1 -- opened
*/
extern int open_page_read(unsigned long id, struct page_read *, int pr_flags);
extern int open_page_read_at(int dfd, unsigned long id, struct page_read *pr, int pr_flags);
struct task_restore_args;
int pagemap_enqueue_iovec(struct page_read *pr, void *buf, unsigned long len, struct list_head *to);
int pagemap_render_iovec(struct list_head *from, struct task_restore_args *ta);
/*
* Create a shallow copy of page_read object.
* The new object shares the pagemap structures with the original, but
* maintains its own set of references to those structures.
*/
extern void dup_page_read(struct page_read *src, struct page_read *dst);
extern int dedup_one_iovec(struct page_read *pr, unsigned long base, unsigned long len);
/* Length in bytes of the region this pagemap entry covers. */
static inline unsigned long pagemap_len(PagemapEntry *pe)
{
	return PAGE_SIZE * pe->nr_pages;
}
/* Whether this page_read is chained to a parent (previous snapshot) reader. */
static inline bool page_read_has_parent(struct page_read *pr)
{
	return !!pr->parent;
}
/* Pagemap flags */
#define PE_PARENT (1 << 0) /* pages are in parent snapshot */
#define PE_LAZY (1 << 1) /* pages can be lazily restored */
#define PE_PRESENT (1 << 2) /* pages are present in pages*img */
/* Pages of this entry live in the parent snapshot (PE_PARENT set). */
static inline bool pagemap_in_parent(PagemapEntry *pe)
{
	return (pe->flags & PE_PARENT) != 0;
}
/* Pages of this entry may be restored lazily (PE_LAZY set). */
static inline bool pagemap_lazy(PagemapEntry *pe)
{
	return (pe->flags & PE_LAZY) != 0;
}
/* Pages of this entry are stored in the pages*img file (PE_PRESENT set). */
static inline bool pagemap_present(PagemapEntry *pe)
{
	return (pe->flags & PE_PRESENT) != 0;
}
#endif /* __CR_PAGE_READ_H__ */
| 4,385 | 28.635135 | 102 |
h
|
criu
|
criu-master/criu/include/parasite-syscall.h
|
#ifndef __CR_PARASITE_SYSCALL_H__
#define __CR_PARASITE_SYSCALL_H__
#include "pid.h"
#include "common/list.h"
#include "common/config.h"
#include "asm/parasite-syscall.h"
struct parasite_dump_thread;
struct parasite_dump_misc;
struct parasite_drain_fd;
struct vm_area_list;
struct pstree_item;
struct list_head;
struct cr_imgset;
struct fd_opts;
struct pid;
struct parasite_dump_cgroup_args;
struct rt_sigframe;
struct parasite_ctl;
struct parasite_thread_ctl;
extern int parasite_dump_sigacts_seized(struct parasite_ctl *ctl, struct pstree_item *);
extern int parasite_dump_itimers_seized(struct parasite_ctl *ctl, struct pstree_item *);
struct proc_posix_timers_stat;
extern int parasite_dump_posix_timers_seized(struct proc_posix_timers_stat *proc_args, struct parasite_ctl *ctl,
struct pstree_item *);
extern int parasite_dump_misc_seized(struct parasite_ctl *ctl, struct parasite_dump_misc *misc);
extern int parasite_dump_creds(struct parasite_ctl *ctl, CredsEntry *ce);
extern int parasite_dump_thread_leader_seized(struct parasite_ctl *ctl, int pid, CoreEntry *core);
extern int parasite_dump_thread_seized(struct parasite_thread_ctl *tctl, struct parasite_ctl *ctl, int id,
struct pid *tid, CoreEntry *core);
extern int dump_thread_core(int pid, CoreEntry *core, const struct parasite_dump_thread *dt);
extern int parasite_drain_fds_seized(struct parasite_ctl *ctl, struct parasite_drain_fd *dfds, int nr_fds, int off,
int *lfds, struct fd_opts *flags);
extern int parasite_get_proc_fd_seized(struct parasite_ctl *ctl);
extern struct parasite_ctl *parasite_infect_seized(pid_t pid, struct pstree_item *item,
struct vm_area_list *vma_area_list);
extern void parasite_ensure_args_size(unsigned long sz);
extern unsigned long get_exec_start(struct vm_area_list *);
extern int parasite_dump_cgroup(struct parasite_ctl *ctl, struct parasite_dump_cgroup_args *cgroup);
extern struct parasite_tty_args *parasite_dump_tty(struct parasite_ctl *ctl, int fd, int type);
#endif /* __CR_PARASITE_SYSCALL_H__ */
| 2,057 | 38.576923 | 115 |
h
|
criu
|
criu-master/criu/include/parasite-vdso.h
|
#ifndef __CR_PARASITE_VDSO_H__
#define __CR_PARASITE_VDSO_H__
#include "common/config.h"
#include "util-vdso.h"
#include "images/vma.pb-c.h"
struct parasite_ctl;
struct vm_area_list;
/* Check if symbol present in symtable */
/* A symbol slot is empty when it carries neither a name nor an address. */
static inline bool vdso_symbol_empty(struct vdso_symbol *s)
{
	return s->name[0] == '\0' && s->offset == VDSO_BAD_ADDR;
}
/*
* Special mark which allows to identify runtime vdso (rt-vdso) where
* calls from proxy (original) vdso are redirected. This mark usually
* placed at the start of vdso area where Elf header lives.
* Since such runtime vdso is solely used by the proxy and
* nobody else is supposed to access it, it's more-less
* safe to screw the Elf header with @signature and
* vvar/vdso addresses for next dumping.
*
* The @orig_addr deserves a few comments. When we redirect the calls
* from the original vdso to runtime vdso, on next checkpoint it won't
* be possible to find original vdso/vvar pair, thus we save their
* addresses in the member.
*
* As on the following dumps we need to drop rt-{vvar,vdso} pair
* from list of VMAs to save in images, we save rt-vvar address also.
*/
struct vdso_mark {
u64 signature;
unsigned long orig_vdso_addr;
unsigned long version;
unsigned long orig_vvar_addr;
unsigned long rt_vvar_addr;
};
#define VDSO_MARK_SIGNATURE_V1 (0x6f73647675697263ULL) /* Magic number (criuvdso) */
#define VDSO_MARK_SIGNATURE_V2 (0x4f53447675697263ULL) /* Magic number (criuvDSO) */
#define VDSO_MARK_SIGNATURE_V3 (0x4f53447655495243ULL) /* Magic number (CRIUvDSO) */
#define VDSO_MARK_CUR_VERSION (3)
/*
 * Stamp a vdso mark (current version, V3 signature) at @where,
 * recording the original vdso/vvar addresses and the runtime
 * vvar address so a following dump can recognize the rt-vdso.
 */
static inline void vdso_put_mark(void *where, unsigned long rt_vvar_addr, unsigned long orig_vdso_addr,
				 unsigned long orig_vvar_addr)
{
	struct vdso_mark *mark = where;

	mark->signature = VDSO_MARK_SIGNATURE_V3;
	mark->version = VDSO_MARK_CUR_VERSION;
	mark->orig_vdso_addr = orig_vdso_addr;
	mark->orig_vvar_addr = orig_vvar_addr;
	mark->rt_vvar_addr = rt_vvar_addr;
}
/*
 * Check whether @addr holds a vdso mark of any known version.
 * Side effect: older (V1/V2) marks are upgraded in place to the
 * current V3 layout via vdso_put_mark(), filling the fields those
 * versions lacked with BAD_ADDR placeholders.
 */
static inline bool is_vdso_mark(void *addr)
{
	struct vdso_mark *m = addr;
	switch (m->signature) {
	case VDSO_MARK_SIGNATURE_V3:
		return true;
	/*
	 * Old formats -- simply extend the mark up
	 * to the version we support.
	 */
	case VDSO_MARK_SIGNATURE_V2:
		/* V2 had no rt_vvar_addr */
		vdso_put_mark(m, VVAR_BAD_ADDR, m->orig_vdso_addr, m->orig_vvar_addr);
		return true;
	case VDSO_MARK_SIGNATURE_V1:
		/* V1 had neither rt_vvar_addr nor orig_vvar_addr */
		vdso_put_mark(m, VVAR_BAD_ADDR, m->orig_vdso_addr, VVAR_BAD_ADDR);
		return true;
	}
	return false;
}
extern void vdso_update_gtod_addr(struct vdso_maps *rt);
extern int vdso_do_park(struct vdso_maps *rt, unsigned long park_at, unsigned long park_size);
extern int vdso_map_compat(unsigned long map_at);
extern int vdso_proxify(struct vdso_maps *rt, bool *added_proxy, VmaEntry *vmas, size_t nr_vmas, bool compat_vdso,
bool force_trampolines);
extern int vdso_redirect_calls(unsigned long base_to, unsigned long base_from, struct vdso_symtable *to,
struct vdso_symtable *from, bool compat_vdso);
#endif /* __CR_PARASITE_VDSO_H__ */
| 2,977 | 31.725275 | 114 |
h
|
criu
|
criu-master/criu/include/parasite.h
|
#ifndef __CR_PARASITE_H__
#define __CR_PARASITE_H__
#define PARASITE_MAX_SIZE (64 << 10)
#ifndef __ASSEMBLY__
#include <sys/un.h>
#include <sys/time.h>
#include <time.h>
#include <signal.h>
#include "linux/rseq.h"
#include "image.h"
#include "util-pie.h"
#include "common/lock.h"
#include "infect-rpc.h"
#include "images/vma.pb-c.h"
#include "images/tty.pb-c.h"
#define __head __used __section(.head.text)
/*
 * Commands the criu side sends to the injected parasite blob;
 * numbering continues right after compel's PARASITE_USER_CMDS space
 * so the two command ranges never clash.
 */
enum {
	PARASITE_CMD_DUMP_THREAD = PARASITE_USER_CMDS,
	PARASITE_CMD_MPROTECT_VMAS,
	PARASITE_CMD_DUMPPAGES,
	PARASITE_CMD_DUMP_SIGACTS,
	PARASITE_CMD_DUMP_ITIMERS,
	PARASITE_CMD_DUMP_POSIX_TIMERS,
	PARASITE_CMD_DUMP_MISC,
	PARASITE_CMD_DRAIN_FDS,
	PARASITE_CMD_GET_PROC_FD,
	PARASITE_CMD_DUMP_TTY,
	PARASITE_CMD_CHECK_VDSO_MARK,
	PARASITE_CMD_CHECK_AIOS,
	PARASITE_CMD_DUMP_CGROUP,
	PARASITE_CMD_MAX,
};
/* Minimal VMA descriptor passed to the parasite (see pargs_vmas()). */
struct parasite_vma_entry {
	unsigned long start; /* VMA start address */
	unsigned long len;   /* VMA length in bytes */
	int prot;	     /* protection flags */
};
/*
 * Per-VMA info exchanged with the parasite for vdso handling
 * (PARASITE_CMD_CHECK_VDSO_MARK); the orig_*/rt_* fields mirror
 * struct vdso_mark from parasite-vdso.h.
 */
struct parasite_vdso_vma_entry {
	unsigned long start;	      /* VMA start address */
	unsigned long len;	      /* VMA length in bytes */
	unsigned long orig_vdso_addr; /* from the vdso mark, if present */
	unsigned long orig_vvar_addr; /* from the vdso mark, if present */
	unsigned long rt_vvar_addr;   /* from the vdso mark, if present */
	int is_marked;		      /* vdso mark found in this VMA */
	bool try_fill_symtable;	      /* NOTE(review): presumably asks parasite to parse symbols — confirm */
	bool is_vdso;		      /* vdso (as opposed to vvar) VMA */
};
/*
 * Header of the page-dump request; nr_vmas parasite_vma_entry records
 * and then iovecs follow in the same buffer (see pargs_vmas()/pargs_iovs()).
 */
struct parasite_dump_pages_args {
	unsigned int nr_vmas;	/* number of parasite_vma_entry records following */
	unsigned int add_prot;	/* extra protection bits — presumably for PARASITE_CMD_MPROTECT_VMAS; confirm */
	unsigned int off;	/* NOTE(review): offset bookkeeping for the transfer — confirm against caller */
	unsigned int nr_segs;	/* number of iovec segments */
	unsigned int nr_pages;	/* total pages covered */
};
/* VMA entries are laid out immediately after the args header. */
static inline struct parasite_vma_entry *pargs_vmas(struct parasite_dump_pages_args *a)
{
	void *tail = a + 1;

	return tail;
}
/* The iovec array follows the nr_vmas VMA entries in the same buffer. */
static inline struct iovec *pargs_iovs(struct parasite_dump_pages_args *a)
{
	struct parasite_vma_entry *vmas = pargs_vmas(a);

	return (struct iovec *)&vmas[a->nr_vmas];
}
struct parasite_dump_sa_args {
rt_sigaction_t sas[SIGMAX];
};
struct parasite_dump_itimers_args {
struct itimerval real;
struct itimerval virt;
struct itimerval prof;
};
/* One dumped POSIX timer. */
struct posix_timer {
	int it_id;	       /* timer id — presumably from timer_create(); confirm */
	struct itimerspec val; /* current value and interval */
	int overrun;	       /* overrun counter */
};
/* Header + flexible tail of dumped POSIX timers. */
struct parasite_dump_posix_timers_args {
	int timer_n;			     /* number of entries in timer[] */
	struct posix_timer timer[0];	     /* the timers themselves */
};
/* One AIO ring to be checked by the parasite. */
struct parasite_aio {
	unsigned long ctx; /* NOTE(review): looks like the aio context address — confirm */
	unsigned int size; /* ring size */
};
/* Header + flexible tail for PARASITE_CMD_CHECK_AIOS. */
struct parasite_check_aios_args {
	unsigned nr_rings;	      /* number of entries in ring[] */
	struct parasite_aio ring[0]; /* the rings themselves */
};
/*
 * Bytes needed for a parasite_dump_posix_timers_args carrying timer_n
 * entries.  Use offsetof() instead of the former "sizeof(int)": struct
 * posix_timer is long-aligned, so timer[] starts after alignment
 * padding and sizeof(int) undercounted the header size.
 */
static inline int posix_timers_dump_size(int timer_n)
{
	return offsetof(struct parasite_dump_posix_timers_args, timer) + sizeof(struct posix_timer) * timer_n;
}
/*
* Misc sfuff, that is too small for separate file, but cannot
* be read w/o using parasite
*/
struct parasite_dump_misc {
	unsigned long brk; /* program break of the dumpee */
	u32 pid;	   /* ids as seen from inside the dumpee's namespaces */
	u32 sid;
	u32 pgid;
	u32 umask;
	int dumpable;	     /* presumably prctl(PR_GET_DUMPABLE) — confirm */
	int thp_disabled;    /* presumably prctl(PR_GET_THP_DISABLE) — confirm */
	int child_subreaper; /* presumably prctl(PR_GET_CHILD_SUBREAPER) — confirm */
};
/*
* Calculate how long we can make the groups array in parasite_dump_creds
* and still fit the struct in one page
*/
#define PARASITE_MAX_GROUPS \
((PAGE_SIZE - sizeof(struct parasite_dump_thread) - offsetof(struct parasite_dump_creds, groups)) / \
sizeof(unsigned int)) /* groups */
struct parasite_dump_creds {
unsigned int cap_last_cap;
u32 cap_inh[CR_CAP_SIZE];
u32 cap_prm[CR_CAP_SIZE];
u32 cap_eff[CR_CAP_SIZE];
u32 cap_bnd[CR_CAP_SIZE];
int uids[4];
int gids[4];
unsigned int secbits;
unsigned int ngroups;
/*
* FIXME -- this structure is passed to parasite code
* through parasite args area so in parasite_dump_creds()
* call we check for size of this data fits the size of
* the area. Unfortunately, we _actually_ use more bytes
* than the sizeof() -- we put PARASITE_MAX_GROUPS int-s
* in there, so the size check is not correct.
*
* However, all this works simply because we make sure
* the PARASITE_MAX_GROUPS is so, that the total amount
* of memory in use doesn't exceed the PAGE_SIZE and the
* args area is at least one page (PARASITE_ARG_SIZE_MIN).
*/
unsigned int groups[0];
};
struct parasite_check_rseq {
bool has_rseq;
bool has_ptrace_get_rseq_conf; /* no need to check if supported */
bool rseq_inited;
};
struct parasite_dump_thread {
unsigned int *tid_addr;
pid_t tid;
tls_t tls;
struct parasite_check_rseq rseq;
stack_t sas;
int pdeath_sig;
char comm[TASK_COMM_LEN];
struct parasite_dump_creds creds[0];
};
/* Translate a kernel sigaltstack descriptor into its protobuf image form. */
static inline void copy_sas(ThreadSasEntry *dst, const stack_t *src)
{
	dst->ss_flags = src->ss_flags;
	dst->ss_size = (u64)src->ss_size;
	dst->ss_sp = encode_pointer(src->ss_sp);
}
/*
* How many descriptors can be transferred from parasite:
*
* 1) struct parasite_drain_fd + all descriptors should fit into one page
* 2) The value should be a multiple of CR_SCM_MAX_FD, because descriptors
* are transferred with help of send_fds and recv_fds.
* 3) criu should work with a default value of the file limit (1024)
*/
/* Parenthesized so the macro expands safely inside larger expressions. */
#define PARASITE_MAX_FDS (CR_SCM_MAX_FD * 3)
struct parasite_drain_fd {
	int nr_fds; /* number of valid entries in fds[] */
	int fds[0]; /* descriptors to transfer, flexible tail */
};
/* Per-descriptor options sent along with each drained fd. */
struct fd_opts {
	char flags; /* per-fd flags — NOTE(review): presumably fcntl fd flags; confirm */
	struct {
		/* NOTE(review): looks like fcntl F_GETOWN_EX-style owner info — confirm */
		uint32_t uid;
		uint32_t euid;
		uint32_t signum;
		uint32_t pid_type;
		uint32_t pid;
	} fown;
};
/*
 * Total bytes needed to drain dfds->nr_fds descriptors, clamped to the
 * per-transfer limit; every fd also carries a struct fd_opts.
 */
static inline int drain_fds_size(struct parasite_drain_fd *dfds)
{
	int nr = dfds->nr_fds;

	if (nr > (int)PARASITE_MAX_FDS)
		nr = PARASITE_MAX_FDS;

	return sizeof(*dfds) + nr * (sizeof(dfds->fds[0]) + sizeof(struct fd_opts));
}
struct parasite_tty_args {
int fd;
int type;
int sid;
int pgrp;
bool hangup;
int st_pckt;
int st_lock;
int st_excl;
};
struct parasite_dump_cgroup_args {
/*
* 4K should be enough for most cases.
*
* The string is null terminated.
*/
char contents[(1 << 12) - 32];
/*
* Contains the path to thread cgroup procfs.
* "self/task/<tid>/cgroup"
*/
char thread_cgrp[32];
};
#endif /* !__ASSEMBLY__ */
#endif /* __CR_PARASITE_H__ */
| 5,428 | 20.290196 | 109 |
h
|
criu
|
criu-master/criu/include/path.h
|
#ifndef __CR_PATH_H__
#define __CR_PATH_H__
#include "namespaces.h"
#include "pstree.h"
/* Absolute paths are used on dump and relative paths are used on restore */
/* A path names the FS root iff it is exactly "/". */
static inline int is_root(char *p)
{
	return strcmp(p, "/") == 0;
}
/* True for the root mount (the topmost one) */
/* Topmost mount: no parent and it lives in the root task's mount ns. */
static inline int is_root_mount(struct mount_info *mi)
{
	if (mi->parent != NULL)
		return 0;

	return mi->nsid->id == root_item->ids->mnt_ns_id;
}
/*
* True if the mountpoint target is root on its FS.
*
* This is used to determine whether we need to postpone
* mounting. E.g. one can bind mount some subdir from a
* disk, and in this case we'll have to get the root disk
* mount first, then bind-mount it. See do_mount_one().
*/
static inline int fsroot_mounted(struct mount_info *mi)
{
	/* True when the mountpoint target is "/" of its own filesystem. */
	return strcmp(mi->root, "/") == 0;
}
char *cut_root_for_bind(char *target_root, char *source_root);
/*
* Get a mount point for a sibling of m if m->parent and p are in the same
* shared group.
*/
char *mnt_get_sibling_path(struct mount_info *m, struct mount_info *p, char *buf, int len);
#endif
| 1,055 | 24.756098 | 91 |
h
|
criu
|
criu-master/criu/include/pid.h
|
#ifndef __CR_PID_H__
#define __CR_PID_H__
#include <compel/task-state.h>
#include "stdbool.h"
#include "rbtree.h"
/*
* Task states, used in e.g. struct pid's state.
*/
/*
 * Task states, used in e.g. struct pid's state.
 */
enum __criu_task_state {
	/* Values shared with compel */
	TASK_ALIVE = COMPEL_TASK_ALIVE,
	TASK_DEAD = COMPEL_TASK_DEAD,
	TASK_STOPPED = COMPEL_TASK_STOPPED,
	TASK_ZOMBIE = COMPEL_TASK_ZOMBIE,
	/* Own internal states */
	TASK_HELPER = COMPEL_TASK_MAX + 1, /* starts past compel's range to avoid clashes */
	TASK_THREAD,
	/* new values are to be added before this line */
	TASK_UNDEF = 0xff
};
struct pid {
struct pstree_item *item;
/*
* The @real pid is used to fetch tasks during dumping stage,
* This is a global pid seen from the context where the dumping
* is running.
*/
pid_t real;
int state; /* TASK_XXX constants */
/* If an item is in stopped state it has a signal number
* that caused task to stop.
*/
int stop_signo;
/*
* The @virt pid is one which used in the image itself and keeps
* the pid value to be restored. This pid fetched from the
* dumpee context, because the dumpee might have own pid namespace.
*/
struct {
pid_t virt;
struct rb_node node;
} ns[1]; /* Must be at the end of struct pid */
};
/*
* When we have to restore a shared resource, we mush select which
* task should do it, and make other(s) wait for it. In order to
* avoid deadlocks, always make task with lower pid be the restorer.
*/
/* The task with the numerically smaller pid restores the shared resource. */
static inline bool pid_rst_prio(unsigned pid_a, unsigned pid_b)
{
	if (pid_a < pid_b)
		return true;

	return false;
}
/* Like pid_rst_prio(), but a pid also has priority over itself. */
static inline bool pid_rst_prio_eq(unsigned pid_a, unsigned pid_b)
{
	return !(pid_a > pid_b);
}
#endif /* __CR_PID_H__ */
| 1,598 | 23.227273 | 68 |
h
|
criu
|
criu-master/criu/include/pipes.h
|
#ifndef __CR_PIPES_H__
#define __CR_PIPES_H__
#include "images/pipe-data.pb-c.h"
#include "images/pipe.pb-c.h"
extern struct collect_image_info pipe_cinfo;
extern struct collect_image_info pipe_data_cinfo;
extern const struct fdtype_ops pipe_dump_ops;
/* Pipes are identified by the inode number of the pipe's stat entry. */
static inline u32 pipe_id(const struct fd_parms *p)
{
	return p->stat.st_ino;
}
#define NR_PIPES_WITH_DATA 1024
struct pipe_data_dump {
int img_type;
unsigned int nr;
u32 ids[NR_PIPES_WITH_DATA];
};
extern int dump_one_pipe_data(struct pipe_data_dump *pd, int lfd, const struct fd_parms *p);
struct pipe_data_rst {
PipeDataEntry *pde;
void *data;
struct pipe_data_rst *next;
};
#define PIPE_DATA_HASH_BITS 5
#define PIPE_DATA_HASH_SIZE (1 << PIPE_DATA_HASH_BITS)
#define PIPE_DATA_HASH_MASK (PIPE_DATA_HASH_SIZE - 1)
extern int do_collect_pipe_data(struct pipe_data_rst *, ProtobufCMessage *, struct cr_img *,
struct pipe_data_rst **hash);
extern int restore_pipe_data(int img_type, int pfd, u32 id, struct pipe_data_rst **hash);
/*
* The sequence of objects which should be restored:
* pipe -> files struct-s -> fd-s.
* pipe_entry describes pipe's file structs-s.
* A pipe doesn't have own properties, so it has no object.
*/
#include "images/pipe.pb-c.h"
struct pipe_info {
PipeEntry *pe;
struct list_head pipe_list; /* All pipe_info with the same pipe_id
* This is pure circular list without head */
struct list_head list; /* global list of pipes */
struct file_desc d;
unsigned int create : 1, reopen : 1;
};
extern int collect_one_pipe_ops(void *o, ProtobufCMessage *base, struct file_desc_ops *ops);
extern int open_pipe(struct file_desc *d, int *new_fd);
#endif /* __CR_PIPES_H__ */
| 1,690 | 26.274194 | 92 |
h
|
criu
|
criu-master/criu/include/plugin.h
|
#ifndef __CR_PLUGIN_H__
#define __CR_PLUGIN_H__
#include "criu-plugin.h"
#include "common/compiler.h"
#include "common/list.h"
#ifndef CR_PLUGIN_DEFAULT
#define CR_PLUGIN_DEFAULT "/usr/lib/criu/"
#endif
void cr_plugin_fini(int stage, int err);
int cr_plugin_init(int stage);
typedef struct {
struct list_head head;
struct list_head hook_chain[CR_PLUGIN_HOOK__MAX];
} cr_plugin_ctl_t;
extern cr_plugin_ctl_t cr_plugin_ctl;
typedef struct {
cr_plugin_desc_t *d;
struct list_head list;
void *dlhandle;
struct list_head link[CR_PLUGIN_HOOK__MAX];
} plugin_desc_t;
#define run_plugins(__hook, ...) \
({ \
plugin_desc_t *this; \
int __ret = -ENOTSUP; \
\
list_for_each_entry(this, &cr_plugin_ctl.hook_chain[CR_PLUGIN_HOOK__##__hook], \
link[CR_PLUGIN_HOOK__##__hook]) { \
pr_debug("plugin: `%s' hook %u -> %p\n", this->d->name, CR_PLUGIN_HOOK__##__hook, \
this->d->hooks[CR_PLUGIN_HOOK__##__hook]); \
__ret = ((CR_PLUGIN_HOOK__##__hook##_t *)this->d->hooks[CR_PLUGIN_HOOK__##__hook])( \
__VA_ARGS__); \
if (__ret == -ENOTSUP) \
continue; \
break; \
} \
__ret; \
})
#endif
| 2,072 | 42.1875 | 109 |
h
|
criu
|
criu-master/criu/include/proc_parse.h
|
#ifndef __CR_PROC_PARSE_H__
#define __CR_PROC_PARSE_H__
#include <sys/types.h>
#include "compel/infect.h"
#define PROC_TASK_COMM_LEN 32
#define PROC_TASK_COMM_LEN_FMT "(%31s"
struct proc_pid_stat {
int pid;
char comm[PROC_TASK_COMM_LEN];
char state;
int ppid;
int pgid;
int sid;
int tty_nr;
int tty_pgrp;
unsigned int flags;
unsigned long min_flt;
unsigned long cmin_flt;
unsigned long maj_flt;
unsigned long cmaj_flt;
unsigned long utime;
unsigned long stime;
long cutime;
long cstime;
long priority;
long nice;
int num_threads;
int zero0;
unsigned long long start_time;
unsigned long vsize;
long mm_rss;
unsigned long rsslim;
unsigned long start_code;
unsigned long end_code;
unsigned long start_stack;
unsigned long esp;
unsigned long eip;
unsigned long sig_pending;
unsigned long sig_blocked;
unsigned long sig_ignored;
unsigned long sig_handled;
unsigned long wchan;
unsigned long zero1;
unsigned long zero2;
int exit_signal;
int task_cpu;
unsigned int rt_priority;
unsigned int policy;
unsigned long long delayacct_blkio_ticks;
unsigned long gtime;
long cgtime;
unsigned long start_data;
unsigned long end_data;
unsigned long start_brk;
unsigned long arg_start;
unsigned long arg_end;
unsigned long env_start;
unsigned long env_end;
int exit_code;
};
#define PROC_CAP_SIZE 2
struct proc_status_creds {
struct seize_task_status s;
unsigned int uids[4];
unsigned int gids[4];
u32 last_filter;
/*
* Keep them at the end of structure
* for fast comparison reason.
*/
u32 cap_inh[PROC_CAP_SIZE];
u32 cap_prm[PROC_CAP_SIZE];
u32 cap_eff[PROC_CAP_SIZE];
u32 cap_bnd[PROC_CAP_SIZE];
};
#define INVALID_UID ((uid_t)-1)
extern int parse_pid_stat(pid_t pid, struct proc_pid_stat *s);
extern unsigned int parse_pid_loginuid(pid_t pid, int *err, bool ignore_noent);
extern int parse_pid_oom_score_adj(pid_t pid, int *err);
extern int prepare_loginuid(unsigned int value);
extern int parse_pid_status(pid_t pid, struct seize_task_status *, void *data);
extern int parse_file_locks(void);
extern int get_fd_mntid(int fd, int *mnt_id);
struct pid;
extern int parse_threads(int pid, struct pid **_t, int *_n);
int parse_children(pid_t pid, pid_t **_c, int *_n);
extern bool is_vma_range_fmt(char *line);
extern void parse_vmflags(char *buf, u32 *flags, u64 *madv, int *io_pf);
extern int parse_uptime(uint64_t *upt);
extern int parse_timens_offsets(struct timespec *boff, struct timespec *moff);
#endif /* __CR_PROC_PARSE_H__ */
| 2,505 | 22.203704 | 79 |
h
|
criu
|
criu-master/criu/include/protobuf.h
|
#ifndef __CR_PROTOBUF_H__
#define __CR_PROTOBUF_H__
#include <stdbool.h>
#include "protobuf-desc.h"
#include "common/compiler.h"
#include "util.h"
struct cr_img;
extern int do_pb_read_one(struct cr_img *, void **objp, int type, bool eof);
#define pb_read_one(fd, objp, type) do_pb_read_one(fd, (void **)objp, type, false)
#define pb_read_one_eof(fd, objp, type) do_pb_read_one(fd, (void **)objp, type, true)
extern int pb_write_one(struct cr_img *, void *obj, int type);
#define pb_pksize(__obj, __proto_message_name) (__proto_message_name##__get_packed_size(__obj) + sizeof(u32))
#define pb_repeated_size(__obj, __member) ((size_t)(sizeof(*(__obj)->__member) * (__obj)->n_##__member))
#define pb_msg(__base, __type) container_of(__base, __type, base)
#include <google/protobuf-c/protobuf-c.h>
struct collect_image_info {
int fd_type;
int pb_type;
unsigned int priv_size;
int (*collect)(void *, ProtobufCMessage *, struct cr_img *);
unsigned flags;
};
#define COLLECT_SHARED 0x1 /* use shared memory for obj-s */
#define COLLECT_NOFREE 0x2 /* don't free entry after callback */
#define COLLECT_HAPPENED 0x4 /* image was opened and collected */
extern int collect_image(struct collect_image_info *);
extern int collect_entry(ProtobufCMessage *base, struct collect_image_info *cinfo);
/*
 * Collect every image description in @array, stopping at the first
 * failure.  Returns 0 on success, -1 on the first collect_image()
 * error.  The index is unsigned to match @size and avoid a
 * signed/unsigned comparison in the loop condition.
 */
static inline int collect_images(struct collect_image_info **array, unsigned size)
{
	unsigned i;

	for (i = 0; i < size; i++) {
		if (collect_image(array[i]))
			return -1;
	}
	return 0;
}
/*
* To speed up reading of packed objects
* by providing space on stack, this should
* be more than enough for most objects.
*/
#define PB_PKOBJ_LOCAL_SIZE 1024
#endif /* __CR_PROTOBUF_H__ */
| 1,687 | 27.133333 | 109 |
h
|
criu
|
criu-master/criu/include/pstree.h
|
#ifndef __CR_PSTREE_H__
#define __CR_PSTREE_H__
#include "common/list.h"
#include "common/lock.h"
#include "pid.h"
#include "xmalloc.h"
#include "images/core.pb-c.h"
/*
* That's the init process which usually inherit
* all orphaned children in the system.
*/
#define INIT_PID (1)
struct pstree_item {
struct pstree_item *parent;
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct pid *pid;
pid_t pgid;
pid_t sid;
pid_t born_sid;
int nr_threads; /* number of threads */
struct pid *threads; /* array of threads */
CoreEntry **core;
TaskKobjIdsEntry *ids;
union {
futex_t task_st;
unsigned long task_st_le_bits;
};
};
/*
 * Virtual pid of @i — the value stored in (and restored from) images;
 * ns[0] is the top-level entry of pid->ns[] (see struct pid in pid.h).
 */
static inline pid_t vpid(const struct pstree_item *i)
{
	return i->pid->ns[0].virt;
}
enum {
FDS_EVENT_BIT = 0,
};
#define FDS_EVENT (1 << FDS_EVENT_BIT)
extern struct pstree_item *current;
struct rst_info;
/* See alloc_pstree_item() for details */
/*
 * Restore-time private data is allocated directly after the
 * pstree_item itself, hence the (i + 1) pointer arithmetic
 * (see alloc_pstree_item() for details).
 */
static inline struct rst_info *rsti(struct pstree_item *i)
{
	return (struct rst_info *)(i + 1);
}
struct thread_lsm {
char *profile;
char *sockcreate;
};
struct ns_id;
struct dmp_info {
struct ns_id *netns;
struct page_pipe *mem_pp;
struct parasite_ctl *parasite_ctl;
struct parasite_thread_ctl **thread_ctls;
uint64_t *thread_sp;
struct criu_rseq_cs *thread_rseq_cs;
/*
* Although we don't support dumping different struct creds in general,
* we do for threads. Let's keep track of their profiles here; a NULL
* entry means there was no LSM profile for this thread.
*/
struct thread_lsm **thread_lsms;
};
/*
 * Dump-time private data occupies the same trailing area after the
 * pstree_item allocation that rsti() uses on restore.
 */
static inline struct dmp_info *dmpi(const struct pstree_item *i)
{
	return (struct dmp_info *)(i + 1);
}
/* ids is allocated and initialized for all alive tasks */
/* A task shares its fd table iff it has a parent with the same files_id. */
static inline int shared_fdtable(struct pstree_item *item)
{
	if (!item->parent)
		return 0;

	return item->ids->files_id == item->parent->ids->files_id;
}
/* Alive and stopped tasks both count as "alive" for dump/restore purposes. */
static inline bool is_alive_state(int state)
{
	switch (state) {
	case TASK_ALIVE:
	case TASK_STOPPED:
		return true;
	default:
		return false;
	}
}
static inline bool task_alive(struct pstree_item *i)
{
return is_alive_state(i->pid->state);
}
extern void free_pstree(struct pstree_item *root_item);
extern struct pstree_item *__alloc_pstree_item(bool rst);
#define alloc_pstree_item() __alloc_pstree_item(false)
extern int init_pstree_helper(struct pstree_item *ret);
extern struct pstree_item *lookup_create_item(pid_t pid);
extern void pstree_insert_pid(struct pid *pid_node);
extern struct pid *pstree_pid_by_virt(pid_t pid);
extern struct pstree_item *root_item;
extern struct pstree_item *pstree_item_next(struct pstree_item *item);
#define for_each_pstree_item(pi) for (pi = root_item; pi != NULL; pi = pstree_item_next(pi))
extern bool restore_before_setsid(struct pstree_item *child);
extern int prepare_pstree(void);
extern int prepare_dummy_pstree(void);
extern int dump_pstree(struct pstree_item *root_item);
struct pstree_item *pstree_item_by_real(pid_t virt);
struct pstree_item *pstree_item_by_virt(pid_t virt);
extern int pid_to_virt(pid_t pid);
struct task_entries;
extern struct task_entries *task_entries;
extern int prepare_task_entries(void);
extern int prepare_dummy_task_state(struct pstree_item *pi);
extern int get_task_ids(struct pstree_item *);
extern TaskKobjIdsEntry *root_ids;
extern void core_entry_free(CoreEntry *core);
extern CoreEntry *core_entry_alloc(int alloc_thread_info, int alloc_tc);
extern int pstree_alloc_cores(struct pstree_item *item);
extern void pstree_free_cores(struct pstree_item *item);
extern int collect_pstree_ids(void);
extern int preorder_pstree_traversal(struct pstree_item *item, int (*f)(struct pstree_item *));
#endif /* __CR_PSTREE_H__ */
| 3,688 | 25.731884 | 95 |
h
|
criu
|
criu-master/criu/include/rbtree.h
|
/*
* RBtree implementation adopted from the Linux kernel sources.
*/
#ifndef __CR_RBTREE_H__
#define __CR_RBTREE_H__
#include <stddef.h>
#include "common/compiler.h"
#define RB_RED 0
#define RB_BLACK 1
#define RB_MASK 3
struct rb_node {
unsigned long rb_parent_color; /* Keeps both parent anc color */
struct rb_node *rb_right;
struct rb_node *rb_left;
} __aligned(sizeof(long));
struct rb_root {
struct rb_node *rb_node;
};
#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~RB_MASK))
#define rb_color(r) ((r)->rb_parent_color & RB_BLACK)
#define rb_is_red(r) (!rb_color(r))
#define rb_is_black(r) (rb_color(r))
#define rb_set_red(r) \
do { \
(r)->rb_parent_color &= ~RB_BLACK; \
} while (0)
#define rb_set_black(r) \
do { \
(r)->rb_parent_color |= RB_BLACK; \
} while (0)
/*
 * Store the parent pointer while preserving the color bit packed into
 * the low bits (nodes are long-aligned, so those bits of @p are free).
 */
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
	rb->rb_parent_color = (rb->rb_parent_color & RB_MASK) | (unsigned long)p;
}
/* Set the node color (RB_RED/RB_BLACK) without touching the parent pointer. */
static inline void rb_set_color(struct rb_node *rb, int color)
{
	rb->rb_parent_color = (rb->rb_parent_color & ~RB_BLACK) | color;
}
#define RB_ROOT \
(struct rb_root) \
{ \
NULL, \
}
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#define RB_EMPTY_NODE(node) (rb_parent(node) == node)
#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
/*
 * Zero the node, then mark it unlinked: RB_CLEAR_NODE points the
 * parent at the node itself, which RB_EMPTY_NODE tests for.
 */
static inline void rb_init_node(struct rb_node *node)
{
	*node = (struct rb_node){};
	RB_CLEAR_NODE(node);
}
extern void rb_insert_color(struct rb_node *node, struct rb_root *root);
extern void rb_erase(struct rb_node *node, struct rb_root *root);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_first(const struct rb_root *root);
extern struct rb_node *rb_last(const struct rb_root *root);
extern struct rb_node *rb_next(const struct rb_node *node);
extern struct rb_node *rb_prev(const struct rb_node *node);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
/*
 * Attach @node as a leaf at position @rb_link under @parent.  The color
 * bits start zeroed (RB_RED); the caller must rebalance afterwards with
 * rb_insert_color().
 */
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link)
{
	node->rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;
	*rb_link = node;
}
/* Convenience helper: link @node at the found position, then rebalance. */
static inline void rb_link_and_balance(struct rb_root *root, struct rb_node *node, struct rb_node *parent,
				       struct rb_node **rb_link)
{
	rb_link_node(node, parent, rb_link);
	rb_insert_color(node, root);
}
#endif /* __CR_RBTREE_H__ */
| 2,736 | 27.810526 | 106 |
h
|
criu
|
criu-master/criu/include/restorer.h
|
#ifndef __CR_RESTORER_H__
#define __CR_RESTORER_H__
#include <signal.h>
#include <limits.h>
#include <sys/resource.h>
#include <linux/filter.h>
#include "common/config.h"
#include "types.h"
#include "int.h"
#include "types.h"
#include "common/compiler.h"
#include <compel/asm/fpu.h>
#include "common/lock.h"
#include "util.h"
#include "asm/restorer.h"
#include "posix-timer.h"
#include "timerfd.h"
#include "shmem.h"
#include "parasite-vdso.h"
#include "fault-injection.h"
#include <time.h>
#include "images/mm.pb-c.h"
/*
* These *must* be power of two values.
*/
#define RESTORE_ARGS_SIZE (512)
#define RESTORE_STACK_REDZONE (128)
#define RESTORE_STACK_SIZE (KILO(32))
struct restore_mem_zone {
u8 redzone[RESTORE_STACK_REDZONE];
u8 stack[RESTORE_STACK_SIZE];
u8 rt_sigframe[RESTORE_STACK_SIGFRAME];
} __stack_aligned__;
struct rst_sched_param {
int policy;
int nice;
int prio;
};
struct rst_rseq_param {
u64 rseq_abi_pointer;
u32 rseq_abi_size;
u32 signature;
};
struct restore_posix_timer {
struct str_posix_timer spt;
struct itimerspec val;
int overrun;
};
/*
* We should be able to construct fpu sigframe in sigreturn_prep_fpu_frame,
* so the mem_zone.rt_sigframe should be 64-bytes aligned. To make things
* simpler, force both _args alignment be 64 bytes.
*/
struct thread_creds_args {
CredsEntry creds;
unsigned int cap_last_cap;
u32 cap_inh[CR_CAP_SIZE];
u32 cap_prm[CR_CAP_SIZE];
u32 cap_eff[CR_CAP_SIZE];
u32 cap_bnd[CR_CAP_SIZE];
unsigned int secbits;
char *lsm_profile;
unsigned int *groups;
char *lsm_sockcreate;
unsigned long mem_lsm_profile_pos;
unsigned long mem_lsm_sockcreate_pos;
unsigned long mem_groups_pos;
unsigned long mem_pos_next;
};
struct thread_seccomp_filter {
struct sock_fprog sock_fprog;
unsigned int flags;
};
struct thread_restore_args {
struct restore_mem_zone *mz;
int pid;
UserRegsEntry gpregs;
u64 clear_tid_addr;
u64 futex_rla;
u32 futex_rla_len;
struct rst_sched_param sp;
struct task_restore_args *ta;
tls_t tls;
struct rst_rseq_param rseq;
siginfo_t *siginfo;
unsigned int siginfo_n;
int pdeath_sig;
struct thread_creds_args *creds_args;
int seccomp_mode;
unsigned long seccomp_filters_pos;
struct thread_seccomp_filter *seccomp_filters;
void *seccomp_filters_data;
unsigned int seccomp_filters_n;
bool seccomp_force_tsync;
char comm[TASK_COMM_LEN];
int cg_set;
int cgroupd_sk;
} __aligned(64);
typedef long (*thread_restore_fcall_t)(struct thread_restore_args *args);
struct restore_vma_io {
int nr_iovs;
loff_t off;
struct iovec iovs[0];
};
#define RIO_SIZE(niovs) (sizeof(struct restore_vma_io) + (niovs) * sizeof(struct iovec))
struct task_restore_args {
struct thread_restore_args *t; /* thread group leader */
int fd_exe_link; /* opened self->exe file */
int logfd;
unsigned int loglevel;
struct timeval logstart;
int uffd;
bool has_thp_enabled;
/* threads restoration */
int nr_threads; /* number of threads */
thread_restore_fcall_t clone_restore_fn; /* helper address for clone() call */
struct thread_restore_args *thread_args; /* array of thread arguments */
struct task_entries *task_entries;
void *rst_mem;
unsigned long rst_mem_size;
/* Below arrays get remapped from RM_PRIVATE in sigreturn_restore */
VmaEntry *vmas;
unsigned int vmas_n;
int vma_ios_fd;
struct restore_vma_io *vma_ios;
unsigned int vma_ios_n;
struct restore_posix_timer *posix_timers;
unsigned int posix_timers_n;
struct restore_timerfd *timerfd;
unsigned int timerfd_n;
siginfo_t *siginfo;
unsigned int siginfo_n;
struct rst_tcp_sock *tcp_socks;
unsigned int tcp_socks_n;
struct rst_aio_ring *rings;
unsigned int rings_n;
struct rlimit64 *rlims;
unsigned int rlims_n;
pid_t *helpers /* the TASK_HELPERS to wait on at the end of restore */;
unsigned int helpers_n;
pid_t *zombies;
unsigned int zombies_n;
int *inotify_fds; /* fds to cleanup inotify events at CR_STATE_RESTORE_SIGCHLD stage */
unsigned int inotify_fds_n;
/* * * * * * * * * * * * * * * * * * * * */
unsigned long task_size;
unsigned long premmapped_addr;
unsigned long premmapped_len;
rt_sigaction_t sigchld_act;
void *bootstrap_start;
unsigned long bootstrap_len;
struct itimerval itimers[3];
MmEntry mm;
auxv_t mm_saved_auxv[AT_VECTOR_SIZE];
u32 mm_saved_auxv_size;
char comm[TASK_COMM_LEN];
/*
* proc_fd is a handle to /proc that the restorer blob can use to open
* files there, because some of them can't be opened before the
* restorer blob is called.
*/
int proc_fd;
int seccomp_mode;
bool compatible_mode;
bool can_map_vdso;
bool auto_dedup;
unsigned long vdso_rt_size;
struct vdso_maps vdso_maps_rt; /* runtime vdso symbols */
unsigned long vdso_rt_parked_at; /* safe place to keep vdso */
void **breakpoint;
enum faults fault_strategy;
#ifdef ARCH_HAS_LONG_PAGES
unsigned page_size;
#endif
int lsm_type;
int child_subreaper;
bool has_clone3_set_tid;
/*
* info about rseq from libc used to
* unregister it before memory restoration procedure
*/
struct rst_rseq_param libc_rseq;
uid_t uid;
u32 cap_eff[CR_CAP_SIZE];
} __aligned(64);
/*
* For arm64 stack needs to aligned to 16 bytes.
* Hence align to 16 bytes for all
*/
#define RESTORE_ALIGN_STACK(start, size) (ALIGN((start) + (size)-16, 16))
/*
 * Initial stack pointer within @mz->stack, 16-byte aligned via
 * RESTORE_ALIGN_STACK (arm64 requires 16-byte stack alignment,
 * see the comment above the macro).
 */
static inline unsigned long restorer_stack(struct restore_mem_zone *mz)
{
	return RESTORE_ALIGN_STACK((long)&mz->stack, RESTORE_STACK_SIZE);
}
enum {
/*
* Restore stages. The stage is started by criu process, then
* confirmed by all tasks involved in it. Then criu does some
* actions and starts the next stage.
*
* The first stated stage is CR_STATE_ROOT_TASK which is started
* right before calling fork_with_pid() for the root_item.
*/
CR_STATE_FAIL = -1,
/*
* Root task is created and does some pre-checks.
* After the stage ACT_SETUP_NS scripts are performed.
*/
CR_STATE_ROOT_TASK = 0,
/*
* The prepare_namespace() is called.
* After the stage criu opens root task's mntns and
* calls ACT_POST_SETUP_NS scripts.
*/
CR_STATE_PREPARE_NAMESPACES,
/*
* All tasks fork and call open_transport_socket().
* Stage is needed to make sure they all have the socket.
* Also this stage is a sync point after which the
* fini_restore_mntns() can be called.
*
* This stage is a little bit special. Normally all stages
* are controlled by criu process, but when this stage
* starts criu process starts waiting for the tasks to
* finish it, but by the time it gets woken up the stage
* finished is CR_STATE_RESTORE. The forking stage is
* barrier-ed by the root task, this task is also the one
* that switches the stage (into restoring).
*
* The above is done to lower the amount of context
* switches from root task to criu and back, since the
* separate forking stage is not needed by criu, it's
* purely to make sure all tasks be in sync.
*/
CR_STATE_FORKING,
/*
* Main restore stage. By the end of it all tasks are
* almost ready and what's left is:
* pick up zombies and helpers
* restore sigchild handlers used to detect restore errors
* restore credentials, seccomp, dumpable and pdeath_sig
*/
CR_STATE_RESTORE,
/*
* Tasks restore sigchild handlers.
* Stage is needed to synchronize the change in error
* propagation via sigchild.
*/
CR_STATE_RESTORE_SIGCHLD,
/*
* Final stage.
* For security reason processes can be resumed only when all
* credentials are restored. Otherwise someone can attach to a
* process, which are not restored credentials yet and execute
* some code.
* Seccomp needs to be restored after creds.
* Dumpable and pdeath signal are restored after seccomp.
*/
CR_STATE_RESTORE_CREDS,
CR_STATE_COMPLETE
};
#define restore_finish_stage(__v, __stage) \
({ \
futex_dec_and_wake(&(__v)->nr_in_progress); \
futex_wait_while(&(__v)->start, __stage); \
(s32) futex_get(&(__v)->start); \
})
#define __r_sym(name) restorer_sym##name
#define restorer_sym(rblob, name) (void *)(rblob + __r_sym(name))
#endif /* __CR_RESTORER_H__ */
| 8,164 | 23.373134 | 88 |
h
|
criu
|
criu-master/criu/include/rst-malloc.h
|
#ifndef __CR_RST_MALLOC__H__
#define __CR_RST_MALLOC__H__
/*
* On restore we need differetn types of memory allocation.
* Here's an engine that tries to generalize them all. The
* main difference is in how the buffer with objects is being
* grown up.
*
* Buffers, that are to be used by restorer will be remapped
* into restorer address space with rst_mem_remap() call. Thus
* we have to either keep track of all the buffers and objects,
* or keep objects one-by-one in a plain linear buffer. The
* engine uses the 2nd approach.
*/
enum {
/*
* Shared non-remapable allocations. These can happen only
* in "global" context, i.e. when objects are allocated to
* be used by any process to be restored. The objects are
* not going to be used in restorer blob, thus allocation
* engine grows buffers in a simple manner.
*/
RM_SHARED,
/*
* Shared objects, that are about to be used in restorer
* blob. For these the *_remap_* stuff below is used to get
* the actual pointer on any object. Growing a buffer is
* done with mremap, so that we don't have to keep track
* of all the buffer chunks and can remap them in restorer
* in one call.
*/
RM_SHREMAP,
/*
* Privately used objects. Buffer grow and remap is the
* same as for SHREMAP, but memory regions are MAP_PRIVATE.
*/
RM_PRIVATE,
RST_MEM_TYPES,
};
/*
* Disables SHARED and SHREMAP allocations, turns on PRIVATE
*/
extern void rst_mem_switch_to_private(void);
/*
* Reports a cookie of a current shared buffer position, that
* can later be used in rst_mem_remap_ptr() to find out the object
* pointer in the restorer blob.
*/
extern unsigned long rst_mem_align_cpos(int type);
extern void *rst_mem_remap_ptr(unsigned long pos, int type);
#define RST_MEM_FIXUP_PPTR(ptr) \
do { \
ptr = rst_mem_remap_ptr((unsigned long)ptr, RM_PRIVATE); \
} while (0)
/*
* Allocate and free objects. We don't need to free arbitrary
* object, thus allocation is simple (linear) and only the
* last object can be freed (pop-ed from buffer).
*/
extern void *rst_mem_alloc(unsigned long size, int type);
extern void rst_mem_free_last(int type);
/* Word-align the current freelist pointer for the next allocation. If we don't
* align pointers, some futex and atomic operations can fail.
*/
extern void rst_mem_align(int type);
/*
* Routines to remap SHREMAP and PRIVATE into restorer address space
*/
extern unsigned long rst_mem_lock(void);
extern int rst_mem_remap(void *to);
extern void *shmalloc(size_t bytes);
extern void shfree_last(void *ptr);
#endif /* __CR_RST_MALLOC__H__ */
| 2,687 | 31.385542 | 79 |
h
|
criu
|
criu-master/criu/include/sched.h
|
#ifndef __CR_SCHED_H__
#define __CR_SCHED_H__
#include <linux/types.h>
#ifndef ptr_to_u64
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
#endif
#ifndef u64_to_ptr
#define u64_to_ptr(x) ((void *)(uintptr_t)x)
#endif
/*
* This structure is needed by clone3(). The kernel
* calls it 'struct clone_args'. As CRIU will always
* need at least this part of the structure (VER1)
* to be able to test if clone3() with set_tid works,
* the structure is defined here as 'struct _clone_args'.
*/
/*
 * Argument block passed to clone3(); mirrors the kernel's
 * 'struct clone_args'. Per the comment above, CRIU needs at least
 * this portion (VER1) of the structure to probe whether clone3()
 * with set_tid works.
 */
struct _clone_args {
	__aligned_u64 flags;	/* CLONE_* flags */
	__aligned_u64 pidfd;	/* NOTE(review): presumably where a pidfd is stored -- confirm against clone3(2) */
	__aligned_u64 child_tid;
	__aligned_u64 parent_tid;
	__aligned_u64 exit_signal;
	__aligned_u64 stack;	/* lowest address of the child's stack area */
	__aligned_u64 stack_size;
	__aligned_u64 tls;
	__aligned_u64 set_tid;		/* pointer to requested TIDs -- this is what CRIU probes for */
	__aligned_u64 set_tid_size;	/* number of entries behind set_tid */
};
#endif /* __CR_SCHED_H__ */
| 798 | 22.5 | 57 |
h
|
criu
|
criu-master/criu/include/seccomp.h
|
#ifndef __CR_SECCOMP_H__
#define __CR_SECCOMP_H__
#include <linux/seccomp.h>
#include <linux/filter.h>
#include "images/seccomp.pb-c.h"
#include "images/core.pb-c.h"
#ifndef SECCOMP_MODE_DISABLED
#define SECCOMP_MODE_DISABLED 0
#endif
#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif
#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC 1
#endif
struct thread_restore_args;
struct task_restore_args;
struct pstree_item;
struct rb_node;
/*
* seccomp filters are bound to @current->seccomp.filter
* in the kernel, ie they are per thread structures.
*
* If filter is assigned then every subsequent call
* to fork() makes a copy of this @current->seccomp.filter
* pointer into child process.
*
* The thread group can share a filter if the filter
* is assigned with SECCOMP_FILTER_FLAG_TSYNC on group
* which has no filters yet.
*/
struct seccomp_filter_chain {
struct seccomp_filter_chain *prev;
SeccompFilter filter;
};
struct seccomp_entry {
struct rb_node node;
struct seccomp_entry *next;
pid_t tid_real;
size_t img_filter_pos;
unsigned int mode;
struct seccomp_filter_chain *chain;
size_t nr_chains;
};
extern struct seccomp_entry *seccomp_lookup(pid_t tid_real, bool create, bool mandatory);
#define seccomp_find_entry(tid_real) seccomp_lookup(tid_real, false, true)
extern int seccomp_collect_entry(pid_t tid_real, unsigned int mode);
extern void seccomp_free_entries(void);
extern int seccomp_dump_thread(pid_t tid_real, ThreadCoreEntry *thread_core);
extern int seccomp_collect_dump_filters(void);
extern int seccomp_read_image(void);
extern int seccomp_prepare_threads(struct pstree_item *item, struct task_restore_args *ta);
extern void seccomp_rst_reloc(struct thread_restore_args *thread_arg);
#endif
| 1,913 | 24.52 | 91 |
h
|
criu
|
criu-master/criu/include/servicefd.h
|
#ifndef __CR_SERVICE_FD_H__
#define __CR_SERVICE_FD_H__
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include "criu-log.h"
enum sfd_type {
SERVICE_FD_MIN,
LOG_FD_OFF,
IMG_FD_OFF,
IMG_STREAMER_FD_OFF,
PROC_FD_OFF, /* fd with /proc for all proc_ calls */
PROC_PID_FD_OFF,
PROC_SELF_FD_OFF,
CR_PROC_FD_OFF, /* some other's proc fd:
* - For dump -- target ns' proc
* - For restore -- CRIU ns' proc
*/
ROOT_FD_OFF, /* Root of the namespace we dump/restore */
CGROUP_YARD,
CGROUPD_SK, /* Socket for cgroupd to fix up thread's cgroup controller */
USERNSD_SK, /* Socket for usernsd */
NS_FD_OFF, /* Node's net namespace fd */
TRANSPORT_FD_OFF, /* to transfer file descriptors */
RPC_SK_OFF,
FDSTORE_SK_OFF,
SERVICE_FD_MAX
};
struct pstree_item;
extern bool sfds_protected;
extern const char *sfd_type_name(enum sfd_type type);
extern int init_service_fd(void);
extern int get_service_fd(enum sfd_type type);
extern bool is_any_service_fd(int fd);
extern bool is_service_fd(int fd, enum sfd_type type);
extern int service_fd_min_fd(struct pstree_item *item);
extern int install_service_fd(enum sfd_type type, int fd);
extern int close_service_fd(enum sfd_type type);
extern void __close_service_fd(enum sfd_type type);
extern int clone_service_fd(struct pstree_item *me);
#endif /* __CR_SERVICE_FD_H__ */
| 1,398 | 25.903846 | 76 |
h
|
criu
|
criu-master/criu/include/shmem.h
|
#ifndef __CR_SHMEM_H__
#define __CR_SHMEM_H__
#include "int.h"
#include "common/lock.h"
#include "images/vma.pb-c.h"
struct vma_area;
extern int collect_shmem(int pid, struct vma_area *vma);
extern int collect_sysv_shmem(unsigned long shmid, unsigned long size);
extern int cr_dump_shmem(void);
extern int add_shmem_area(pid_t pid, VmaEntry *vma, u64 *map);
extern int fixup_sysv_shmems(void);
extern int dump_one_memfd_shmem(int fd, unsigned long shmid, unsigned long size);
extern int dump_one_sysv_shmem(void *addr, unsigned long size, unsigned long shmid);
extern int restore_sysv_shmem_content(void *addr, unsigned long size, unsigned long shmid);
extern int restore_memfd_shmem_content(int fd, unsigned long shmid, unsigned long size);
#define SYSV_SHMEM_SKIP_FD (0x7fffffff)
#endif /* __CR_SHMEM_H__ */
| 815 | 34.478261 | 91 |
h
|
criu
|
criu-master/criu/include/sizes.h
|
#ifndef __CR_SIZES_H__
#define __CR_SIZES_H__
/*
* Copied from the Linux kernel header include/linux/sizes.h
*/
#define SZ_1 0x00000001
#define SZ_2 0x00000002
#define SZ_4 0x00000004
#define SZ_8 0x00000008
#define SZ_16 0x00000010
#define SZ_32 0x00000020
#define SZ_64 0x00000040
#define SZ_128 0x00000080
#define SZ_256 0x00000100
#define SZ_512 0x00000200
#define SZ_1K 0x00000400
#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
#define SZ_32K 0x00008000
#define SZ_64K 0x00010000
#define SZ_128K 0x00020000
#define SZ_256K 0x00040000
#define SZ_512K 0x00080000
#define SZ_1M 0x00100000
#define SZ_2M 0x00200000
#define SZ_4M 0x00400000
#define SZ_8M 0x00800000
#define SZ_16M 0x01000000
#define SZ_32M 0x02000000
#define SZ_64M 0x04000000
#define SZ_128M 0x08000000
#define SZ_256M 0x10000000
#define SZ_512M 0x20000000
#define SZ_1G 0x40000000
#define SZ_2G 0x80000000
#define SZ_4G 0x100000000ULL
#define SZ_8G 0x200000000ULL
#define SZ_16G 0x400000000ULL
#define SZ_32G 0x800000000ULL
#define SZ_64T 0x400000000000ULL
#endif /* __CR_SIZES_H__ */
| 1,130 | 21.176471 | 60 |
h
|
criu
|
criu-master/criu/include/sk-inet.h
|
#ifndef __CR_SK_INET_H__
#define __CR_SK_INET_H__
#include <netinet/tcp.h>
#include "sockets.h"
#include "files.h"
#include "common/list.h"
#include "images/sk-inet.pb-c.h"
#define INET_ADDR_LEN 48 /* max of INET_ADDRSTRLEN and INET6_ADDRSTRLEN */
#ifndef TCP_REPAIR
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
#define TCP_REPAIR_QUEUE 20
#define TCP_QUEUE_SEQ 21
#define TCP_REPAIR_OPTIONS 22
#endif
#ifndef IP_HDRINCL
#define IP_HDRINCL 3
#endif
#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22
#endif
#ifndef IPV6_HDRINCL
#define IPV6_HDRINCL 36
#endif
struct inet_sk_desc {
struct socket_desc sd;
unsigned int type;
unsigned int src_port;
unsigned int dst_port;
unsigned int state;
unsigned int rqlen;
unsigned int wqlen; /* sent + unsent data */
unsigned int uwqlen; /* unsent data */
unsigned int src_addr[4];
unsigned int dst_addr[4];
unsigned short shutdown;
bool cork;
int rfd;
int cpt_reuseaddr;
struct list_head rlist;
void *priv;
};
struct inet_port;
struct inet_sk_info {
InetSkEntry *ie;
struct file_desc d;
struct inet_port *port;
struct list_head port_list;
/*
* This is an fd by which the socket is opened.
* It will be carried down to restorer code to
* repair-off the socket at the very end.
*/
int sk_fd;
struct list_head rlist;
};
extern int inet_bind(int sk, struct inet_sk_info *);
extern int inet_connect(int sk, struct inet_sk_info *);
#ifdef CR_NOGLIBC
#define setsockopt sys_setsockopt
#endif
/*
 * Turn off TCP_REPAIR mode on socket @fd.
 * Failures are only logged -- callers have no way to recover here.
 */
static inline void tcp_repair_off(int fd)
{
	int off = 0;

	if (setsockopt(fd, SOL_TCP, TCP_REPAIR, &off, sizeof(off)) < 0)
		pr_err("Failed to turn off repair mode on socket\n");
}
extern void tcp_locked_conn_add(struct inet_sk_info *);
extern void rst_unlock_tcp_connections(void);
extern void cpt_unlock_tcp_connections(void);
extern int dump_one_tcp(int sk, struct inet_sk_desc *sd, SkOptsEntry *soe);
extern int restore_one_tcp(int sk, struct inet_sk_info *si);
#define SK_EST_PARAM "tcp-established"
#define SK_INFLIGHT_PARAM "skip-in-flight"
#define SK_CLOSE_PARAM "tcp-close"
struct task_restore_args;
int prepare_tcp_socks(struct task_restore_args *);
struct rst_tcp_sock {
int sk;
bool reuseaddr;
};
union libsoccr_addr;
int restore_sockaddr(union libsoccr_addr *sa, int family, u32 pb_port, u32 *pb_addr, u32 ifindex);
#endif /* __CR_SK_INET_H__ */
| 2,375 | 21.628571 | 98 |
h
|
criu
|
criu-master/criu/include/sockets.h
|
#ifndef __CR_SOCKETS_H__
#define __CR_SOCKETS_H__
#include <alloca.h>
#include <stdbool.h>
#include <sys/socket.h>
#include "images/sk-opts.pb-c.h"
#include "images/fdinfo.pb-c.h"
struct fdinfo_list_entry;
struct sk_opts_entry;
struct file_desc;
struct fd_parms;
struct cr_imgset;
struct nlmsghdr;
struct cr_img;
struct socket_desc {
unsigned int family;
unsigned int ino;
struct socket_desc *next;
struct ns_id *sk_ns;
int already_dumped;
};
extern int dump_socket(struct fd_parms *p, int lfd, FdinfoEntry *);
extern int dump_socket_opts(int sk, SkOptsEntry *soe);
extern int restore_socket_opts(int sk, SkOptsEntry *soe);
extern int sk_setbufs(int sk, uint32_t *bufs);
extern void release_skopts(SkOptsEntry *);
extern int restore_prepare_socket(int sk);
extern void preload_socket_modules(void);
extern bool socket_test_collect_bit(unsigned int family, unsigned int proto);
extern int sk_collect_one(unsigned ino, int family, struct socket_desc *d, struct ns_id *ns);
struct ns_id;
extern int collect_sockets(struct ns_id *);
extern struct collect_image_info inet_sk_cinfo;
extern struct collect_image_info unix_sk_cinfo;
extern int add_fake_unix_queuers(void);
extern int fix_external_unix_sockets(void);
extern int prepare_scms(void);
extern int unix_note_scm_rights(int id_for, uint32_t *file_ids, int *fds, int n_ids);
extern struct collect_image_info netlink_sk_cinfo;
extern struct socket_desc *lookup_socket_ino(unsigned int ino, int family);
extern struct socket_desc *lookup_socket(unsigned int ino, int family, int proto);
extern const struct fdtype_ops unix_dump_ops;
extern const struct fdtype_ops inet_dump_ops;
extern const struct fdtype_ops inet6_dump_ops;
extern const struct fdtype_ops netlink_dump_ops;
extern const struct fdtype_ops packet_dump_ops;
extern int inet_collect_one(struct nlmsghdr *h, int family, int type, struct ns_id *ns);
extern int unix_receive_one(struct nlmsghdr *h, struct ns_id *ns, void *);
extern int netlink_receive_one(struct nlmsghdr *hdr, struct ns_id *ns, void *arg);
extern int unix_sk_id_add(unsigned int ino);
extern int unix_sk_ids_parse(char *optarg);
extern int unix_prepare_root_shared(void);
extern void init_sk_info_hash(void);
extern int do_dump_opt(int sk, int level, int name, void *val, int len);
#define dump_opt(s, l, n, f) do_dump_opt(s, l, n, f, sizeof(*f))
extern int do_restore_opt(int sk, int level, int name, void *val, int len);
#define restore_opt(s, l, n, f) do_restore_opt(s, l, n, f, sizeof(*f))
#define sk_encode_shutdown(img, mask) \
do { \
/* \
* protobuf SK_SHUTDOWN__ bits match those \
* reported by kernel \
*/ \
(img)->shutdown = mask; \
if ((img)->shutdown != SK_SHUTDOWN__NONE) \
(img)->has_shutdown = true; \
} while (0)
/*
 * Decode a SK_SHUTDOWN__* image value into the corresponding SHUT_*
 * constant (-1 for SK_SHUTDOWN__NONE).
 *
 * Fix: the original indexed hows[] without any bounds check, so a
 * corrupted image value caused an out-of-bounds read. Out-of-range
 * values now decode to -1, same as SK_SHUTDOWN__NONE.
 */
static inline int sk_decode_shutdown(int val)
{
	static const int hows[] = { -1, SHUT_RD, SHUT_WR, SHUT_RDWR };

	if (val < 0 || val >= (int)(sizeof(hows) / sizeof(hows[0])))
		return -1;
	return hows[val];
}
#define USK_EXT_PARAM "ext-unix-sk"
#ifndef NETLINK_SOCK_DIAG
#define NETLINK_SOCK_DIAG NETLINK_INET_DIAG
#endif
extern int set_netns(uint32_t ns_id);
#ifndef SIOCGSKNS
#define SIOCGSKNS 0x894C /* get socket network namespace */
#endif
extern int kerndat_socket_netns(void);
extern int kerndat_socket_unix_file(void);
extern const char *tcp_state_name(unsigned int state, char *nm, size_t size);
extern const char *socket_type_name(unsigned int type, char *nm, size_t size);
extern const char *socket_family_name(unsigned int family, char *nm, size_t size);
extern const char *socket_proto_name(unsigned int proto, char *nm, size_t size);
#define __tcp_state_name(state, a) tcp_state_name(state, a, sizeof(a))
#define __socket_type_name(type, a) socket_type_name(type, a, sizeof(a))
#define __socket_family_name(family, a) socket_family_name(family, a, sizeof(a))
#define __socket_proto_name(proto, a) socket_proto_name(proto, a, sizeof(a))
#define __socket_info_helper(__h, __v) \
({ \
char *__nm = alloca(32); \
const char *__r = __h(__v, __nm, 32); \
__r; \
})
#define ___tcp_state_name(state) __socket_info_helper(tcp_state_name, state)
#define ___socket_type_name(type) __socket_info_helper(socket_type_name, type)
#define ___socket_family_name(family) __socket_info_helper(socket_family_name, family)
#define ___socket_proto_name(proto) __socket_info_helper(socket_proto_name, proto)
#ifndef SO_BUF_LOCK
#define SO_BUF_LOCK 72
#endif
#endif /* __CR_SOCKETS_H__ */
| 4,616 | 33.977273 | 93 |
h
|
criu
|
criu-master/criu/include/syscall.h
|
#ifndef __CR_SYSCALL_H__
#define __CR_SYSCALL_H__
/*
 * Thin wrapper invoking the fsopen(2) syscall of the new mount API.
 * NOTE(review): this header includes nothing itself -- it relies on
 * the includer to provide syscall() and __NR_fsopen; confirm callers do.
 */
static inline int sys_fsopen(const char *fsname, unsigned int flags)
{
	return syscall(__NR_fsopen, fsname, flags);
}
/*
 * Thin wrapper invoking the fsconfig(2) syscall on a filesystem context fd
 * obtained from sys_fsopen(). Relies on the includer for syscall()/__NR_fsconfig.
 */
static inline int sys_fsconfig(int fd, unsigned int cmd, const char *key, const char *value, int aux)
{
	return syscall(__NR_fsconfig, fd, cmd, key, value, aux);
}
/*
 * Thin wrapper invoking the fsmount(2) syscall to turn a configured
 * filesystem context fd into a mount fd. Relies on the includer for
 * syscall()/__NR_fsmount.
 */
static inline int sys_fsmount(int fd, unsigned int flags, unsigned int attr_flags)
{
	return syscall(__NR_fsmount, fd, flags, attr_flags);
}
#endif /* __CR_SYSCALL_H__ */
| 504 | 28.705882 | 101 |
h
|
criu
|
criu-master/criu/include/sysctl.h
|
#ifndef __CR_SYSCTL_H__
#define __CR_SYSCTL_H__
struct sysctl_req {
char *name;
void *arg;
int type;
int flags;
};
extern int sysctl_op(struct sysctl_req *req, size_t nr_req, int op, unsigned int ns);
enum {
CTL_READ,
CTL_WRITE,
};
#define CTL_SHIFT 4 /* Up to 16 types */
#define CTL_U32 1 /* Single u32 */
#define CTL_U64 2 /* Single u64 */
#define __CTL_U32A 3 /* Array of u32 */
#define __CTL_U64A 4 /* Array of u64 */
#define __CTL_STR 5 /* String */
#define CTL_32 6 /* Single s32 */
#define CTL_U32A(n) (__CTL_U32A | ((n) << CTL_SHIFT))
#define CTL_U64A(n) (__CTL_U64A | ((n) << CTL_SHIFT))
#define CTL_STR(len) (__CTL_STR | ((len) << CTL_SHIFT))
#define CTL_LEN(t) ((t) >> CTL_SHIFT)
#define CTL_TYPE(t) ((t) & ((1 << CTL_SHIFT) - 1))
/*
* Some entries might be missing mark them as optional.
*/
#define CTL_FLAGS_OPTIONAL 1
#define CTL_FLAGS_HAS 2
#define CTL_FLAGS_READ_EIO_SKIP 4
#define CTL_FLAGS_IPC_EACCES_SKIP 5
#endif /* __CR_SYSCTL_H__ */
| 993 | 22.116279 | 85 |
h
|
criu
|
criu-master/criu/include/sysfs_parse.h
|
#ifndef __CR_SYSFS_PARSE_H__
#define __CR_SYSFS_PARSE_H__
#define SYSFS_AUFS "/sys/fs/aufs/"
#define SBINFO_LEN (3 + 16 + 1) /* si_%lx */
#define SBINFO_PATH_LEN (sizeof SYSFS_AUFS + SBINFO_LEN) /* /sys/fs/aufs/<sbinfo> */
#define AUFSBR_PATH_LEN (SBINFO_PATH_LEN + 6 + 1) /* /sys/fs/aufs/<sbinfo>/br%3d */
struct mount_info;
struct vma_area;
extern int parse_aufs_branches(struct mount_info *mi);
extern int fixup_aufs_vma_fd(struct vma_area *vma, int vm_file_fd);
extern void free_aufs_branches(void);
#endif /* __CR_SYSFS_PARSE_H__ */
| 546 | 31.176471 | 84 |
h
|
criu
|
criu-master/criu/include/tls.h
|
#ifndef __CR_TLS_H__
#define __CR_TLS_H__
#ifdef CONFIG_GNUTLS
int tls_x509_init(int sockfd, bool is_server);
void tls_terminate_session(bool async);
ssize_t tls_send(const void *buf, size_t len, int flags);
ssize_t tls_recv(void *buf, size_t len, int flags);
int tls_send_data_from_fd(int fd, unsigned long len);
int tls_recv_data_to_fd(int fd, unsigned long len);
#else /* CONFIG_GNUTLS */
#define tls_x509_init(sockfd, is_server) (0)
#define tls_send(buf, len, flags) (-1)
#define tls_recv(buf, len, flags) (-1)
#define tls_send_data_from_fd(fd, len) (-1)
#define tls_recv_data_to_fd(fd, len) (-1)
#define tls_terminate_session(async)
#endif /* CONFIG_HAS_GNUTLS */
#endif /* __CR_TLS_H__ */
| 707 | 25.222222 | 57 |
h
|
criu
|
criu-master/criu/include/tty.h
|
#ifndef __CR_TTY_H__
#define __CR_TTY_H__
#include <linux/major.h>
#include <linux/vt.h>
#include "files.h"
/* Kernel's limit */
#define TERMIOS_NCC 19
/* Popular serial console's majors, which not defined in <linux/major.h> */
#define USB_SERIAL_MAJOR 188
#define LOW_DENSE_SERIAL_MAJOR 204
extern const struct fdtype_ops tty_dump_ops;
struct tty_driver;
struct tty_driver *get_tty_driver(dev_t rdev, dev_t dev);
/* Report (1/0) whether a tty driver is known for the given device pair. */
static inline int is_tty(dev_t rdev, dev_t dev)
{
	if (get_tty_driver(rdev, dev))
		return 1;
	return 0;
}
extern int tty_post_actions(void);
extern int dump_verify_tty_sids(void);
extern struct collect_image_info tty_info_cinfo;
extern struct collect_image_info tty_cinfo;
extern struct collect_image_info tty_cdata;
struct mount_info;
extern int devpts_restore(struct mount_info *pm);
extern int tty_prep_fds(void);
extern int tty_init_restore(void);
extern int devpts_check_bindmount(struct mount_info *m);
#define OPT_SHELL_JOB "shell-job"
#endif /* __CR_TTY_H__ */
| 990 | 22.595238 | 75 |
h
|
criu
|
criu-master/criu/include/tun.h
|
#ifndef __CR_TUN_H__
#define __CR_TUN_H__
#ifndef TUN_MINOR
#define TUN_MINOR 200
#endif
extern struct ns_id *ns;
#include <linux/netlink.h>
#include "images/netdev.pb-c.h"
extern const struct fdtype_ops tunfile_dump_ops;
extern int dump_tun_link(NetDeviceEntry *nde, struct cr_imgset *fds, struct nlattr **info);
struct net_link;
extern int restore_one_tun(struct ns_id *ns, struct net_link *link, int nlsk);
extern struct collect_image_info tunfile_cinfo;
extern int check_tun_cr(int no_tun_err);
extern int check_tun_netns_cr(bool *result);
#endif /* __CR_TUN_H__ */
| 576 | 24.086957 | 91 |
h
|
criu
|
criu-master/criu/include/util-vdso.h
|
#ifndef __CR_UTIL_VDSO_H__
#define __CR_UTIL_VDSO_H__
/*
* VDSO management common definitions.
*
* This header file is included by the criu main code and the parasite code.
* It contains definitions shared by these 2 parts.
*
* This file should not be included except in pie/util-vdso.c, include/vdso.h
* and include/parasite-vdso.h
*/
#include <sys/types.h>
/*
* Each architecture must export:
* VDSO_SYMBOL_MAX, the number of vDSO symbols to manage
* ARCH_VDSO_SYMBOLS, a table of string containing the vDSO symbol names
* vdso_redirect_calls, a service called to redirect the vDSO symbols in
* the parasite code.
*/
#include "asm/vdso.h"
struct vdso_symbol {
char name[32];
unsigned long offset;
};
struct vdso_symtable {
unsigned long vdso_size;
unsigned long vvar_size;
struct vdso_symbol symbols[VDSO_SYMBOL_MAX];
bool vdso_before_vvar; /* order of vdso/vvar pair */
};
struct vdso_maps {
unsigned long vdso_start;
unsigned long vvar_start;
struct vdso_symtable sym;
bool compatible;
};
/* Report whether @m holds a resolved vDSO mapping (start != VDSO_BAD_ADDR). */
static inline bool vdso_is_present(struct vdso_maps *m)
{
	return m->vdso_start != VDSO_BAD_ADDR;
}
#define VDSO_SYMBOL_INIT \
{ \
.offset = VDSO_BAD_ADDR, \
}
#define VDSO_SYMTABLE_INIT \
{ \
.vdso_size = VDSO_BAD_SIZE, \
.vvar_size = VVAR_BAD_SIZE, \
.symbols = { \
[0 ... VDSO_SYMBOL_MAX - 1] = \
(struct vdso_symbol)VDSO_SYMBOL_INIT, \
}, \
.vdso_before_vvar = false, \
}
#define VDSO_MAPS_INIT \
{ \
.vdso_start = VDSO_BAD_ADDR, .vvar_start = VVAR_BAD_ADDR, .sym = VDSO_SYMTABLE_INIT, \
}
#ifdef CONFIG_VDSO_32
#define Ehdr_t Elf32_Ehdr
#define Sym_t Elf32_Sym
#define Phdr_t Elf32_Phdr
#define Word_t Elf32_Word
#define Dyn_t Elf32_Dyn
#ifndef ELF_ST_TYPE
#define ELF_ST_TYPE ELF32_ST_TYPE
#endif
#ifndef ELF_ST_BIND
#define ELF_ST_BIND ELF32_ST_BIND
#endif
#define vdso_fill_symtable vdso_fill_symtable_compat
#else /* CONFIG_VDSO_32 */
#define Ehdr_t Elf64_Ehdr
#define Sym_t Elf64_Sym
#define Phdr_t Elf64_Phdr
#define Word_t Elf64_Word
#define Dyn_t Elf64_Dyn
#ifndef ELF_ST_TYPE
#define ELF_ST_TYPE ELF64_ST_TYPE
#endif
#ifndef ELF_ST_BIND
#define ELF_ST_BIND ELF64_ST_BIND
#endif
#endif /* CONFIG_VDSO_32 */
extern int vdso_fill_symtable(uintptr_t mem, size_t size, struct vdso_symtable *t);
#endif /* __CR_UTIL_VDSO_H__ */
| 2,596 | 23.271028 | 102 |
h
|
criu
|
criu-master/criu/include/util.h
|
#ifndef __CR_UTIL_H__
#define __CR_UTIL_H__
/*
* Some bits are stolen from perf and kvm tools
*/
#include <signal.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/statfs.h>
#include <sys/sysmacros.h>
#include <dirent.h>
#include <poll.h>
#include "int.h"
#include "common/compiler.h"
#include "xmalloc.h"
#include "common/bug.h"
#include "log.h"
#include "common/err.h"
#define PREF_SHIFT_OP(pref, op, size) ((size)op(pref##BYTES_SHIFT))
#define KBYTES_SHIFT 10
#define MBYTES_SHIFT 20
#define GBYTES_SHIFT 30
#define KBYTES(size) PREF_SHIFT_OP(K, >>, size)
#define MBYTES(size) PREF_SHIFT_OP(M, >>, size)
#define GBYTES(size) PREF_SHIFT_OP(G, >>, size)
#define KILO(size) PREF_SHIFT_OP(K, <<, size)
#define MEGA(size) PREF_SHIFT_OP(M, <<, size)
#define GIGA(size) PREF_SHIFT_OP(G, <<, size)
struct vma_area;
struct list_head;
extern int service_fd_rlim_cur;
extern void pr_vma(const struct vma_area *vma_area);
#define pr_info_vma(vma_area) pr_vma(vma_area)
#define pr_vma_list(head) \
do { \
struct vma_area *vma; \
list_for_each_entry(vma, head, list) \
pr_vma(vma); \
} while (0)
#define pr_info_vma_list(head) pr_vma_list(head)
extern int move_fd_from(int *img_fd, int want_fd);
extern int close_safe(int *fd);
extern int reopen_fd_as_safe(char *file, int line, int new_fd, int old_fd, bool allow_reuse_fd);
#define reopen_fd_as(new_fd, old_fd) reopen_fd_as_safe(__FILE__, __LINE__, new_fd, old_fd, false)
#define reopen_fd_as_nocheck(new_fd, old_fd) reopen_fd_as_safe(__FILE__, __LINE__, new_fd, old_fd, true)
extern void close_proc(void);
extern int open_pid_proc(pid_t pid);
extern int close_pid_proc(void);
extern int set_proc_fd(int fd);
extern pid_t sys_clone_unified(unsigned long flags, void *child_stack, void *parent_tid, void *child_tid,
unsigned long newtls);
/*
* Values for pid argument of the proc opening routines below.
* SELF would open file under /proc/self
* GEN would open a file under /proc itself
* NONE is internal, don't use it ;)
*/
#define PROC_SELF 0
#define PROC_GEN -1
#define PROC_NONE -2
extern int do_open_proc(pid_t pid, int flags, const char *fmt, ...) __attribute__((__format__(__printf__, 3, 4)));
#define __open_proc(pid, ier, flags, fmt, ...) \
({ \
int __fd = do_open_proc(pid, flags, fmt, ##__VA_ARGS__); \
if (__fd < 0 && (errno != (ier))) \
pr_perror("Can't open %d/" fmt " on procfs", pid, ##__VA_ARGS__); \
\
__fd; \
})
/* int open_proc(pid_t pid, const char *fmt, ...); */
#define open_proc(pid, fmt, ...) __open_proc(pid, 0, O_RDONLY, fmt, ##__VA_ARGS__)
/* int open_proc_rw(pid_t pid, const char *fmt, ...); */
#define open_proc_rw(pid, fmt, ...) __open_proc(pid, 0, O_RDWR, fmt, ##__VA_ARGS__)
#define open_proc_path(pid, fmt, ...) __open_proc(pid, 0, O_PATH, fmt, ##__VA_ARGS__)
/* DIR *opendir_proc(pid_t pid, const char *fmt, ...); */
#define opendir_proc(pid, fmt, ...) \
({ \
int __fd = open_proc(pid, fmt, ##__VA_ARGS__); \
DIR *__d = NULL; \
\
if (__fd >= 0) { \
__d = fdopendir(__fd); \
if (__d == NULL) \
pr_perror("Can't fdopendir %d " \
"(%d/" fmt " on procfs)", \
__fd, pid, ##__VA_ARGS__); \
} \
__d; \
})
/* FILE *fopen_proc(pid_t pid, const char *fmt, ...); */
#define fopen_proc(pid, fmt, ...) \
({ \
int __fd = open_proc(pid, fmt, ##__VA_ARGS__); \
FILE *__f = NULL; \
\
if (__fd >= 0) { \
__f = fdopen(__fd, "r"); \
if (__f == NULL) \
pr_perror("Can't fdopen %d " \
"(%d/" fmt " on procfs)", \
__fd, pid, ##__VA_ARGS__); \
} \
__f; \
})
#define DEVZERO (makedev(1, 5))
#define KDEV_MINORBITS 20
#define KDEV_MINORMASK ((1UL << KDEV_MINORBITS) - 1)
#define MKKDEV(ma, mi) (((ma) << KDEV_MINORBITS) | (mi))
/* Extract the major number from a kernel-encoded device id. */
static inline u32 kdev_major(u32 kdev)
{
	u32 major = kdev >> KDEV_MINORBITS;

	return major;
}
/* Extract the minor number from a kernel-encoded device id. */
static inline u32 kdev_minor(u32 kdev)
{
	u32 minor = kdev & KDEV_MINORMASK;

	return minor;
}
/*
 * Convert a kernel-encoded device id into userspace dev_t.
 *
 * New kernels encode devices in a new form; see the kernel's
 * fs/stat.c (the choose_32_64 helpers) for details. Split the
 * kernel encoding and re-pack it with userspace makedev().
 */
static inline dev_t kdev_to_odev(u32 kdev)
{
	return makedev(kdev_major(kdev), kdev_minor(kdev));
}
extern int copy_file(int fd_in, int fd_out, size_t bytes);
extern int is_anon_link_type(char *link, char *type);
#define is_hex_digit(c) (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || ((c) >= 'A' && (c) <= 'F'))
#define CRS_CAN_FAIL 0x1 /* cmd can validly exit with non zero code */
extern int cr_system(int in, int out, int err, char *cmd, char *const argv[], unsigned flags);
extern int cr_system_userns(int in, int out, int err, char *cmd, char *const argv[], unsigned flags, int userns_pid);
extern pid_t fork_and_ptrace_attach(int (*child_setup)(void));
extern int cr_daemon(int nochdir, int noclose, int close_fd);
extern int status_ready(void);
extern int is_root_user(void);
extern int set_proc_self_fd(int fd);
/* True for the "." and ".." directory entries that every readdir() yields. */
static inline bool dir_dots(const struct dirent *de)
{
	const char *name = de->d_name;

	if (strcmp(name, ".") == 0)
		return true;
	return strcmp(name, "..") == 0;
}
extern int is_empty_dir(int dirfd);
/*
* Size of buffer to carry the worst case or /proc/self/fd/N
* path. Since fd is an integer, we can easily estimate one :)
*/
#define PSFDS (sizeof("/proc/self/fd/2147483647"))
extern int read_fd_link(int lfd, char *buf, size_t size);
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
int vaddr_to_pfn(int fd, unsigned long vaddr, u64 *pfn);
/*
* Check whether @str starts with @sub and report the
* next character of @str in @end
*/
/*
 * Check whether @str starts with @sub and report the next character
 * of @str in @end.
 *
 * If @sub ends with a '/' (i.e. "/", "./" or "path/"), @end is set
 * to '/' so that a trailing-slash prefix still counts as a full path
 * component for issubpath().
 *
 * Fix: the original dereferenced *(sub - 1) even when @sub was an
 * empty string, reading one byte before the buffer (undefined
 * behavior). Remember the start of @sub and only look back once at
 * least one character has been matched.
 */
static inline bool strstartswith2(const char *str, const char *sub, char *end)
{
	const char *sub_start = sub;

	while (1) {
		if (*sub == '\0') { /* end of sub -- match */
			if (end) {
				if (sub != sub_start && *(sub - 1) == '/') /* "/", "./" or "path/" */
					*end = '/';
				else
					*end = *str;
			}
			return true;
		}
		if (*str == '\0') /* end of str, sub is NOT ended -- miss */
			return false;
		if (*str != *sub)
			return false;
		str++;
		sub++;
	}
}
/* Prefix test without reporting the follow-up character. */
static inline bool strstartswith(const char *str, const char *sub)
{
	/* Delegate to the two-argument variant, discarding the end char. */
	return strstartswith2(str, sub, NULL);
}
/*
* Checks whether the @path has @sub_path as a sub path, i.e.
* sub_path is the beginning of path and the last component
* match is full (next character terminates path component).
*
* Paths shouldn't contain excessive /-s, i.e. only one slash
* between path components and no slash at the end (except for
* the "/" path. This is pretty good assumption to what paths
* are used by criu.
*/
/*
 * True when @sub_path is a full-component prefix of @path: the match
 * must stop either at a '/' or at the end of @path (per the comment
 * above, paths are assumed normalized -- no excessive slashes).
 */
static inline bool issubpath(const char *path, const char *sub_path)
{
	char next = '\0';

	if (!strstartswith2(path, sub_path, &next))
		return false;
	return next == '/' || next == '\0';
}
extern char *get_relative_path(char *path, char *sub_path);
extern bool is_sub_path(char *path, char *sub_path);
extern bool is_same_path(char *path1, char *path2);
int strip_deleted(char *path, int len);
int cut_path_ending(char *path, char *sub_path);
/*
* mkdir -p
*/
int mkdirpat(int fd, const char *path, int mode);
/*
* Tests whether a path is a prefix of another path. This is different than
* strstartswith because "/foo" is _not_ a path prefix of "/foobar", since they
* refer to different directories.
*/
bool is_path_prefix(const char *path, const char *prefix);
FILE *fopenat(int dirfd, char *path, char *cflags);
void split(char *str, char token, char ***out, int *n);
int fd_has_data(int lfd);
int make_yard(char *path);
/* Block indefinitely until @sk has data to read; returns poll()'s result. */
static inline int sk_wait_data(int sk)
{
	struct pollfd wait_fd = {
		.fd = sk,
		.events = POLLIN,
	};

	return poll(&wait_fd, 1, -1);
}
void fd_set_nonblocking(int fd, bool on);
void tcp_nodelay(int sk, bool on);
void tcp_cork(int sk, bool on);
const char *ns_to_string(unsigned int ns);
int xatol(const char *string, long *number);
int xatoi(const char *string, int *number);
char *xstrcat(char *str, const char *fmt, ...) __attribute__((__format__(__printf__, 2, 3)));
char *xsprintf(const char *fmt, ...) __attribute__((__format__(__printf__, 1, 2)));
int setup_tcp_server(char *type, char *addr, unsigned short *port);
int run_tcp_server(bool daemon_mode, int *ask, int cfd, int sk);
int setup_tcp_client(char *hostname);
/* path should be writable and no more than PATH_MAX long */
int rmrf(char *path);
#define LAST_PID_PATH "sys/kernel/ns_last_pid"
#define PID_MAX_PATH "sys/kernel/pid_max"
#define block_sigmask(saved_mask, sig_mask) \
({ \
sigset_t ___blocked_mask; \
int ___ret = 0; \
sigemptyset(&___blocked_mask); \
sigaddset(&___blocked_mask, sig_mask); \
if (sigprocmask(SIG_BLOCK, &___blocked_mask, saved_mask) == -1) { \
pr_perror("Can not set mask of blocked signals"); \
___ret = -1; \
} \
___ret; \
})
#define restore_sigmask(saved_mask) \
({ \
int ___ret = 0; \
if (sigprocmask(SIG_SETMASK, saved_mask, NULL) == -1) { \
pr_perror("Can not unset mask of blocked signals"); \
___ret = -1; \
} \
___ret; \
})
/*
* Helpers to organize asynchronous reading from a bunch
* of file descriptors.
*/
#include <sys/epoll.h>
struct epoll_rfd {
int fd;
/*
* EPOLLIN notification. The data is available for read in
* rfd->fd.
* @return 0 to resume polling, 1 to stop polling or a
* negative error code
*/
int (*read_event)(struct epoll_rfd *);
/*
* EPOLLHUP | EPOLLRDHUP notification. The remote side has
* close the connection for rfd->fd.
* @return 0 to resume polling, 1 to stop polling or a
* negative error code
*/
int (*hangup_event)(struct epoll_rfd *);
};
extern int epoll_add_rfd(int epfd, struct epoll_rfd *);
extern int epoll_del_rfd(int epfd, struct epoll_rfd *rfd);
extern int epoll_run_rfds(int epfd, struct epoll_event *evs, int nr_fds, int tmo);
extern int epoll_prepare(int nr_events, struct epoll_event **evs);
extern void rlimit_unlimit_nofile(void);
extern int call_in_child_process(int (*fn)(void *), void *arg);
#ifdef __GLIBC__
extern void print_stack_trace(pid_t pid);
#else
static inline void print_stack_trace(pid_t pid)
{
}
#endif
#define block_sigmask(saved_mask, sig_mask) \
({ \
sigset_t ___blocked_mask; \
int ___ret = 0; \
sigemptyset(&___blocked_mask); \
sigaddset(&___blocked_mask, sig_mask); \
if (sigprocmask(SIG_BLOCK, &___blocked_mask, saved_mask) == -1) { \
pr_perror("Can not set mask of blocked signals"); \
___ret = -1; \
} \
___ret; \
})
#define restore_sigmask(saved_mask) \
({ \
int ___ret = 0; \
if (sigprocmask(SIG_SETMASK, saved_mask, NULL) == -1) { \
pr_perror("Can not unset mask of blocked signals"); \
___ret = -1; \
} \
___ret; \
})
extern int mount_detached_fs(const char *fsname);
extern char *get_legacy_iptables_bin(bool ipv6, bool restore);
extern int set_opts_cap_eff(void);
extern ssize_t read_all(int fd, void *buf, size_t size);
extern ssize_t write_all(int fd, const void *buf, size_t size);
#define cleanup_free __attribute__((cleanup(cleanup_freep)))
/*
 * __attribute__((cleanup)) helper: @p points at the pointer variable
 * going out of scope; free what it refers to (free(NULL) is a no-op).
 */
static inline void cleanup_freep(void *p)
{
	free(*(void **)p);
}
extern int run_command(char *buf, size_t buf_size, int (*child_fn)(void *), void *args);
/*
* criu_run_id is a unique value of the current run. It can be used to
* generate resource ID-s to avoid conflicts with other CRIU processes.
*/
extern uint64_t criu_run_id;
extern void util_init(void);
extern char *resolve_mountpoint(char *path);
#endif /* __CR_UTIL_H__ */
| 14,016 | 32.939467 | 117 |
h
|
criu
|
criu-master/criu/include/vdso.h
|
#ifndef __CR_VDSO_H__
#define __CR_VDSO_H__
#include <sys/mman.h>
#include <stdbool.h>
#include "common/config.h"
#include "util-vdso.h"
extern struct vdso_maps vdso_maps;
extern struct vdso_maps vdso_maps_compat;
extern int vdso_init_dump(void);
extern int vdso_init_restore(void);
extern int kerndat_vdso_fill_symtable(void);
extern int kerndat_vdso_preserves_hint(void);
extern int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid, struct vm_area_list *vma_area_list);
#ifdef CONFIG_COMPAT
extern void compat_vdso_helper(struct vdso_maps *native, int pipe_fd, int err_fd, void *vdso_buf, size_t buf_size);
#endif
#endif /* __CR_VDSO_H__ */
| 657 | 25.32 | 115 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.