author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
259,881 | 24.05.2019 13:23:01 | 25,200 | 6cdec6fadf7515a2d8feddcbc3058927897cbbc9 | Wrap comments and reword in common present tense | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/prctl.go",
"new_path": "pkg/abi/linux/prctl.go",
"diff": "@@ -16,80 +16,80 @@ package linux\n// PR_* flags, from <linux/pcrtl.h> for prctl(2).\nconst (\n- // PR_SET_PDEATHSIG will set the process' death signal.\n+ // PR_SET_PDEATHSIG sets the process' death signal.\nPR_SET_PDEATHSIG = 1\n- // PR_GET_PDEATHSIG will get the process' death signal.\n+ // PR_GET_PDEATHSIG gets the process' death signal.\nPR_GET_PDEATHSIG = 2\n- // PR_GET_DUMPABLE will get the process's dumpable flag.\n+ // PR_GET_DUMPABLE gets the process' dumpable flag.\nPR_GET_DUMPABLE = 3\n- // PR_SET_DUMPABLE will set the process's dumpable flag.\n+ // PR_SET_DUMPABLE sets the process' dumpable flag.\nPR_SET_DUMPABLE = 4\n- // PR_GET_KEEPCAPS will get the value of the keep capabilities flag.\n+ // PR_GET_KEEPCAPS gets the value of the keep capabilities flag.\nPR_GET_KEEPCAPS = 7\n- // PR_SET_KEEPCAPS will set the value of the keep capabilities flag.\n+ // PR_SET_KEEPCAPS sets the value of the keep capabilities flag.\nPR_SET_KEEPCAPS = 8\n- // PR_GET_TIMING will get the process's timing method.\n+ // PR_GET_TIMING gets the process' timing method.\nPR_GET_TIMING = 13\n- // PR_SET_TIMING will set the process's timing method.\n+ // PR_SET_TIMING sets the process' timing method.\nPR_SET_TIMING = 14\n- // PR_SET_NAME will set the process' name.\n+ // PR_SET_NAME sets the process' name.\nPR_SET_NAME = 15\n- // PR_GET_NAME will get the process' name.\n+ // PR_GET_NAME gets the process' name.\nPR_GET_NAME = 16\n- // PR_GET_SECCOMP will get a process' seccomp mode.\n+ // PR_GET_SECCOMP gets a process' seccomp mode.\nPR_GET_SECCOMP = 21\n- // PR_SET_SECCOMP will set a process' seccomp mode.\n+ // PR_SET_SECCOMP sets a process' seccomp mode.\nPR_SET_SECCOMP = 22\n- // PR_CAPBSET_READ will get the capability bounding set.\n+ // PR_CAPBSET_READ gets the capability bounding set.\nPR_CAPBSET_READ = 23\n- // PR_CAPBSET_DROP will set the capability bounding set.\n+ // PR_CAPBSET_DROP sets the capability bounding set.\nPR_CAPBSET_DROP = 24\n- // PR_GET_TSC will get the the value of the flag determining whether the\n+ // PR_GET_TSC gets the value of the flag determining whether the\n// timestamp counter can be read.\nPR_GET_TSC = 25\n- // PR_SET_TSC will set the the value of the flag determining whether the\n+ // PR_SET_TSC sets the value of the flag determining whether the\n// timestamp counter can be read.\nPR_SET_TSC = 26\n- // PR_SET_TIMERSLACK set the process's time slack.\n+ // PR_SET_TIMERSLACK sets the process' time slack.\nPR_SET_TIMERSLACK = 29\n- // PR_GET_TIMERSLACK get the process's time slack.\n+ // PR_GET_TIMERSLACK gets the process' time slack.\nPR_GET_TIMERSLACK = 30\n- // PR_TASK_PERF_EVENTS_DISABLE disable all performance counters attached to\n- // the calling process.\n+ // PR_TASK_PERF_EVENTS_DISABLE disables all performance counters\n+ // attached to the calling process.\nPR_TASK_PERF_EVENTS_DISABLE = 31\n- // PR_TASK_PERF_EVENTS_ENABLE enable all performance counters attached to\n- // the calling process.\n+ // PR_TASK_PERF_EVENTS_ENABLE enables all performance counters attached\n+ // to the calling process.\nPR_TASK_PERF_EVENTS_ENABLE = 32\n- // PR_MCE_KILL set the machine check memory corruption kill policy for the\n- // calling thread.\n+ // PR_MCE_KILL sets the machine check memory corruption kill policy for\n+ // the calling thread.\nPR_MCE_KILL = 33\n- // PR_MCE_KILL_GET get the machine check memory corruption kill policy for the\n- // calling thread.\n+ // PR_MCE_KILL_GET gets the machine check memory corruption kill policy\n+ // for the calling 
thread.\nPR_MCE_KILL_GET = 34\n- // PR_SET_MM will modify certain kernel memory map descriptor fields of the\n- // calling process. See prctl(2) for more information.\n+ // PR_SET_MM modifies certain kernel memory map descriptor fields of\n+ // the calling process. See prctl(2) for more information.\nPR_SET_MM = 35\nPR_SET_MM_START_CODE = 1\n@@ -104,44 +104,45 @@ const (\nPR_SET_MM_ENV_START = 10\nPR_SET_MM_ENV_END = 11\nPR_SET_MM_AUXV = 12\n- // PR_SET_MM_EXE_FILE will supersede the /proc/pid/exe symbolic link with a\n- // new one pointing to a new executable file identified by the file descriptor\n- // provided in arg3 argument. See prctl(2) for more information.\n+ // PR_SET_MM_EXE_FILE supersedes the /proc/pid/exe symbolic link with a\n+ // new one pointing to a new executable file identified by the file\n+ // descriptor provided in arg3 argument. See prctl(2) for more\n+ // information.\nPR_SET_MM_EXE_FILE = 13\nPR_SET_MM_MAP = 14\nPR_SET_MM_MAP_SIZE = 15\n- // PR_SET_CHILD_SUBREAPER set the \"child subreaper\" attribute of the calling\n- // process.\n+ // PR_SET_CHILD_SUBREAPER sets the \"child subreaper\" attribute of the\n+ // calling process.\nPR_SET_CHILD_SUBREAPER = 36\n- // PR_GET_CHILD_SUBREAPER get the \"child subreaper\" attribute of the calling\n- // process.\n+ // PR_GET_CHILD_SUBREAPER gets the \"child subreaper\" attribute of the\n+ // calling process.\nPR_GET_CHILD_SUBREAPER = 37\n- // PR_SET_NO_NEW_PRIVS will set the calling thread's no_new_privs bit.\n+ // PR_SET_NO_NEW_PRIVS sets the calling thread's no_new_privs bit.\nPR_SET_NO_NEW_PRIVS = 38\n- // PR_GET_NO_NEW_PRIVS will get the calling thread's no_new_privs bit.\n+ // PR_GET_NO_NEW_PRIVS gets the calling thread's no_new_privs bit.\nPR_GET_NO_NEW_PRIVS = 39\n- // PR_GET_TID_ADDRESS retrieve the clear_child_tid address.\n+ // PR_GET_TID_ADDRESS retrieves the clear_child_tid address.\nPR_GET_TID_ADDRESS = 40\n- // PR_SET_THP_DISABLE set the state of the \"THP disable\" flag for the calling\n- // thread.\n+ // PR_SET_THP_DISABLE sets the state of the \"THP disable\" flag for the\n+ // calling thread.\nPR_SET_THP_DISABLE = 41\n- // PR_GET_THP_DISABLE get the state of the \"THP disable\" flag for the calling\n- // thread.\n+ // PR_GET_THP_DISABLE gets the state of the \"THP disable\" flag for the\n+ // calling thread.\nPR_GET_THP_DISABLE = 42\n- // PR_MPX_ENABLE_MANAGEMENT enable kernel management of Memory Protection\n- // eXtensions (MPX) bounds tables.\n+ // PR_MPX_ENABLE_MANAGEMENT enables kernel management of Memory\n+ // Protection eXtensions (MPX) bounds tables.\nPR_MPX_ENABLE_MANAGEMENT = 43\n- // PR_MPX_DISABLE_MANAGEMENTdisable kernel management of Memory Protection\n- // eXtensions (MPX) bounds tables.\n+ // PR_MPX_DISABLE_MANAGEMENT disables kernel management of Memory\n+ // Protection eXtensions (MPX) bounds tables.\nPR_MPX_DISABLE_MANAGEMENT = 44\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Wrap comments and reword in common present tense
PiperOrigin-RevId: 249888234
Change-Id: Icfef32c3ed34809c34100c07e93e9581c786776e |
259,858 | 24.05.2019 16:16:54 | 25,200 | ed5793808e9d97789c9494d86c9fa4ed62df46bb | Remove obsolete TODO.
We don't need to model internal interfaces after the system
call interfaces (which are objectively worse and simply use a
flag to distinguish between two logically different operations). | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/inode_operations.go",
"new_path": "pkg/sentry/fs/inode_operations.go",
"diff": "@@ -117,9 +117,6 @@ type InodeOperations interface {\n// Remove removes the given named non-directory under dir.\n//\n// The caller must ensure that this operation is permitted.\n- //\n- // TODO(b/67778723): merge Remove and RemoveDirectory, Remove\n- // just needs a type flag.\nRemove(ctx context.Context, dir *Inode, name string) error\n// RemoveDirectory removes the given named directory under dir.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove obsolete TODO.
We don't need to model internal interfaces after the system
call interfaces (which are objectively worse and simply use a
flag to distinguish between two logically different operations).
PiperOrigin-RevId: 249916814
Change-Id: I45d02e0ec0be66b782a685b1f305ea027694cab9 |
260,017 | 27.05.2019 20:12:08 | -3,600 | b57660dffbc5a49882d9ba8e5136158e640b3885 | Changed example command for profiling CPU/heap
Profiling a container that runs `sleep 1000` command dumps an empty CPU profile that may cause unnecessary questions. I suggest replacing this example with one mentioned in the same doc above. | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/debugging.md",
"new_path": "content/docs/user_guide/debugging.md",
"diff": "@@ -89,7 +89,7 @@ the options available:\nFor example:\n```bash\n-docker run --runtime=runsc-prof --rm -d alpine sleep 1000\n+docker run --runtime=runsc-prof --rm -d alpine sh -c \"while true; do echo running; sleep .1; done\"\n63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\nsudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-heap=/tmp/heap.prof 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n"
}
] | Go | Apache License 2.0 | google/gvisor | Changed example command for profiling CPU/heap
Profiling a container that runs `sleep 1000` command dumps an empty CPU profile that may cause unnecessary questions. I suggest replacing this example with one mentioned in the same doc above. |
260,017 | 28.05.2019 11:53:18 | -3,600 | 7c94c9d77c6287d296f4ffed925ef5efa3a173f1 | Changed sleep time in examples
from 0.1 sec to 1 sec (in two places), according to the feedback | [
{
"change_type": "MODIFY",
"old_path": "content/docs/user_guide/debugging.md",
"new_path": "content/docs/user_guide/debugging.md",
"diff": "@@ -46,7 +46,7 @@ gVisor. It connects to the sandbox process, collects a stack dump, and writes\nit to the console. For example:\n```bash\n-docker run --runtime=runsc --rm -d alpine sh -c \"while true; do echo running; sleep .1; done\"\n+docker run --runtime=runsc --rm -d alpine sh -c \"while true; do echo running; sleep 1; done\"\n63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\nsudo runsc --root /var/run/docker/runtime-runsc/moby debug --stacks 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n@@ -89,7 +89,7 @@ the options available:\nFor example:\n```bash\n-docker run --runtime=runsc-prof --rm -d alpine sh -c \"while true; do echo running; sleep .1; done\"\n+docker run --runtime=runsc-prof --rm -d alpine sh -c \"while true; do echo running; sleep 1; done\"\n63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\nsudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-heap=/tmp/heap.prof 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n"
}
] | Go | Apache License 2.0 | google/gvisor | Changed sleep time in examples
from 0.1 sec to 1 sec (in two places), according to the feedback |
259,884 | 29.05.2019 04:17:23 | 14,400 | b532e65552a6035a827b5df28aaaee4d003c420e | Add gitter link to footer | [
{
"change_type": "MODIFY",
"old_path": "config.toml",
"new_path": "config.toml",
"diff": "@@ -115,6 +115,11 @@ no = 'Sorry to hear that. Please <a href=\"https://github.com/USERNAME/REPOSITORY\nurl = \"/docs/\"\nicon = \"fa fa-book\"\ndesc = \"Read our documentation to understand gVisor, its architecture and trade-offs, and how to use it.\"\n+[[params.links.user]]\n+ name = \"Chat\"\n+ url = \"https://gitter.im/gvisor/community\"\n+ icon = \"fa fa-comment\"\n+ desc = \"Get support and chat about how to use gVisor\"\n[[params.links.user]]\nname = \"User mailing list\"\nurl = \"https://groups.google.com/forum/#!forum/gvisor-users\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add gitter link to footer |
259,858 | 24.05.2019 17:10:43 | 25,200 | 2165b77774eaa40bb7d870fddea733cd899006b9 | Remove obsolete bug.
The original bug is no longer relevant, and the FIXME here
contains lots of obsolete information. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dentry.go",
"new_path": "pkg/sentry/fs/dentry.go",
"diff": "@@ -83,9 +83,6 @@ type DirCtx struct {\nattrs map[string]DentAttr\n// DirCursor is the directory cursor.\n- // TODO(b/67778717): Once Handles are removed this can just live in the\n- // respective FileOperations implementations and not need to get\n- // plumbed everywhere.\nDirCursor *string\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove obsolete bug.
The original bug is no longer relevant, and the FIXME here
contains lots of obsolete information.
PiperOrigin-RevId: 249924036 |
259,858 | 24.05.2019 18:09:38 | 25,200 | f29ea87d2aa155e1b7966c9f899441cd11eae4ab | Create annotated tags for release. | [
{
"change_type": "MODIFY",
"old_path": "tools/tag_release.sh",
"new_path": "tools/tag_release.sh",
"diff": "# validate a provided release name, create a tag and push it. It must be\n# run manually when a release is created.\n-set -euxo pipefail\n+set -xeu\n# Check arguments.\nif [ \"$#\" -ne 2 ]; then\n@@ -26,28 +26,43 @@ if [ \"$#\" -ne 2 ]; then\nexit 1\nfi\n-commit=$1\n-release=$2\n+declare -r target_commit=\"$1\"\n+declare -r release=\"$2\"\n+\n+closest_commit() {\n+ while read line; do\n+ if [[ \"$line\" =~ \"commit \" ]]; then\n+ current_commit=\"${line#commit }\"\n+ continue\n+ elif [[ \"$line\" =~ \"PiperOrigin-RevId: \" ]]; then\n+ revid=\"${line#PiperOrigin-RevId: }\"\n+ [[ \"${revid}\" -le \"$1\" ]] && break\n+ fi\n+ done\n+ echo \"${current_commit}\"\n+}\n# Is the passed identifier a sha commit?\n-if ! git show \"${commit}\" &> /dev/null; then\n+if ! git show \"${target_commit}\" &> /dev/null; then\n# Extract the commit given a piper ID.\n- commit=$(git log|grep -E \"(^commit |^ PiperOrigin-RevId:)\" |grep -B1 \"RevId: ${commit}\"| head -n1|cut -d\" \" -f2)\n+ declare -r commit=\"$(git log | closest_commit \"${target_commit}\")\"\n+else\n+ declare -r commit=\"${target_commit}\"\nfi\nif ! git show \"${commit}\" &> /dev/null; then\n- echo \"unknown commit: ${commit}\"\n+ echo \"unknown commit: ${target_commit}\"\nexit 1\nfi\n# Is the release name sane? Must be a date with patch/rc.\nif ! [[ \"${release}\" =~ ^20[0-9]{6}\\.[0-9]+$ ]]; then\n- expected=$(date +%Y%m%d.0) # Use today's date.\n+ declare -r expected=\"$(date +%Y%m%d.0)\" # Use today's date.\necho \"unexpected release format: ${release}\"\necho \" ... expected like ${expected}\"\nexit 1\nfi\n-# Tag the given commit.\n-tag=\"release-${release}\"\n-(git tag \"${tag}\" \"${commit}\" && git push origin tag \"${tag}\") || \\\n+# Tag the given commit (annotated, to record the committer).\n+declare -r tag=\"release-${release}\"\n+(git tag -a \"${tag}\" \"${commit}\" && git push origin tag \"${tag}\") || \\\n(git tag -d \"${tag}\" && false)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Create annotated tags for release.
PiperOrigin-RevId: 249929942 |
259,992 | 28.05.2019 11:16:45 | 25,200 | 1e42b4cfcad9ff4becb1041b14107815f585becf | Update internal flag name and documentation
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/exec.go",
"new_path": "runsc/cmd/exec.go",
"diff": "@@ -40,6 +40,8 @@ import (\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n+const privateClearStatusFlag = \"private-clear-status\"\n+\n// Exec implements subcommands.Command for the \"exec\" command.\ntype Exec struct {\ncwd string\n@@ -102,8 +104,9 @@ func (ex *Exec) SetFlags(f *flag.FlagSet) {\nf.StringVar(&ex.internalPidFile, \"internal-pid-file\", \"\", \"filename that the container-internal pid will be written to\")\nf.StringVar(&ex.consoleSocket, \"console-socket\", \"\", \"path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal\")\n- // clear-status is expected to only be set when we fork due to --detach being set.\n- f.BoolVar(&ex.clearStatus, \"clear-status\", true, \"clear the status of the exec'd process upon completion\")\n+ // This flag clears the status of the exec'd process upon completion. It is\n+ // only used when we fork due to --detach being set on the parent.\n+ f.BoolVar(&ex.clearStatus, privateClearStatusFlag, true, \"private flag, do not use\")\n}\n// Execute implements subcommands.Command.Execute. It starts a process in an\n@@ -210,10 +213,10 @@ func (ex *Exec) execAndWait(waitStatus *syscall.WaitStatus) subcommands.ExitStat\n// Add the rest of the args, excluding the \"detach\" flag.\nfor _, a := range os.Args[1:] {\nif strings.Contains(a, \"detach\") {\n- // Replace with the \"clear-status\" flag, which tells\n+ // Replace with the \"private-clear-status\" flag, which tells\n// the new process it's a detached child and shouldn't\n// clear the exit status of the sentry process.\n- args = append(args, \"--clear-status=false\")\n+ args = append(args, fmt.Sprintf(\"--%s=false\", privateClearStatusFlag))\n} else {\nargs = append(args, a)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update internal flag name and documentation
Updates #234
PiperOrigin-RevId: 250323553 |
259,853 | 28.05.2019 11:47:46 | 25,200 | 673358c0d94f82ac56d9f4f6e7aec7ff5761e1cc | runsc/do: allow to run commands in a host network namespace | [
{
"change_type": "MODIFY",
"old_path": "kokoro/run_tests.sh",
"new_path": "kokoro/run_tests.sh",
"diff": "@@ -182,6 +182,17 @@ run_syscall_tests() {\n--test_tag_filters=runsc_ptrace //test/syscalls/...\n}\n+run_runsc_do_tests() {\n+ local runsc=$(find bazel-bin/runsc -type f -executable -name \"runsc\" | head -n1)\n+\n+ # run runsc do without root privileges.\n+ unshare -Ur ${runsc} --network=none --TESTONLY-unsafe-nonroot do true\n+ unshare -Ur ${runsc} --TESTONLY-unsafe-nonroot --network=host do --netns=false true\n+\n+ # run runsc do with root privileges.\n+ sudo -n -E ${runsc} do true\n+}\n+\n# Find and rename all test xml and log files so that Sponge can pick them up.\n# XML files must be named sponge_log.xml, and log files must be named\n# sponge_log.log. We move all such files into KOKORO_ARTIFACTS_DIR, in a\n@@ -234,6 +245,7 @@ main() {\nrun_root_tests\nrun_syscall_tests\n+ run_runsc_do_tests\n# Build other flavors too.\nbuild_everything dbg\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/do.go",
"new_path": "runsc/cmd/do.go",
"diff": "@@ -42,6 +42,7 @@ type Do struct {\nroot string\ncwd string\nip string\n+ networkNamespace bool\n}\n// Name implements subcommands.Command.Name.\n@@ -71,6 +72,7 @@ func (c *Do) SetFlags(f *flag.FlagSet) {\nf.StringVar(&c.root, \"root\", \"/\", `path to the root directory, defaults to \"/\"`)\nf.StringVar(&c.cwd, \"cwd\", \".\", \"path to the current directory, defaults to the current directory\")\nf.StringVar(&c.ip, \"ip\", \"192.168.10.2\", \"IPv4 address for the sandbox\")\n+ f.BoolVar(&c.networkNamespace, \"netns\", true, \"run in a new network namespace\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -118,7 +120,11 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nspecutils.LogSpec(spec)\ncid := fmt.Sprintf(\"runsc-%06d\", rand.Int31n(1000000))\n- if conf.Network != boot.NetworkNone {\n+ if !c.networkNamespace {\n+ if conf.Network != boot.NetworkHost {\n+ Fatalf(\"The current network namespace can be used only if --network=host is set\", nil)\n+ }\n+ } else if conf.Network != boot.NetworkNone {\nclean, err := c.setupNet(cid, spec)\nif err != nil {\nreturn Errorf(\"Error setting up network: %v\", err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc/do: allow to run commands in a host network namespace
PiperOrigin-RevId: 250329795 |
259,881 | 28.05.2019 18:02:07 | 25,200 | 507a15dce974d0cff18253ba50af29d6579bacc5 | Always wait on tracee children
After ("wait/ptrace: assume
__WALL if the child is traced") (Linux 4.7), tracees are always eligible
for waiting, regardless of type. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -803,13 +803,17 @@ type WaitOptions struct {\n}\n// Preconditions: The TaskSet mutex must be locked (for reading or writing).\n-func (o *WaitOptions) matchesTask(t *Task, pidns *PIDNamespace) bool {\n+func (o *WaitOptions) matchesTask(t *Task, pidns *PIDNamespace, tracee bool) bool {\nif o.SpecificTID != 0 && o.SpecificTID != pidns.tids[t] {\nreturn false\n}\nif o.SpecificPGID != 0 && o.SpecificPGID != pidns.pgids[t.tg.processGroup] {\nreturn false\n}\n+ // Tracees are always eligible.\n+ if tracee {\n+ return true\n+ }\nif t == t.tg.leader && t.tg.terminationSignal == linux.SIGCHLD {\nreturn o.NonCloneTasks\n}\n@@ -903,7 +907,7 @@ func (t *Task) waitParentLocked(opts *WaitOptions, parent *Task) (*WaitResult, b\nanyWaitableTasks := false\nfor child := range parent.children {\n- if !opts.matchesTask(child, parent.tg.pidns) {\n+ if !opts.matchesTask(child, parent.tg.pidns, false) {\ncontinue\n}\n// Non-leaders don't notify parents on exit and aren't eligible to\n@@ -946,7 +950,7 @@ func (t *Task) waitParentLocked(opts *WaitOptions, parent *Task) (*WaitResult, b\n}\n}\nfor tracee := range parent.ptraceTracees {\n- if !opts.matchesTask(tracee, parent.tg.pidns) {\n+ if !opts.matchesTask(tracee, parent.tg.pidns, true) {\ncontinue\n}\n// Non-leaders do notify tracers on exit.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -3179,6 +3179,7 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"//test/util:cleanup\",\n+ \"//test/util:file_descriptor\",\n\"//test/util:logging\",\n\"//test/util:multiprocess_util\",\n\"//test/util:posix_error\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/wait.cc",
"new_path": "test/syscalls/linux/wait.cc",
"diff": "#include <signal.h>\n#include <sys/mman.h>\n+#include <sys/ptrace.h>\n#include <sys/resource.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"test/util/cleanup.h\"\n+#include \"test/util/file_descriptor.h\"\n#include \"test/util/logging.h\"\n#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n@@ -861,6 +863,50 @@ TEST(WaitTest, WaitidRusage) {\nEXPECT_GE(RusageCpuTime(rusage), kSpin);\n}\n+// After bf959931ddb88c4e4366e96dd22e68fa0db9527c (\"wait/ptrace: assume __WALL\n+// if the child is traced\") (Linux 4.7), tracees are always eligible for\n+// waiting, regardless of type.\n+TEST(WaitTest, TraceeWALL) {\n+ int fds[2];\n+ ASSERT_THAT(pipe(fds), SyscallSucceeds());\n+ FileDescriptor rfd(fds[0]);\n+ FileDescriptor wfd(fds[1]);\n+\n+ pid_t child = fork();\n+ if (child == 0) {\n+ // Child.\n+ rfd.reset();\n+\n+ TEST_PCHECK(ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == 0);\n+\n+ // Notify parent that we're now a tracee.\n+ wfd.reset();\n+\n+ _exit(0);\n+ }\n+ ASSERT_THAT(child, SyscallSucceeds());\n+\n+ wfd.reset();\n+\n+ // Wait for child to become tracee.\n+ char c;\n+ EXPECT_THAT(ReadFd(rfd.get(), &c, sizeof(c)), SyscallSucceedsWithValue(0));\n+\n+ // We can wait on the fork child with WCLONE, as it is a tracee.\n+ int status;\n+ if (IsRunningOnGvisor()) {\n+ ASSERT_THAT(Wait4(child, &status, __WCLONE, nullptr),\n+ SyscallSucceedsWithValue(child));\n+\n+ EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;\n+ } else {\n+ // On older versions of Linux, we may get ECHILD.\n+ ASSERT_THAT(Wait4(child, &status, __WCLONE, nullptr),\n+ ::testing::AnyOf(SyscallSucceedsWithValue(child),\n+ SyscallFailsWithErrno(ECHILD)));\n+ }\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | Always wait on tracee children
After bf959931ddb88c4e4366e96dd22e68fa0db9527c ("wait/ptrace: assume
__WALL if the child is traced") (Linux 4.7), tracees are always eligible
for waiting, regardless of type.
PiperOrigin-RevId: 250399527 |
259,853 | 28.05.2019 22:28:01 | 25,200 | 4b9cb381572e0f61f2a6c2259094548172900e0d | gvisor: socket() returns EPROTONOSUPPORT if protocol is not supported | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/provider.go",
"new_path": "pkg/sentry/socket/epsocket/provider.go",
"diff": "@@ -76,7 +76,7 @@ func getTransportProtocol(ctx context.Context, stype transport.SockType, protoco\nreturn header.TCPProtocolNumber, nil\n}\n}\n- return 0, syserr.ErrInvalidArgument\n+ return 0, syserr.ErrProtocolNotSupported\n}\n// Socket creates a new socket object for the AF_INET or AF_INET6 family.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix.go",
"new_path": "pkg/sentry/socket/unix/unix.go",
"diff": "@@ -598,8 +598,8 @@ type provider struct{}\n// Socket returns a new unix domain socket.\nfunc (*provider) Socket(t *kernel.Task, stype transport.SockType, protocol int) (*fs.File, *syserr.Error) {\n// Check arguments.\n- if protocol != 0 {\n- return nil, syserr.ErrInvalidArgument\n+ if protocol != 0 && protocol != linux.AF_UNIX /* PF_UNIX */ {\n+ return nil, syserr.ErrProtocolNotSupported\n}\n// Create the endpoint and socket.\n@@ -624,8 +624,8 @@ func (*provider) Socket(t *kernel.Task, stype transport.SockType, protocol int)\n// Pair creates a new pair of AF_UNIX connected sockets.\nfunc (*provider) Pair(t *kernel.Task, stype transport.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) {\n// Check arguments.\n- if protocol != 0 {\n- return nil, nil, syserr.ErrInvalidArgument\n+ if protocol != 0 && protocol != linux.AF_UNIX /* PF_UNIX */ {\n+ return nil, nil, syserr.ErrProtocolNotSupported\n}\nvar isPacket bool\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -35,6 +35,8 @@ syscall_test(\nsyscall_test(test = \"//test/syscalls/linux:brk_test\")\n+syscall_test(test = \"//test/syscalls/linux:socket_test\")\n+\nsyscall_test(test = \"//test/syscalls/linux:chdir_test\")\nsyscall_test(test = \"//test/syscalls/linux:chmod_test\")\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -312,6 +312,19 @@ cc_binary(\n],\n)\n+cc_binary(\n+ name = \"socket_test\",\n+ testonly = 1,\n+ srcs = [\"socket.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \":socket_test_util\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"@com_google_googletest//:gtest\",\n+ ],\n+)\n+\ncc_binary(\nname = \"brk_test\",\ntestonly = 1,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/socket.cc",
"diff": "+// Copyright 2018 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <sys/socket.h>\n+#include <unistd.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+TEST(SocketTest, UnixSocketPairProtocol) {\n+ int socks[2];\n+ ASSERT_THAT(socketpair(AF_UNIX, SOCK_STREAM, PF_UNIX, socks),\n+ SyscallSucceeds());\n+ close(socks[0]);\n+ close(socks[1]);\n+}\n+\n+TEST(SocketTest, Protocol) {\n+ struct {\n+ int domain, type, protocol;\n+ } tests[] = {\n+ {AF_UNIX, SOCK_STREAM, PF_UNIX}, {AF_UNIX, SOCK_SEQPACKET, PF_UNIX},\n+ {AF_UNIX, SOCK_DGRAM, PF_UNIX}, {AF_INET, SOCK_DGRAM, IPPROTO_UDP},\n+ {AF_INET, SOCK_STREAM, IPPROTO_TCP},\n+ };\n+ for (int i = 0; i < ABSL_ARRAYSIZE(tests); i++) {\n+ ASSERT_NO_ERRNO_AND_VALUE(\n+ Socket(tests[i].domain, tests[i].type, tests[i].protocol));\n+ }\n+}\n+\n+} // namespace testing\n+} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_unix.cc",
"new_path": "test/syscalls/linux/socket_unix.cc",
"diff": "@@ -1567,15 +1567,14 @@ TEST_P(UnixSocketPairTest, TIOCOUTQSucceeds) {\n}\nTEST_P(UnixSocketPairTest, NetdeviceIoctlsSucceed) {\n- FileDescriptor sock =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_DGRAM, 0));\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n// Prepare the request.\nstruct ifreq ifr;\nsnprintf(ifr.ifr_name, IFNAMSIZ, \"lo\");\n// Check that the ioctl either succeeds or fails with ENODEV.\n- int err = ioctl(sock.get(), SIOCGIFINDEX, &ifr);\n+ int err = ioctl(sockets->first_fd(), SIOCGIFINDEX, &ifr);\nif (err < 0) {\nASSERT_EQ(errno, ENODEV);\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor: socket() returns EPROTONOSUPPORT if protocol is not supported
PiperOrigin-RevId: 250426407 |
259,853 | 28.05.2019 23:02:07 | 25,200 | b52e571a6188ce90b5a13b002753230780119db9 | runsc/do: don't specify the read-only flag for the root mount
The root mount is an overlay mount. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/do.go",
"new_path": "runsc/cmd/do.go",
"diff": "@@ -106,7 +106,6 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nspec := &specs.Spec{\nRoot: &specs.Root{\nPath: absRoot,\n- Readonly: true,\n},\nProcess: &specs.Process{\nCwd: absCwd,\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc/do: don't specify the read-only flag for the root mount
The root mount is an overlay mount.
PiperOrigin-RevId: 250429317 |
259,962 | 29.05.2019 11:30:59 | 25,200 | 035a8fa38ed21da2e06db22d3dfd6122610fb856 | Add support for collecting execution trace to runsc.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/pprof.go",
"new_path": "pkg/sentry/control/pprof.go",
"diff": "@@ -18,6 +18,7 @@ import (\n\"errors\"\n\"runtime\"\n\"runtime/pprof\"\n+ \"runtime/trace\"\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/fd\"\n@@ -52,6 +53,9 @@ type Profile struct {\n// cpuFile is the current CPU profile output file.\ncpuFile *fd.FD\n+\n+ // traceFile is the current execution trace output file.\n+ traceFile *fd.FD\n}\n// StartCPUProfile is an RPC stub which starts recording the CPU profile in a\n@@ -122,3 +126,43 @@ func (p *Profile) Goroutine(o *ProfileOpts, _ *struct{}) error {\n}\nreturn nil\n}\n+\n+// StartTrace is an RPC stub which starts collection of an execution trace.\n+func (p *Profile) StartTrace(o *ProfileOpts, _ *struct{}) error {\n+ if len(o.FilePayload.Files) < 1 {\n+ return errNoOutput\n+ }\n+\n+ output, err := fd.NewFromFile(o.FilePayload.Files[0])\n+ if err != nil {\n+ return err\n+ }\n+\n+ p.mu.Lock()\n+ defer p.mu.Unlock()\n+\n+ // Returns an error if profiling is already started.\n+ if err := trace.Start(output); err != nil {\n+ output.Close()\n+ return err\n+ }\n+\n+ p.traceFile = output\n+ return nil\n+}\n+\n+// StopTrace is an RPC stub which stops collection of an ongoing execution\n+// trace and flushes the trace data. It takes no argument.\n+func (p *Profile) StopTrace(_, _ *struct{}) error {\n+ p.mu.Lock()\n+ defer p.mu.Unlock()\n+\n+ if p.traceFile == nil {\n+ return errors.New(\"Execution tracing not start\")\n+ }\n+\n+ trace.Stop()\n+ p.traceFile.Close()\n+ p.traceFile = nil\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -101,6 +101,8 @@ const (\nStartCPUProfile = \"Profile.StartCPUProfile\"\nStopCPUProfile = \"Profile.StopCPUProfile\"\nHeapProfile = \"Profile.HeapProfile\"\n+ StartTrace = \"Profile.StartTrace\"\n+ StopTrace = \"Profile.StopTrace\"\n)\n// ControlSocketAddr generates an abstract unix socket name for the given ID.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/debug.go",
"new_path": "runsc/cmd/debug.go",
"diff": "@@ -35,6 +35,7 @@ type Debug struct {\nprofileHeap string\nprofileCPU string\nprofileDelay int\n+ trace string\n}\n// Name implements subcommands.Command.\n@@ -59,6 +60,7 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {\nf.StringVar(&d.profileHeap, \"profile-heap\", \"\", \"writes heap profile to the given file.\")\nf.StringVar(&d.profileCPU, \"profile-cpu\", \"\", \"writes CPU profile to the given file.\")\nf.IntVar(&d.profileDelay, \"profile-delay\", 5, \"amount of time to wait before stoping CPU profile\")\n+ f.StringVar(&d.trace, \"trace\", \"\", \"writes an execution trace to the given file.\")\nf.IntVar(&d.signal, \"signal\", -1, \"sends signal to the sandbox\")\n}\n@@ -122,35 +124,62 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\nlog.Infof(\" *** Stack dump ***\\n%s\", stacks)\n}\n- if d.profileCPU != \"\" {\n- f, err := os.Create(d.profileCPU)\n+ if d.profileHeap != \"\" {\n+ f, err := os.Create(d.profileHeap)\nif err != nil {\nFatalf(err.Error())\n}\ndefer f.Close()\n- if err := c.Sandbox.StartCPUProfile(f); err != nil {\n+ if err := c.Sandbox.HeapProfile(f); err != nil {\nFatalf(err.Error())\n}\n- log.Infof(\"CPU profile started for %d sec, writing to %q\", d.profileDelay, d.profileCPU)\n- time.Sleep(time.Duration(d.profileDelay) * time.Second)\n+ log.Infof(\"Heap profile written to %q\", d.profileHeap)\n+ }\n+ delay := false\n+ if d.profileCPU != \"\" {\n+ delay = true\n+ f, err := os.Create(d.profileCPU)\n+ if err != nil {\n+ Fatalf(err.Error())\n+ }\n+ defer func() {\n+ f.Close()\nif err := c.Sandbox.StopCPUProfile(); err != nil {\nFatalf(err.Error())\n}\nlog.Infof(\"CPU profile written to %q\", d.profileCPU)\n+ }()\n+ if err := c.Sandbox.StartCPUProfile(f); err != nil {\n+ Fatalf(err.Error())\n}\n- if d.profileHeap != \"\" {\n- f, err := os.Create(d.profileHeap)\n+ log.Infof(\"CPU profile started for %d sec, writing to %q\", d.profileDelay, d.profileCPU)\n+ }\n+ if d.trace != \"\" {\n+ delay = true\n+ f, err := os.Create(d.trace)\nif err != nil {\nFatalf(err.Error())\n}\n- defer f.Close()\n-\n- if err := c.Sandbox.HeapProfile(f); err != nil {\n+ defer func() {\n+ f.Close()\n+ if err := c.Sandbox.StopTrace(); err != nil {\nFatalf(err.Error())\n}\n- log.Infof(\"Heap profile written to %q\", d.profileHeap)\n+ log.Infof(\"Trace written to %q\", d.trace)\n+ }()\n+ if err := c.Sandbox.StartTrace(f); err != nil {\n+ Fatalf(err.Error())\n+ }\n+ log.Infof(\"Tracing started for %d sec, writing to %q\", d.profileDelay, d.trace)\n+\n+ }\n+\n+ if delay {\n+ time.Sleep(time.Duration(d.profileDelay) * time.Second)\n+\n}\n+\nreturn subcommands.ExitSuccess\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -883,6 +883,41 @@ func (s *Sandbox) StopCPUProfile() error {\nreturn nil\n}\n+// StartTrace start trace writing to the given file.\n+func (s *Sandbox) StartTrace(f *os.File) error {\n+ log.Debugf(\"Trace start %q\", s.ID)\n+ conn, err := s.sandboxConnect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ opts := control.ProfileOpts{\n+ FilePayload: urpc.FilePayload{\n+ Files: []*os.File{f},\n+ },\n+ }\n+ if err := conn.Call(boot.StartTrace, &opts, nil); err != nil {\n+ return fmt.Errorf(\"starting sandbox %q trace: %v\", s.ID, err)\n+ }\n+ return nil\n+}\n+\n+// StopTrace stops a previously started trace..\n+func (s *Sandbox) StopTrace() error {\n+ log.Debugf(\"Trace stop %q\", s.ID)\n+ conn, err := s.sandboxConnect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ if err := conn.Call(boot.StopTrace, nil, nil); err != nil {\n+ return fmt.Errorf(\"stopping sandbox %q trace: %v\", s.ID, err)\n+ }\n+ return nil\n+}\n+\n// DestroyContainer destroys the given container. If it is the root container,\n// then the entire sandbox is destroyed.\nfunc (s *Sandbox) DestroyContainer(cid string) error {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for collecting execution trace to runsc.
Updates #220
PiperOrigin-RevId: 250532302 |
259,976 | 29.05.2019 16:48:19 | 25,200 | b18df9bed6af3ff9b526c9ebdcde33dffeac161e | Add VmData field to /proc/{pid}/status
VmData is the size of private data segments.
It has the same meaning as in Linux. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/task.go",
"new_path": "pkg/sentry/fs/proc/task.go",
"diff": "@@ -578,7 +578,7 @@ func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) (\n}\nfmt.Fprintf(&buf, \"TracerPid:\\t%d\\n\", tpid)\nvar fds int\n- var vss, rss uint64\n+ var vss, rss, data uint64\ns.t.WithMuLocked(func(t *kernel.Task) {\nif fdm := t.FDMap(); fdm != nil {\nfds = fdm.Size()\n@@ -586,11 +586,13 @@ func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) (\nif mm := t.MemoryManager(); mm != nil {\nvss = mm.VirtualMemorySize()\nrss = mm.ResidentSetSize()\n+ data = mm.VirtualDataSize()\n}\n})\nfmt.Fprintf(&buf, \"FDSize:\\t%d\\n\", fds)\nfmt.Fprintf(&buf, \"VmSize:\\t%d kB\\n\", vss>>10)\nfmt.Fprintf(&buf, \"VmRSS:\\t%d kB\\n\", rss>>10)\n+ fmt.Fprintf(&buf, \"VmData:\\t%d kB\\n\", data>>10)\nfmt.Fprintf(&buf, \"Threads:\\t%d\\n\", s.t.ThreadGroup().Count())\ncreds := s.t.Credentials()\nfmt.Fprintf(&buf, \"CapInh:\\t%016x\\n\", creds.InheritableCaps)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/lifecycle.go",
"new_path": "pkg/sentry/mm/lifecycle.go",
"diff": "@@ -69,6 +69,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {\nusers: 1,\nbrk: mm.brk,\nusageAS: mm.usageAS,\n+ dataAS: mm.dataAS,\n// \"The child does not inherit its parent's memory locks (mlock(2),\n// mlockall(2)).\" - fork(2). So lockedAS is 0 and defMLockMode is\n// MLockNone, both of which are zero values. vma.mlockMode is reset\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/mm.go",
"new_path": "pkg/sentry/mm/mm.go",
"diff": "@@ -111,6 +111,12 @@ type MemoryManager struct {\n// lockedAS is protected by mappingMu.\nlockedAS uint64\n+ // dataAS is the size of private data segments, like mm_struct->data_vm.\n+ // It means the vma which is private, writable, not stack.\n+ //\n+ // dataAS is protected by mappingMu.\n+ dataAS uint64\n+\n// New VMAs created by MMap use whichever of memmap.MMapOpts.MLockMode or\n// defMLockMode is greater.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/mm_test.go",
"new_path": "pkg/sentry/mm/mm_test.go",
"diff": "@@ -68,6 +68,60 @@ func TestUsageASUpdates(t *testing.T) {\n}\n}\n+func (mm *MemoryManager) realDataAS() uint64 {\n+ var sz uint64\n+ for seg := mm.vmas.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\n+ vma := seg.Value()\n+ if vma.isPrivateDataLocked() {\n+ sz += uint64(seg.Range().Length())\n+ }\n+ }\n+ return sz\n+}\n+\n+func TestDataASUpdates(t *testing.T) {\n+ ctx := contexttest.Context(t)\n+ mm := testMemoryManager(ctx)\n+ defer mm.DecUsers(ctx)\n+\n+ addr, err := mm.MMap(ctx, memmap.MMapOpts{\n+ Length: 3 * usermem.PageSize,\n+ Private: true,\n+ Perms: usermem.Write,\n+ MaxPerms: usermem.AnyAccess,\n+ })\n+ if err != nil {\n+ t.Fatalf(\"MMap got err %v want nil\", err)\n+ }\n+ if mm.dataAS == 0 {\n+ t.Fatalf(\"dataAS is 0, wanted not 0\")\n+ }\n+ realDataAS := mm.realDataAS()\n+ if mm.dataAS != realDataAS {\n+ t.Fatalf(\"dataAS believes %v bytes are mapped; %v bytes are actually mapped\", mm.dataAS, realDataAS)\n+ }\n+\n+ mm.MUnmap(ctx, addr, usermem.PageSize)\n+ realDataAS = mm.realDataAS()\n+ if mm.dataAS != realDataAS {\n+ t.Fatalf(\"dataAS believes %v bytes are mapped; %v bytes are actually mapped\", mm.dataAS, realDataAS)\n+ }\n+\n+ mm.MProtect(addr+usermem.PageSize, usermem.PageSize, usermem.Read, false)\n+ realDataAS = mm.realDataAS()\n+ if mm.dataAS != realDataAS {\n+ t.Fatalf(\"dataAS believes %v bytes are mapped; %v bytes are actually mapped\", mm.dataAS, realDataAS)\n+ }\n+\n+ mm.MRemap(ctx, addr+2*usermem.PageSize, usermem.PageSize, 2*usermem.PageSize, MRemapOpts{\n+ Move: MRemapMayMove,\n+ })\n+ realDataAS = mm.realDataAS()\n+ if mm.dataAS != realDataAS {\n+ t.Fatalf(\"dataAS believes %v bytes are mapped; %v bytes are actually mapped\", mm.dataAS, realDataAS)\n+ }\n+}\n+\nfunc TestBrkDataLimitUpdates(t *testing.T) {\nlimitSet := limits.NewLimitSet()\nlimitSet.Set(limits.Data, limits.Limit{}, true /* privileged */) // zero RLIMIT_DATA\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -527,6 +527,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\n}\nvseg := mm.vmas.Insert(mm.vmas.FindGap(newAR.Start), newAR, vma)\nmm.usageAS += uint64(newAR.Length())\n+ if vma.isPrivateDataLocked() {\n+ mm.dataAS += uint64(newAR.Length())\n+ }\nif vma.mlockMode != memmap.MLockNone {\nmm.lockedAS += uint64(newAR.Length())\nif vma.mlockMode == memmap.MLockEager {\n@@ -556,6 +559,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nmm.vmas.Remove(vseg)\nvseg = mm.vmas.Insert(mm.vmas.FindGap(newAR.Start), newAR, vma)\nmm.usageAS = mm.usageAS - uint64(oldAR.Length()) + uint64(newAR.Length())\n+ if vma.isPrivateDataLocked() {\n+ mm.dataAS = mm.dataAS - uint64(oldAR.Length()) + uint64(newAR.Length())\n+ }\nif vma.mlockMode != memmap.MLockNone {\nmm.lockedAS = mm.lockedAS - uint64(oldAR.Length()) + uint64(newAR.Length())\n}\n@@ -643,8 +649,16 @@ func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms us\n// Update vma permissions.\nvma := vseg.ValuePtr()\n+ vmaLength := vseg.Range().Length()\n+ if vma.isPrivateDataLocked() {\n+ mm.dataAS -= uint64(vmaLength)\n+ }\n+\nvma.realPerms = realPerms\nvma.effectivePerms = effectivePerms\n+ if vma.isPrivateDataLocked() {\n+ mm.dataAS += uint64(vmaLength)\n+ }\n// Propagate vma permission changes to pmas.\nfor pseg.Ok() && pseg.Start() < vseg.End() {\n@@ -1150,7 +1164,7 @@ func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Add\nfunc (mm *MemoryManager) VirtualMemorySize() uint64 {\nmm.mappingMu.RLock()\ndefer mm.mappingMu.RUnlock()\n- return uint64(mm.usageAS)\n+ return mm.usageAS\n}\n// VirtualMemorySizeRange returns the combined length in bytes of all mappings\n@@ -1165,12 +1179,19 @@ func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 {\nfunc (mm *MemoryManager) ResidentSetSize() uint64 {\nmm.activeMu.RLock()\ndefer mm.activeMu.RUnlock()\n- return uint64(mm.curRSS)\n+ return mm.curRSS\n}\n// MaxResidentSetSize returns the value advertised as mm's max RSS in bytes.\nfunc (mm *MemoryManager) MaxResidentSetSize() uint64 {\nmm.activeMu.RLock()\ndefer mm.activeMu.RUnlock()\n- return uint64(mm.maxRSS)\n+ return mm.maxRSS\n+}\n+\n+// VirtualDataSize returns the size of private data segments in mm.\n+func (mm *MemoryManager) VirtualDataSize() uint64 {\n+ mm.mappingMu.RLock()\n+ defer mm.mappingMu.RUnlock()\n+ return mm.dataAS\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/vma.go",
"new_path": "pkg/sentry/mm/vma.go",
"diff": "@@ -98,7 +98,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp\n}\n// Finally insert the vma.\n- vseg := mm.vmas.Insert(vgap, ar, vma{\n+ v := vma{\nmappable: opts.Mappable,\noff: opts.Offset,\nrealPerms: opts.Perms,\n@@ -109,8 +109,13 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp\nmlockMode: opts.MLockMode,\nid: opts.MappingIdentity,\nhint: opts.Hint,\n- })\n+ }\n+\n+ vseg := mm.vmas.Insert(vgap, ar, v)\nmm.usageAS += opts.Length\n+ if v.isPrivateDataLocked() {\n+ mm.dataAS += opts.Length\n+ }\nif opts.MLockMode != memmap.MLockNone {\nmm.lockedAS += opts.Length\n}\n@@ -374,6 +379,9 @@ func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRa\nvma.id.DecRef()\n}\nmm.usageAS -= uint64(vmaAR.Length())\n+ if vma.isPrivateDataLocked() {\n+ mm.dataAS -= uint64(vmaAR.Length())\n+ }\nif vma.mlockMode != memmap.MLockNone {\nmm.lockedAS -= uint64(vmaAR.Length())\n}\n@@ -396,6 +404,13 @@ func (vma *vma) canWriteMappableLocked() bool {\nreturn !vma.private && vma.maxPerms.Write\n}\n+// isPrivateDataLocked identify the data segments - private, writable, not stack\n+//\n+// Preconditions: mm.mappingMu must be locked.\n+func (vma *vma) isPrivateDataLocked() bool {\n+ return vma.realPerms.Write && vma.private && !vma.growsDown\n+}\n+\n// vmaSetFunctions implements segment.Functions for vmaSet.\ntype vmaSetFunctions struct{}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc.cc",
"new_path": "test/syscalls/linux/proc.cc",
"diff": "@@ -1180,7 +1180,7 @@ bool IsDigits(absl::string_view s) {\nreturn std::all_of(s.begin(), s.end(), absl::ascii_isdigit);\n}\n-TEST(ProcPidStatTest, VSSRSS) {\n+TEST(ProcPidStatTest, VmStats) {\nstd::string status_str =\nASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/self/status\"));\nASSERT_FALSE(status_str.empty());\n@@ -1211,6 +1211,19 @@ TEST(ProcPidStatTest, VSSRSS) {\nEXPECT_TRUE(IsDigits(rss_str.substr(0, rss_str.length() - 3))) << rss_str;\n// ... which is not 0.\nEXPECT_NE('0', rss_str[0]);\n+\n+ const auto data_it = status.find(\"VmData\");\n+ ASSERT_NE(data_it, status.end());\n+\n+ absl::string_view data_str(data_it->second);\n+\n+ // Room for the \" kB\" suffix plus at least one digit.\n+ ASSERT_GT(data_str.length(), 3);\n+ EXPECT_TRUE(absl::EndsWith(data_str, \" kB\"));\n+ // Everything else is part of a number.\n+ EXPECT_TRUE(IsDigits(data_str.substr(0, data_str.length() - 3))) << data_str;\n+ // ... which is not 0.\n+ EXPECT_NE('0', data_str[0]);\n}\n// Parse an array of NUL-terminated char* arrays, returning a vector of strings.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add VmData field to /proc/{pid}/status
VmData is the size of private data segments.
It has the same meaning as in Linux.
Change-Id: Iebf1ae85940a810524a6cde9c2e767d4233ddb2a
PiperOrigin-RevId: 250593739 |
259,881 | 29.05.2019 17:46:50 | 25,200 | 8d25cd0b40694d1911724816d72b34d0717878d6 | Update procid for Go 1.13
Upstream Go has no changes here. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/procid/procid_amd64.s",
"new_path": "pkg/sentry/platform/procid/procid_amd64.s",
"diff": "// +build amd64\n// +build go1.8\n-// +build !go1.13\n+// +build !go1.14\n#include \"textflag.h\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/procid/procid_arm64.s",
"new_path": "pkg/sentry/platform/procid/procid_arm64.s",
"diff": "// +build arm64\n// +build go1.8\n-// +build !go1.13\n+// +build !go1.14\n#include \"textflag.h\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update procid for Go 1.13
Upstream Go has no changes here.
PiperOrigin-RevId: 250602731 |
259,992 | 30.05.2019 12:01:41 | 25,200 | 38de91b028639ef5f4a4c8874b3ee23503fd2f3a | Add build guard to files using go:linkname
Function signatures are not validated during compilation. Since
they are not exported, they can change at any time. The guard
ensures that they are verified at least on every version upgrade. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build go1.12\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\npackage kvm\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"new_path": "pkg/sentry/platform/kvm/machine_unsafe.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build go1.12\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\npackage kvm\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build go1.12\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\npackage ptrace\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sleep/sleep_unsafe.go",
"new_path": "pkg/sleep/sleep_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.13\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n// Package sleep allows goroutines to efficiently sleep on multiple sources of\n// notifications (wakers). It offers O(1) complexity, which is different from\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/blockingpoll_amd64_unsafe.go",
"new_path": "pkg/tcpip/link/rawfile/blockingpoll_amd64_unsafe.go",
"diff": "// limitations under the License.\n// +build linux,amd64\n-// +build !go1.13\n+// +build go1.12\n+// +build !go1.14\n-// This must be validated with Go 1.13 and future releases.\n+// Check go:linkname function signatures when updating Go version.\npackage rawfile\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/time_unsafe.go",
"new_path": "pkg/tcpip/time_unsafe.go",
"diff": "// limitations under the License.\n// +build go1.9\n-// +build !go1.13\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\npackage tcpip\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/BUILD",
"new_path": "third_party/gvsync/BUILD",
"diff": "@@ -5,6 +5,8 @@ package(\nlicenses = [\"notice\"],\n)\n+exports_files([\"LICENSE\"])\n+\nload(\"//tools/go_generics:defs.bzl\", \"go_template\")\ngo_template(\n@@ -29,6 +31,8 @@ go_template(\ngo_library(\nname = \"gvsync\",\nsrcs = [\n+ \"downgradable_rwmutex_1_12_unsafe.go\",\n+ \"downgradable_rwmutex_1_13_unsafe.go\",\n\"downgradable_rwmutex_unsafe.go\",\n\"gvsync.go\",\n\"memmove_unsafe.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "third_party/gvsync/downgradable_rwmutex_1_12_unsafe.go",
"diff": "+// Copyright 2009 The Go Authors. All rights reserved.\n+// Copyright 2019 The gVisor Authors.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+// +build go1.12\n+// +build !go1.13\n+\n+// TODO(b/133868570): Delete once Go 1.12 is no longer supported.\n+\n+package gvsync\n+\n+import _ \"unsafe\"\n+\n+//go:linkname runtimeSemrelease112 sync.runtime_Semrelease\n+func runtimeSemrelease112(s *uint32, handoff bool)\n+\n+func runtimeSemrelease(s *uint32, handoff bool, skipframes int) {\n+ // 'skipframes' is only available starting from 1.13.\n+ runtimeSemrelease112(s, handoff)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "third_party/gvsync/downgradable_rwmutex_1_13_unsafe.go",
"diff": "+// Copyright 2009 The Go Authors. All rights reserved.\n+// Copyright 2019 The gVisor Authors.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+// +build go1.13\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\n+package gvsync\n+\n+import _ \"unsafe\"\n+\n+//go:linkname runtimeSemrelease sync.runtime_Semrelease\n+func runtimeSemrelease(s *uint32, handoff bool, skipframes int)\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/downgradable_rwmutex_unsafe.go",
"new_path": "third_party/gvsync/downgradable_rwmutex_unsafe.go",
"diff": "// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n+// +build go1.12\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\n// This is mostly copied from the standard library's sync/rwmutex.go.\n//\n// Happens-before relationships indicated to the race detector:\n@@ -19,6 +24,9 @@ import (\n\"unsafe\"\n)\n+//go:linkname runtimeSemacquire sync.runtime_Semacquire\n+func runtimeSemacquire(s *uint32)\n+\n// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock\n// method.\ntype DowngradableRWMutex struct {\n@@ -62,7 +70,7 @@ func (rw *DowngradableRWMutex) RUnlock() {\n// A writer is pending.\nif atomic.AddInt32(&rw.readerWait, -1) == 0 {\n// The last reader unblocks the writer.\n- runtimeSemrelease(&rw.writerSem, false)\n+ runtimeSemrelease(&rw.writerSem, false, 0)\n}\n}\nif RaceEnabled {\n@@ -103,7 +111,7 @@ func (rw *DowngradableRWMutex) Unlock() {\n}\n// Unblock blocked readers, if any.\nfor i := 0; i < int(r); i++ {\n- runtimeSemrelease(&rw.readerSem, false)\n+ runtimeSemrelease(&rw.readerSem, false, 0)\n}\n// Allow other writers to proceed.\nrw.w.Unlock()\n@@ -126,7 +134,7 @@ func (rw *DowngradableRWMutex) DowngradeLock() {\n// Unblock blocked readers, if any. Note that this loop starts as 1 since r\n// includes this goroutine.\nfor i := 1; i < int(r); i++ {\n- runtimeSemrelease(&rw.readerSem, false)\n+ runtimeSemrelease(&rw.readerSem, false, 0)\n}\n// Allow other writers to proceed to rw.w.Lock(). Note that they will still\n// block on rw.writerSem since at least this reader exists, such that\n@@ -136,9 +144,3 @@ func (rw *DowngradableRWMutex) DowngradeLock() {\nRaceEnable()\n}\n}\n-\n-//go:linkname runtimeSemacquire sync.runtime_Semacquire\n-func runtimeSemacquire(s *uint32)\n-\n-//go:linkname runtimeSemrelease sync.runtime_Semrelease\n-func runtimeSemrelease(s *uint32, handoff bool)\n"
},
{
"change_type": "MODIFY",
"old_path": "third_party/gvsync/memmove_unsafe.go",
"new_path": "third_party/gvsync/memmove_unsafe.go",
"diff": "// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n+// +build go1.12\n+// +build !go1.14\n+\n+// Check go:linkname function signatures when updating Go version.\n+\npackage gvsync\nimport (\n\"unsafe\"\n)\n+//go:linkname memmove runtime.memmove\n+//go:noescape\n+func memmove(to, from unsafe.Pointer, n uintptr)\n+\n// Memmove is exported for SeqAtomicLoad/SeqAtomicTryLoad<T>, which can't\n// define it because go_generics can't update the go:linkname annotation.\n// Furthermore, go:linkname silently doesn't work if the local name is exported\n@@ -17,7 +26,3 @@ import (\nfunc Memmove(to, from unsafe.Pointer, n uintptr) {\nmemmove(to, from, n)\n}\n-\n-//go:linkname memmove runtime.memmove\n-//go:noescape\n-func memmove(to, from unsafe.Pointer, n uintptr)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add build guard to files using go:linkname
Funcion signatures are not validated during compilation. Since
they are not exported, they can change at any time. The guard
ensures that they are verified at least on every version upgrade.
PiperOrigin-RevId: 250733742 |
259,858 | 31.05.2019 15:42:08 | 25,200 | 132bf68de47569e761227de9fd6177e8b32f6c38 | Switch to new dedicated RBE project. | [
{
"change_type": "MODIFY",
"old_path": "kokoro/common.cfg",
"new_path": "kokoro/common.cfg",
"diff": "@@ -11,7 +11,7 @@ before_action {\n# Configure bazel to access RBE.\nbazel_setting {\n# Our GCP project name\n- project_id: \"copybara-shentu\"\n+ project_id: \"gvisor-rbe\"\n# Use RBE for execution as well as caching.\nlocal_execution: false\n"
},
{
"change_type": "MODIFY",
"old_path": "kokoro/run_tests.sh",
"new_path": "kokoro/run_tests.sh",
"diff": "@@ -25,7 +25,7 @@ set -eux\nreadonly WORKSPACE_DIR=\"${PWD}/git/repo\"\n# Used to configure RBE.\n-readonly CLOUD_PROJECT_ID=\"copybara-shentu\"\n+readonly CLOUD_PROJECT_ID=\"gvisor-rbe\"\nreadonly RBE_PROJECT_ID=\"projects/${CLOUD_PROJECT_ID}/instances/default_instance\"\n# Random runtime name to avoid collisions.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Switch to new dedicated RBE project.
PiperOrigin-RevId: 250970783 |
259,891 | 31.05.2019 16:14:04 | 25,200 | d58eb9ce828fd7c831f30e922e01f1d2b84e462c | Add basic iptables structures to netstack. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/iptables/BUILD",
"diff": "+package(licenses = [\"notice\"])\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n+\n+go_library(\n+ name = \"iptables\",\n+ srcs = [\n+ \"iptables.go\",\n+ \"targets.go\",\n+ \"types.go\",\n+ ],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/tcpip/iptables\",\n+ visibility = [\"//visibility:public\"],\n+ deps = [\n+ \"//pkg/state\",\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/header\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/iptables/iptables.go",
"diff": "+// Copyright 2019 The gVisor authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package iptables supports packet filtering and manipulation via the iptables\n+// tool.\n+package iptables\n+\n+const (\n+ tablenameNat = \"nat\"\n+ tablenameMangle = \"mangle\"\n+)\n+\n+// Chain names as defined by net/ipv4/netfilter/ip_tables.c.\n+const (\n+ chainNamePrerouting = \"PREROUTING\"\n+ chainNameInput = \"INPUT\"\n+ chainNameForward = \"FORWARD\"\n+ chainNameOutput = \"OUTPUT\"\n+ chainNamePostrouting = \"POSTROUTING\"\n+)\n+\n+// DefaultTables returns a default set of tables. Each chain is set to accept\n+// all packets.\n+func DefaultTables() *IPTables {\n+ tables := IPTables{\n+ Tables: map[string]*Table{\n+ tablenameNat: &Table{\n+ BuiltinChains: map[Hook]*Chain{\n+ Prerouting: unconditionalAcceptChain(chainNamePrerouting),\n+ Input: unconditionalAcceptChain(chainNameInput),\n+ Output: unconditionalAcceptChain(chainNameOutput),\n+ Postrouting: unconditionalAcceptChain(chainNamePostrouting),\n+ },\n+ DefaultTargets: map[Hook]Target{\n+ Prerouting: UnconditionalAcceptTarget{},\n+ Input: UnconditionalAcceptTarget{},\n+ Output: UnconditionalAcceptTarget{},\n+ Postrouting: UnconditionalAcceptTarget{},\n+ },\n+ UserChains: map[string]*Chain{},\n+ },\n+ tablenameMangle: &Table{\n+ BuiltinChains: map[Hook]*Chain{\n+ Prerouting: unconditionalAcceptChain(chainNamePrerouting),\n+ Output: unconditionalAcceptChain(chainNameOutput),\n+ },\n+ DefaultTargets: map[Hook]Target{\n+ Prerouting: UnconditionalAcceptTarget{},\n+ Output: UnconditionalAcceptTarget{},\n+ },\n+ UserChains: map[string]*Chain{},\n+ },\n+ },\n+ Priorities: map[Hook][]string{\n+ Prerouting: []string{tablenameMangle, tablenameNat},\n+ Output: []string{tablenameMangle, tablenameNat},\n+ },\n+ }\n+\n+ // Initialize each table's Chains field.\n+ tables.Tables[tablenameNat].Chains = map[string]*Chain{\n+ chainNamePrerouting: tables.Tables[tablenameNat].BuiltinChains[Prerouting],\n+ chainNameInput: tables.Tables[tablenameNat].BuiltinChains[Input],\n+ chainNameOutput: tables.Tables[tablenameNat].BuiltinChains[Output],\n+ chainNamePostrouting: tables.Tables[tablenameNat].BuiltinChains[Postrouting],\n+ }\n+ tables.Tables[tablenameMangle].Chains = map[string]*Chain{\n+ chainNamePrerouting: tables.Tables[tablenameMangle].BuiltinChains[Prerouting],\n+ chainNameInput: tables.Tables[tablenameMangle].BuiltinChains[Input],\n+ chainNameOutput: tables.Tables[tablenameMangle].BuiltinChains[Output],\n+ chainNamePostrouting: tables.Tables[tablenameMangle].BuiltinChains[Postrouting],\n+ }\n+\n+ return &tables\n+}\n+\n+func unconditionalAcceptChain(name string) *Chain {\n+ return &Chain{\n+ Name: name,\n+ Rules: []*Rule{\n+ &Rule{\n+ Target: UnconditionalAcceptTarget{},\n+ },\n+ },\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/iptables/targets.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package iptables\n+\n+import \"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n+\n+// This file contains various Targets.\n+\n+// UnconditionalAcceptTarget accepts all packets.\n+type UnconditionalAcceptTarget struct{}\n+\n+// Action implements Target.Action.\n+func (_ UnconditionalAcceptTarget) Action(packet buffer.VectorisedView) (Verdict, string) {\n+ return Accept, \"\"\n+}\n+\n+// UnconditionalDropTarget denies all packets.\n+type UnconditionalDropTarget struct{}\n+\n+// Action implements Target.Action.\n+func (_ UnconditionalDropTarget) Action(packet buffer.VectorisedView) (Verdict, string) {\n+ return Drop, \"\"\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/iptables/types.go",
"diff": "+// Copyright 2019 The gVisor authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package iptables\n+\n+import (\n+ \"sync\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n+)\n+\n+// Hook specifies one of the hooks built into the network stack.\n+//\n+// Userspace app Userspace app\n+// ^ |\n+// | v\n+// [Input] [Output]\n+// ^ |\n+// | v\n+// | routing\n+// | |\n+// | v\n+// ----->[Prerouting]----->routing----->[Forward]---------[Postrouting]----->\n+type Hook uint\n+\n+// These values correspond to values in include/uapi/linux/netfilter.h.\n+const (\n+ // Prerouting happens before a packet is routed to applications or to\n+ // be forwarded.\n+ Prerouting Hook = iota\n+\n+ // Input happens before a packet reaches an application.\n+ Input\n+\n+ // Forward happens once it's decided that a packet should be forwarded\n+ // to another host.\n+ Forward\n+\n+ // Output happens after a packet is written by an application to be\n+ // sent out.\n+ Output\n+\n+ // Postrouting happens just before a packet goes out on the wire.\n+ Postrouting\n+\n+ // The total number of hooks.\n+ NumHooks\n+)\n+\n+// Verdict is returned by a rule's target to indicate how traversal of rules\n+// should (or should not) continue.\n+type Verdict int\n+\n+const (\n+ // Accept indicates the packet should continue traversing netstack as\n+ // normal.\n+ Accept Verdict = iota\n+\n+ // Drop inicates the packet should be dropped, stopping traversing\n+ // netstack.\n+ Drop\n+\n+ // Stolen indicates the packet was co-opted by the target and should\n+ // stop traversing netstack.\n+ Stolen\n+\n+ // Queue indicates the packet should be queued for userspace processing.\n+ Queue\n+\n+ // Repeat indicates the packet should re-traverse the chains for the\n+ // current hook.\n+ Repeat\n+\n+ // None indicates no verdict was reached.\n+ None\n+\n+ // Jump indicates a jump to another chain.\n+ Jump\n+\n+ // Continue indicates that traversal should continue at the next rule.\n+ Continue\n+\n+ // Return indicates that traversal should return to the calling chain.\n+ Return\n+)\n+\n+// IPTables holds all the tables for a netstack.\n+type IPTables struct {\n+ // mu protects the entire struct.\n+ mu sync.RWMutex\n+\n+ // Tables maps table names to tables. User tables have arbitrary names.\n+ Tables map[string]*Table\n+\n+ // Priorities maps each hook to a list of table names. The order of the\n+ // list is the order in which each table should be visited for that\n+ // hook.\n+ Priorities map[Hook][]string\n+}\n+\n+// Table defines a set of chains and hooks into the network stack. The\n+// currently supported tables are:\n+// * nat\n+// * mangle\n+type Table struct {\n+ // BuiltinChains holds the un-deletable chains built into netstack. 
If\n+ // a hook isn't present in the map, this table doesn't utilize that\n+ // hook.\n+ BuiltinChains map[Hook]*Chain\n+\n+ // DefaultTargets holds a target for each hook that will be executed if\n+ // chain traversal doesn't yield a verdict.\n+ DefaultTargets map[Hook]Target\n+\n+ // UserChains holds user-defined chains for the keyed by name. Users\n+ // can give their chains arbitrary names.\n+ UserChains map[string]*Chain\n+\n+ // Chains maps names to chains for both builtin and user-defined chains.\n+ // Its entries point to Chains already either in BuiltinChains and\n+ // UserChains, and its purpose is to make looking up tables by name\n+ // fast.\n+ Chains map[string]*Chain\n+}\n+\n+// ValidHooks returns a bitmap of the builtin hooks for the given table.\n+//\n+// Precondition: IPTables.mu must be locked for reading.\n+func (table *Table) ValidHooks() (uint32, *tcpip.Error) {\n+ hooks := uint32(0)\n+ for hook, _ := range table.BuiltinChains {\n+ hooks |= 1 << hook\n+ }\n+ return hooks, nil\n+}\n+\n+// Chain defines a list of rules for packet processing. When a packet traverses\n+// a chain, it is checked against each rule until either a rule returns a\n+// verdict or the chain ends.\n+//\n+// By convention, builtin chains end with a rule that matches everything and\n+// returns either Accept or Drop. User-defined chains end with Return. These\n+// aren't strictly necessary here, but the iptables tool writes tables this way.\n+type Chain struct {\n+ // Name is the chain name.\n+ Name string\n+\n+ // Rules is the list of rules to traverse.\n+ Rules []*Rule\n+}\n+\n+// Rule is a packet processing rule. It consists of two pieces. First it\n+// contains zero or more matchers, each of which is a specification of which\n+// packets this rule applies to. If there are no matchers in the rule, it\n+// applies to any packet.\n+type Rule struct {\n+ // Matchers is the list of matchers for this rule.\n+ Matchers []Matcher\n+\n+ // Target is the action to invoke if all the matchers match the packet.\n+ Target Target\n+}\n+\n+// Matcher is the interface for matching packets.\n+type Matcher interface {\n+ // Match returns whether the packet matches and whether the packet\n+ // should be \"hotdropped\", i.e. dropped immediately. This is usually\n+ // used for suspicious packets.\n+ Match(hook Hook, packet buffer.VectorisedView, interfaceName string) (matches bool, hotdrop bool)\n+}\n+\n+// Target is the interface for taking an action for a packet.\n+type Target interface {\n+ // Action takes an action on the packet and returns a verdict on how\n+ // traversal should (or should not) continue. If the return value is\n+ // Jump, it also returns the name of the chain to jump to.\n+ Action(packet buffer.VectorisedView) (Verdict, string)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add basic iptables structures to netstack.
Change-Id: Ib589906175a59dae315405a28f2d7f525ff8877f |
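The record above lays out the table/chain/rule/target model. As a rough, standalone sketch of how a chain of rules is meant to be walked (the types and names below are stand-ins invented for this illustration, not the gvisor iptables API), a chain traversal looks like:

```go
package main

import "fmt"

// Verdict plays the role of the Verdict type in the commit above, but
// everything in this file is a simplified stand-in written for illustration.
type Verdict int

const (
	Accept Verdict = iota
	Drop
)

// rule pairs an optional match predicate with the verdict its target returns.
// A nil predicate matches every packet, like a rule with no matchers.
type rule struct {
	match   func(packet []byte) bool
	verdict Verdict
}

// chain is an ordered rule list; by convention the builtin chains end with a
// catch-all rule, which is what unconditionalAcceptChain builds in the diff.
type chain struct {
	name  string
	rules []rule
}

// traverse returns the verdict of the first matching rule, falling back to
// Accept when nothing matches (the role of DefaultTargets in the commit).
func (c *chain) traverse(packet []byte) Verdict {
	for _, r := range c.rules {
		if r.match == nil || r.match(packet) {
			return r.verdict
		}
	}
	return Accept
}

func main() {
	prerouting := chain{
		name: "PREROUTING",
		rules: []rule{
			{match: func(p []byte) bool { return len(p) == 0 }, verdict: Drop},
			{verdict: Accept}, // catch-all
		},
	}
	fmt.Println(prerouting.traverse(nil))          // prints 1 (Drop)
	fmt.Println(prerouting.traverse([]byte{0x45})) // prints 0 (Accept)
}
```

Each builtin chain created by DefaultTables ends in exactly this kind of catch-all accept rule, which is why an empty configuration accepts all traffic.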
259,962 | 31.05.2019 16:16:24 | 25,200 | 033f96cc9313d7ceb3df14227ef3724ec3295d2a | Change segment queue limit to be of fixed size.
Netstack sets the unprocessed segment queue size to match the receive
buffer size. This is not required as this queue only needs to hold enough segments
for a short duration before the endpoint goroutine can process it.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -335,7 +335,7 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\ne.probe = p\n}\n- e.segmentQueue.setLimit(2 * e.rcvBufSize)\n+ e.segmentQueue.setLimit(MaxUnprocessedSegments)\ne.workMu.Init()\ne.workMu.Lock()\ne.tsOffset = timeStampOffset()\n@@ -757,8 +757,6 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\n}\ne.rcvListMu.Unlock()\n- e.segmentQueue.setLimit(2 * size)\n-\ne.notifyProtocolGoroutine(mask)\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -163,7 +163,7 @@ func (e *endpoint) loadState(state endpointState) {\n// afterLoad is invoked by stateify.\nfunc (e *endpoint) afterLoad() {\ne.stack = stack.StackFromEnv\n- e.segmentQueue.setLimit(2 * e.rcvBufSize)\n+ e.segmentQueue.setLimit(MaxUnprocessedSegments)\ne.workMu.Init()\nstate := e.state\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/protocol.go",
"new_path": "pkg/tcpip/transport/tcp/protocol.go",
"diff": "@@ -48,6 +48,10 @@ const (\n// MaxBufferSize is the largest size a receive and send buffer can grow to.\nmaxBufferSize = 4 << 20 // 4MB\n+\n+ // MaxUnprocessedSegments is the maximum number of unprocessed segments\n+ // that can be queued for a given endpoint.\n+ MaxUnprocessedSegments = 300\n)\n// SACKEnabled option can be used to enable SACK support in the TCP\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/segment_queue.go",
"new_path": "pkg/tcpip/transport/tcp/segment_queue.go",
"diff": "@@ -16,8 +16,6 @@ package tcp\nimport (\n\"sync\"\n-\n- \"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n)\n// segmentQueue is a bounded, thread-safe queue of TCP segments.\n@@ -58,7 +56,7 @@ func (q *segmentQueue) enqueue(s *segment) bool {\nr := q.used < q.limit\nif r {\nq.list.PushBack(s)\n- q.used += s.data.Size() + header.TCPMinimumSize\n+ q.used++\n}\nq.mu.Unlock()\n@@ -73,7 +71,7 @@ func (q *segmentQueue) dequeue() *segment {\ns := q.list.Front()\nif s != nil {\nq.list.Remove(s)\n- q.used -= s.data.Size() + header.TCPMinimumSize\n+ q.used--\n}\nq.mu.Unlock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change segment queue limit to be of fixed size.
Netstack sets the unprocessed segment queue size to match the receive
buffer size. This is not required as this queue only needs to hold enough segments
for a short duration before the endpoint goroutine can process it.
Updates #230
PiperOrigin-RevId: 250976323 |
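The new limit counts segments rather than bytes. A minimal sketch of a count-bounded, mutex-protected queue in that spirit (illustrative only; the real segmentQueue in the diff stores a linked list of *segment values):

```go
package main

import (
	"fmt"
	"sync"
)

// boundedQueue accepts at most limit items; enqueue reports whether the item
// was taken, mirroring how segmentQueue.enqueue drops segments once the fixed
// MaxUnprocessedSegments limit is reached.
type boundedQueue struct {
	mu    sync.Mutex
	items []string
	limit int
}

func (q *boundedQueue) enqueue(item string) bool {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) >= q.limit {
		return false // Caller drops the segment and bumps a drop counter.
	}
	q.items = append(q.items, item)
	return true
}

func (q *boundedQueue) dequeue() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

func main() {
	q := &boundedQueue{limit: 2}
	fmt.Println(q.enqueue("seg1"), q.enqueue("seg2"), q.enqueue("seg3")) // true true false
	item, ok := q.dequeue()
	fmt.Println(item, ok) // seg1 true
}
```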
259,853 | 03.06.2019 10:58:38 | 25,200 | 8e926e3f74cef3d04b37c6a68ba5de966e9d9839 | gvisor: validate a new map region in the mremap syscall
Right now, mremap allows remapping a memory region over MaxUserAddress,
which means that we can change the stub region. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -470,6 +470,16 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi\nreturn 0, syserror.EINVAL\n}\n+ // Check that the new region is valid.\n+ _, err := mm.findAvailableLocked(newSize, findAvailableOpts{\n+ Addr: newAddr,\n+ Fixed: true,\n+ Unmap: true,\n+ })\n+ if err != nil {\n+ return 0, err\n+ }\n+\n// Unmap any mappings at the destination.\nmm.unmapLocked(ctx, newAR)\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor: validate a new map region in the mremap syscall
Right now, mremap allows remapping a memory region over MaxUserAddress,
which means that we can change the stub region.
PiperOrigin-RevId: 251266886 |
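The fix validates the fixed destination range before anything already mapped there is torn down. A standalone sketch of that kind of check (the function and the maxUserAddress constant below are made up for illustration and are not gvisor's mm API):

```go
package main

import (
	"errors"
	"fmt"
)

// maxUserAddress stands in for the highest address user mappings may occupy;
// in the bug above, the stub region lives beyond that boundary.
const maxUserAddress = uint64(0x7ffffffff000)

var errInvalidRange = errors.New("destination is not a valid user range")

// checkFixedDestination validates a fixed mremap destination before the caller
// irreversibly unmaps whatever currently occupies it.
func checkFixedDestination(addr, size uint64) error {
	end := addr + size
	if size == 0 || end < addr { // zero-size or address-space overflow
		return errInvalidRange
	}
	if end > maxUserAddress {
		return errInvalidRange
	}
	return nil
}

func main() {
	fmt.Println(checkFixedDestination(0x400000, 0x1000))       // <nil>
	fmt.Println(checkFixedDestination(maxUserAddress, 0x1000)) // destination is not a valid user range
}
```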
259,881 | 03.06.2019 12:47:21 | 25,200 | 955685845e6c1d855315978291195f35a73d7cc1 | Remove spurious period | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -432,7 +432,7 @@ func createMemoryFile() (*pgalloc.MemoryFile, error) {\nreturn mf, nil\n}\n-// Run runs the root container..\n+// Run runs the root container.\nfunc (l *Loader) Run() error {\nerr := l.run()\nl.ctrl.manager.startResultChan <- err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove spurious period
PiperOrigin-RevId: 251288885 |
259,881 | 03.06.2019 13:30:51 | 25,200 | 6e1f51f3eb44bdee85c50d075e750e857adef9fd | Remove duplicate socket tests
socket_unix_abstract.cc: Subset of socket_abstract.cc
socket_unix_filesystem.cc: Subset of socket_filesystem.cc | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -417,12 +417,6 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_stream_nonblock_local_test\",\n)\n-syscall_test(\n- size = \"large\",\n- shard_count = 10,\n- test = \"//test/syscalls/linux:socket_unix_abstract_test\",\n-)\n-\nsyscall_test(\n# NOTE(b/116636318): Large sendmsg may stall a long time.\nsize = \"enormous\",\n@@ -434,12 +428,6 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_unix_dgram_non_blocking_test\",\n)\n-syscall_test(\n- size = \"large\",\n- shard_count = 10,\n- test = \"//test/syscalls/linux:socket_unix_filesystem_test\",\n-)\n-\nsyscall_test(\nsize = \"large\",\nshard_count = 10,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2613,22 +2613,6 @@ cc_binary(\n],\n)\n-cc_binary(\n- name = \"socket_unix_abstract_test\",\n- testonly = 1,\n- srcs = [\n- \"socket_unix_abstract.cc\",\n- ],\n- linkstatic = 1,\n- deps = [\n- \":socket_test_util\",\n- \":socket_unix_test_cases\",\n- \":unix_domain_socket_test_util\",\n- \"//test/util:test_main\",\n- \"//test/util:test_util\",\n- ],\n-)\n-\ncc_binary(\nname = \"socket_unix_unbound_dgram_test\",\ntestonly = 1,\n@@ -2671,23 +2655,6 @@ cc_binary(\n],\n)\n-cc_binary(\n- name = \"socket_unix_filesystem_test\",\n- testonly = 1,\n- srcs = [\n- \"socket_unix_filesystem.cc\",\n- ],\n- linkstatic = 1,\n- deps = [\n- \":socket_test_util\",\n- \":socket_unix_test_cases\",\n- \":unix_domain_socket_test_util\",\n- \"//test/util:test_main\",\n- \"//test/util:test_util\",\n- \"@com_google_googletest//:gtest\",\n- ],\n-)\n-\ncc_binary(\nname = \"socket_blocking_local_test\",\ntestonly = 1,\n"
},
{
"change_type": "DELETE",
"old_path": "test/syscalls/linux/socket_unix_abstract.cc",
"new_path": null,
"diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-#include <vector>\n-\n-#include \"test/syscalls/linux/socket_test_util.h\"\n-#include \"test/syscalls/linux/socket_unix.h\"\n-#include \"test/syscalls/linux/unix_domain_socket_test_util.h\"\n-#include \"test/util/test_util.h\"\n-\n-namespace gvisor {\n-namespace testing {\n-\n-std::vector<SocketPairKind> GetSocketPairs() {\n- return ApplyVec<SocketPairKind>(\n- AbstractBoundUnixDomainSocketPair,\n- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},\n- List<int>{0, SOCK_NONBLOCK}));\n-}\n-\n-INSTANTIATE_TEST_SUITE_P(\n- AllUnixDomainSockets, UnixSocketPairTest,\n- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));\n-\n-} // namespace testing\n-} // namespace gvisor\n"
},
{
"change_type": "DELETE",
"old_path": "test/syscalls/linux/socket_unix_filesystem.cc",
"new_path": null,
"diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-#include <vector>\n-\n-#include \"test/syscalls/linux/socket_test_util.h\"\n-#include \"test/syscalls/linux/socket_unix.h\"\n-#include \"test/syscalls/linux/unix_domain_socket_test_util.h\"\n-#include \"test/util/test_util.h\"\n-\n-namespace gvisor {\n-namespace testing {\n-\n-std::vector<SocketPairKind> GetSocketPairs() {\n- return ApplyVec<SocketPairKind>(\n- FilesystemBoundUnixDomainSocketPair,\n- AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},\n- List<int>{0, SOCK_NONBLOCK}));\n-}\n-\n-INSTANTIATE_TEST_SUITE_P(\n- AllUnixDomainSockets, UnixSocketPairTest,\n- ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));\n-\n-} // namespace testing\n-} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove duplicate socket tests
socket_unix_abstract.cc: Subset of socket_abstract.cc
socket_unix_filesystem.cc: Subset of socket_filesystem.cc
PiperOrigin-RevId: 251297117 |
259,962 | 03.06.2019 16:58:59 | 25,200 | bfe32209923472da2d8e263b6cb725a2e64a8689 | Delete debug log lines left by mistake.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"encoding/binary\"\n\"hash\"\n\"io\"\n- \"log\"\n\"sync\"\n\"time\"\n@@ -307,7 +306,6 @@ func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header\nfunc (e *endpoint) incSynRcvdCount() bool {\ne.mu.Lock()\n- log.Printf(\"l: %d, c: %d, e.synRcvdCount: %d\", len(e.acceptedChan), cap(e.acceptedChan), e.synRcvdCount)\nif l, c := len(e.acceptedChan), cap(e.acceptedChan); l == c && e.synRcvdCount >= c {\ne.mu.Unlock()\nreturn false\n@@ -333,17 +331,14 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) {\n// Drop the SYN if the listen endpoint's accept queue is\n// overflowing.\nif e.incSynRcvdCount() {\n- log.Printf(\"processing syn packet\")\ns.incRef()\ngo e.handleSynSegment(ctx, s, &opts) // S/R-SAFE: synRcvdCount is the barrier.\nreturn\n}\n- log.Printf(\"dropping syn packet\")\ne.stack.Stats().TCP.ListenOverflowSynDrop.Increment()\ne.stack.Stats().DroppedPackets.Increment()\nreturn\n} else {\n- // TODO(bhaskerh): Increment syncookie sent stat.\ncookie := ctx.createCookie(s.id, s.sequenceNumber, encodeMSS(opts.MSS))\n// Send SYN with window scaling because we currently\n// dont't encode this information in the cookie.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Delete debug log lines left by mistake.
Updates #236
PiperOrigin-RevId: 251337915 |
259,858 | 03.06.2019 18:04:43 | 25,200 | 18e6e63503251cdc0b9432765b6eaa9ffa002824 | Allow specification of origin in cloudbuild. | [
{
"change_type": "MODIFY",
"old_path": "cloudbuild/go.yaml",
"new_path": "cloudbuild/go.yaml",
"diff": "@@ -17,4 +17,6 @@ steps:\nentrypoint: 'bash'\nargs:\n- '-c'\n- - 'if [[ \"$BRANCH_NAME\" == \"master\" ]]; then git push origin go:go; fi'\n+ - 'if [[ \"$BRANCH_NAME\" == \"master\" ]]; then git push \"${_ORIGIN}\" go:go; fi'\n+substitutions:\n+ _ORIGIN: origin\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow specification of origin in cloudbuild.
PiperOrigin-RevId: 251347966 |
259,853 | 03.06.2019 21:24:56 | 25,200 | 00f8663887cbf9057d93e8848eb9538cf1c0cff4 | gvisor/fs: return a proper error from FileWriter.Write in case of a short-write
The io.Writer contract requires that Write writes all available
bytes and does not return short writes. This causes errors with
io.Copy, since our own Write interface does not have this same
contract. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file.go",
"new_path": "pkg/sentry/fs/file.go",
"diff": "@@ -545,12 +545,28 @@ type lockedWriter struct {\n// Write implements io.Writer.Write.\nfunc (w *lockedWriter) Write(buf []byte) (int, error) {\n- n, err := w.File.FileOperations.Write(w.Ctx, w.File, usermem.BytesIOSequence(buf), w.File.offset)\n- return int(n), err\n+ return w.WriteAt(buf, w.File.offset)\n}\n// WriteAt implements io.Writer.WriteAt.\nfunc (w *lockedWriter) WriteAt(buf []byte, offset int64) (int, error) {\n- n, err := w.File.FileOperations.Write(w.Ctx, w.File, usermem.BytesIOSequence(buf), offset)\n- return int(n), err\n+ var (\n+ written int\n+ err error\n+ )\n+ // The io.Writer contract requires that Write writes all available\n+ // bytes and does not return short writes. This causes errors with\n+ // io.Copy, since our own Write interface does not have this same\n+ // contract. Enforce that here.\n+ for written < len(buf) {\n+ var n int64\n+ n, err = w.File.FileOperations.Write(w.Ctx, w.File, usermem.BytesIOSequence(buf[written:]), offset+int64(written))\n+ if n > 0 {\n+ written += int(n)\n+ }\n+ if err != nil {\n+ break\n+ }\n+ }\n+ return written, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/fs: return a proper error from FileWriter.Write in case of a short-write
The io.Writer contract requires that Write writes all available
bytes and does not return short writes. This causes errors with
io.Copy, since our own Write interface does not have this same
contract.
PiperOrigin-RevId: 251368730 |
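io.Copy reports io.ErrShortWrite whenever a Write returns fewer bytes than requested without an error, which is why the wrapper above loops until the buffer is drained. A generic sketch of the same write-all pattern over any partial-write primitive (all names here are illustrative):

```go
package main

import "fmt"

// writeAt is any primitive that may write fewer bytes than requested without
// reporting an error, similar in spirit to the sentry's FileOperations.Write.
type writeAt func(p []byte, off int64) (int, error)

// writeFull keeps calling w until buf is fully written or an error occurs, so
// the result satisfies the io.Writer contract (no silent short writes).
func writeFull(w writeAt, buf []byte, off int64) (int, error) {
	written := 0
	for written < len(buf) {
		n, err := w(buf[written:], off+int64(written))
		if n > 0 {
			written += n
		}
		if err != nil {
			return written, err
		}
		if n == 0 {
			// Defensive: avoid spinning if the primitive makes no progress.
			return written, fmt.Errorf("no progress after %d bytes", written)
		}
	}
	return written, nil
}

func main() {
	var sink []byte
	// A deliberately short-writing primitive: at most 3 bytes per call.
	short := func(p []byte, off int64) (int, error) {
		n := len(p)
		if n > 3 {
			n = 3
		}
		sink = append(sink, p[:n]...)
		return n, nil
	}
	n, err := writeFull(short, []byte("hello world"), 0)
	fmt.Println(n, err, string(sink)) // 11 <nil> hello world
}
```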
259,853 | 03.06.2019 21:47:09 | 25,200 | 90a116890fcea9fd39911bae854e4e67608a141d | gvisor/sock/unix: pass creds when a message is sent between unconnected sockets
and don't report a sender address if it doesn't have one | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/socket.go",
"new_path": "pkg/sentry/fs/gofer/socket.go",
"diff": "@@ -139,3 +139,8 @@ func (e *endpoint) UnidirectionalConnect() (transport.ConnectedEndpoint, *syserr\nfunc (e *endpoint) Release() {\ne.inode.DecRef()\n}\n+\n+// Passcred implements transport.BoundEndpoint.Passcred.\n+func (e *endpoint) Passcred() bool {\n+ return false\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/control/control.go",
"new_path": "pkg/sentry/socket/control/control.go",
"diff": "@@ -406,11 +406,19 @@ func makeCreds(t *kernel.Task, socketOrEndpoint interface{}) SCMCredentials {\nreturn nil\n}\nif cr, ok := socketOrEndpoint.(transport.Credentialer); ok && (cr.Passcred() || cr.ConnectedPasscred()) {\n- tcred := t.Credentials()\n- return &scmCredentials{t, tcred.EffectiveKUID, tcred.EffectiveKGID}\n+ return MakeCreds(t)\n+ }\n+ return nil\n}\n+\n+// MakeCreds creates default SCMCredentials.\n+func MakeCreds(t *kernel.Task) SCMCredentials {\n+ if t == nil {\nreturn nil\n}\n+ tcred := t.Credentials()\n+ return &scmCredentials{t, tcred.EffectiveKUID, tcred.EffectiveKGID}\n+}\n// New creates default control messages if needed.\nfunc New(t *kernel.Task, socketOrEndpoint interface{}, rights SCMRights) transport.ControlMessages {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/transport/unix.go",
"new_path": "pkg/sentry/socket/unix/transport/unix.go",
"diff": "@@ -237,6 +237,10 @@ type BoundEndpoint interface {\n// endpoint.\nUnidirectionalConnect() (ConnectedEndpoint, *syserr.Error)\n+ // Passcred returns whether or not the SO_PASSCRED socket option is\n+ // enabled on this end.\n+ Passcred() bool\n+\n// Release releases any resources held by the BoundEndpoint. It must be\n// called before dropping all references to a BoundEndpoint returned by a\n// function.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/unix/unix.go",
"new_path": "pkg/sentry/socket/unix/unix.go",
"diff": "@@ -385,6 +385,10 @@ func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []\n}\ndefer ep.Release()\nw.To = ep\n+\n+ if ep.Passcred() && w.Control.Credentials == nil {\n+ w.Control.Credentials = control.MakeCreds(t)\n+ }\n}\nn, err := src.CopyInTo(t, &w)\n@@ -516,7 +520,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || dontWait {\nvar from interface{}\nvar fromLen uint32\n- if r.From != nil {\n+ if r.From != nil && len([]byte(r.From.Addr)) != 0 {\nfrom, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/accept_bind.cc",
"new_path": "test/syscalls/linux/accept_bind.cc",
"diff": "@@ -448,19 +448,7 @@ TEST_P(AllSocketPairTest, UnboundSenderAddr) {\nRetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0,\nreinterpret_cast<sockaddr*>(&addr), &addr_len),\nSyscallSucceedsWithValue(sizeof(i)));\n- if (!IsRunningOnGvisor()) {\n- // Linux returns a zero length for addresses from recvfrom(2) and\n- // recvmsg(2). This differs from the behavior of getpeername(2) and\n- // getsockname(2). For simplicity, we use the getpeername(2) and\n- // getsockname(2) behavior for recvfrom(2) and recvmsg(2).\nEXPECT_EQ(addr_len, 0);\n- return;\n- }\n- EXPECT_EQ(addr_len, 2);\n- EXPECT_EQ(\n- memcmp(&addr, sockets->second_addr(),\n- std::min((size_t)addr_len, (size_t)sockets->second_addr_len())),\n- 0);\n}\nTEST_P(AllSocketPairTest, BoundSenderAddr) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_unix_unbound_dgram.cc",
"new_path": "test/syscalls/linux/socket_unix_unbound_dgram.cc",
"diff": "// limitations under the License.\n#include <stdio.h>\n+#include <sys/socket.h>\n#include <sys/un.h>\n+\n#include \"gtest/gtest.h\"\n#include \"gtest/gtest.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n@@ -142,6 +144,28 @@ TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnect) {\nSyscallSucceedsWithValue(sizeof(data)));\n}\n+TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnectPassCreds) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+\n+ SetSoPassCred(sockets->first_fd());\n+ char data = 'a';\n+ ASSERT_THAT(\n+ RetryEINTR(sendto)(sockets->second_fd(), &data, sizeof(data), 0,\n+ sockets->first_addr(), sockets->first_addr_size()),\n+ SyscallSucceedsWithValue(sizeof(data)));\n+ ucred creds;\n+ creds.pid = -1;\n+ char buf[sizeof(data) + 1];\n+ ASSERT_NO_FATAL_FAILURE(\n+ RecvCreds(sockets->first_fd(), &creds, buf, sizeof(buf), sizeof(data)));\n+ EXPECT_EQ(0, memcmp(&data, buf, sizeof(data)));\n+ EXPECT_THAT(getpid(), SyscallSucceedsWithValue(creds.pid));\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAllUnixDomainSockets, UnboundDgramUnixSocketPairTest,\n::testing::ValuesIn(VecCat<SocketPairKind>(\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/sock/unix: pass creds when a message is sent between unconnected sockets
and don't report a sender address if it doesn't have one
PiperOrigin-RevId: 251371284 |
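For context, the host behavior being matched can be reproduced from plain Go with the standard syscall package: once the receiver enables SO_PASSCRED, the kernel attaches SCM_CREDENTIALS even though the sender never supplied any. The sketch below uses a connected datagram socketpair for brevity, whereas the commit's new test exercises the unconnected sendto path:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Linux-only sketch. Datagram Unix socket pair: fds[0] receives, fds[1] sends.
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fds[0])
	defer syscall.Close(fds[1])

	// Ask the kernel to attach sender credentials to every received message.
	if err := syscall.SetsockoptInt(fds[0], syscall.SOL_SOCKET, syscall.SO_PASSCRED, 1); err != nil {
		panic(err)
	}

	// Send plain data with no explicit control messages.
	if err := syscall.Sendmsg(fds[1], []byte("a"), nil, nil, 0); err != nil {
		panic(err)
	}

	buf := make([]byte, 16)
	oob := make([]byte, 128)
	_, oobn, _, _, err := syscall.Recvmsg(fds[0], buf, oob, 0)
	if err != nil {
		panic(err)
	}

	// The kernel filled in SCM_CREDENTIALS on the sender's behalf.
	msgs, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil || len(msgs) == 0 {
		panic(fmt.Sprintf("expected credentials, got %d messages (err=%v)", len(msgs), err))
	}
	creds, err := syscall.ParseUnixCredentials(&msgs[0])
	if err != nil {
		panic(err)
	}
	fmt.Printf("sender pid=%d uid=%d gid=%d\n", creds.Pid, creds.Uid, creds.Gid)
}
```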
259,858 | 03.06.2019 22:59:35 | 25,200 | f520d0d585e159da902b2880c5e115abeaacf9cb | Resolve impossible dependencies. | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -38,8 +38,8 @@ http_archive(\n# External repositories, in sorted order.\ngo_repository(\nname = \"com_github_cenkalti_backoff\",\n+ commit = \"2146c9339422\",\nimportpath = \"github.com/cenkalti/backoff\",\n- tag = \"v2.1.1\",\n)\ngo_repository(\n"
},
{
"change_type": "MODIFY",
"old_path": "go.mod",
"new_path": "go.mod",
"diff": "module gvisor.googlesource.com/gvisor\n+\ngo 1.12\nrequire (\n- github.com/cenkalti/backoff v2.1.1\n+ github.com/cenkalti/backoff v2.2.0\ngithub.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079\ngithub.com/golang/mock v1.3.1\ngithub.com/golang/protobuf v1.3.1\n"
}
] | Go | Apache License 2.0 | google/gvisor | Resolve impossible dependencies.
PiperOrigin-RevId: 251377523 |
259,884 | 02.06.2019 15:20:37 | 14,400 | 837a5b13223c33f81e094ccbef1bfa86950c2899 | Add y-axis label to redis benchmark
Adds a "method" to the redis benchmark csv
fixes | [
{
"change_type": "MODIFY",
"old_path": "static/performance/redis.csv",
"new_path": "static/performance/redis.csv",
"diff": "-runtime,metric,result\n-runc,PING_INLINE,30525.03\n-runc,PING_BULK,30293.85\n-runc,SET,30257.19\n-runc,GET,30312.21\n-runc,INCR,30525.03\n-runc,LPUSH,30712.53\n-runc,RPUSH,30459.95\n-runc,LPOP,30367.45\n-runc,RPOP,30665.44\n-runc,SADD,30030.03\n-runc,HSET,30656.04\n-runc,SPOP,29940.12\n-runc,LRANGE_100,24224.81\n-runc,LRANGE_300,14302.06\n-runc,LRANGE_500,11728.83\n-runc,LRANGE_600,9900.99\n-runc,MSET,30120.48\n-runsc,PING_INLINE,14528.55\n-runsc,PING_BULK,15627.44\n-runsc,SET,15403.57\n-runsc,GET,15325.67\n-runsc,INCR,15269.51\n-runsc,LPUSH,15172.2\n-runsc,RPUSH,15117.16\n-runsc,LPOP,15257.86\n-runsc,RPOP,15188.33\n-runsc,SADD,15432.1\n-runsc,HSET,15163.0\n-runsc,SPOP,15561.78\n-runsc,LRANGE_100,13365.41\n-runsc,LRANGE_300,9520.18\n-runsc,LRANGE_500,8248.78\n-runsc,LRANGE_600,6544.07\n-runsc,MSET,14367.82\n+runtime,method,metric,result\n+runc,PING_INLINE,requests_per_second,30525.03\n+runc,PING_BULK,requests_per_second,30293.85\n+runc,SET,requests_per_second,30257.19\n+runc,GET,requests_per_second,30312.21\n+runc,INCR,requests_per_second,30525.03\n+runc,LPUSH,requests_per_second,30712.53\n+runc,RPUSH,requests_per_second,30459.95\n+runc,LPOP,requests_per_second,30367.45\n+runc,RPOP,requests_per_second,30665.44\n+runc,SADD,requests_per_second,30030.03\n+runc,HSET,requests_per_second,30656.04\n+runc,SPOP,requests_per_second,29940.12\n+runc,LRANGE_100,requests_per_second,24224.81\n+runc,LRANGE_300,requests_per_second,14302.06\n+runc,LRANGE_500,requests_per_second,11728.83\n+runc,LRANGE_600,requests_per_second,9900.99\n+runc,MSET,requests_per_second,30120.48\n+runsc,PING_INLINE,requests_per_second,14528.55\n+runsc,PING_BULK,requests_per_second,15627.44\n+runsc,SET,requests_per_second,15403.57\n+runsc,GET,requests_per_second,15325.67\n+runsc,INCR,requests_per_second,15269.51\n+runsc,LPUSH,requests_per_second,15172.2\n+runsc,RPUSH,requests_per_second,15117.16\n+runsc,LPOP,requests_per_second,15257.86\n+runsc,RPOP,requests_per_second,15188.33\n+runsc,SADD,requests_per_second,15432.1\n+runsc,HSET,requests_per_second,15163.0\n+runsc,SPOP,requests_per_second,15561.78\n+runsc,LRANGE_100,requests_per_second,13365.41\n+runsc,LRANGE_300,requests_per_second,9520.18\n+runsc,LRANGE_500,requests_per_second,8248.78\n+runsc,LRANGE_600,requests_per_second,6544.07\n+runsc,MSET,requests_per_second,14367.82\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add y-axis label to redis benchmark
Adds a "method" to the redis benchmark csv
fixes #69 |
259,858 | 04.06.2019 11:06:13 | 25,200 | 7436ea247bc946b36a7e5e6ca6019796ef76d85c | Fix Kokoro revision and 'go get usage'
As a convenience for debugging, also factor the scripts such that
they can be run without Kokoro. In the future, this may be used to add
additional presubmit hooks that run without Kokoro. | [
{
"change_type": "DELETE",
"old_path": "kokoro/run_build.sh",
"new_path": null,
"diff": "-#!/bin/bash\n-\n-# Copyright 2018 The gVisor Authors.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-# Fail on any error.\n-set -e\n-# Display commands to stderr.\n-set -x\n-\n-# Install the latest version of Bazel.\n-use_bazel.sh latest\n-\n-# Log the bazel path and version.\n-which bazel\n-bazel version\n-\n-cd git/repo\n-\n-# Build runsc.\n-bazel build //runsc\n-\n-# Move the runsc binary into \"latest\" directory, and also a directory with the\n-# current date.\n-latest_dir=\"${KOKORO_ARTIFACTS_DIR}\"/latest\n-today_dir=\"${KOKORO_ARTIFACTS_DIR}\"/\"$(date -Idate)\"\n-mkdir -p \"${latest_dir}\" \"${today_dir}\"\n-cp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${latest_dir}\"\n-sha512sum \"${latest_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${latest_dir}\"/runsc.sha512\n-cp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${today_dir}\"\n-sha512sum \"${today_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${today_dir}\"/runsc.sha512\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "kokoro/run_build.sh",
"diff": "+../tools/run_build.sh\n\\ No newline at end of file\n"
},
{
"change_type": "DELETE",
"old_path": "kokoro/run_tests.sh",
"new_path": null,
"diff": "-#!/bin/bash\n-\n-# Copyright 2018 The gVisor Authors.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-# Fail on any error. Treat unset variables as error. Print commands as executed.\n-set -eux\n-\n-\n-###################\n-# GLOBAL ENV VARS #\n-###################\n-\n-readonly WORKSPACE_DIR=\"${PWD}/git/repo\"\n-\n-# Used to configure RBE.\n-readonly CLOUD_PROJECT_ID=\"gvisor-rbe\"\n-readonly RBE_PROJECT_ID=\"projects/${CLOUD_PROJECT_ID}/instances/default_instance\"\n-\n-# Random runtime name to avoid collisions.\n-readonly RUNTIME=\"runsc_test_$((RANDOM))\"\n-\n-# Packages that will be built and tested.\n-readonly BUILD_PACKAGES=(\"//...\")\n-readonly TEST_PACKAGES=(\"//pkg/...\" \"//runsc/...\" \"//tools/...\")\n-\n-#######################\n-# BAZEL CONFIGURATION #\n-#######################\n-\n-# Install the latest version of Bazel, and log the location and version.\n-use_bazel.sh latest\n-which bazel\n-bazel version\n-\n-# Load the kvm module\n-sudo -n -E modprobe kvm\n-\n-# General Bazel build/test flags.\n-BAZEL_BUILD_FLAGS=(\n- \"--show_timestamps\"\n- \"--test_output=errors\"\n- \"--keep_going\"\n- \"--verbose_failures=true\"\n-)\n-\n-# Bazel build/test for RBE, a super-set of BAZEL_BUILD_FLAGS.\n-BAZEL_BUILD_RBE_FLAGS=(\n- \"${BAZEL_BUILD_FLAGS[@]}\"\n- \"--config=remote\"\n- \"--project_id=${CLOUD_PROJECT_ID}\"\n- \"--remote_instance_name=${RBE_PROJECT_ID}\"\n- \"--auth_credentials=${KOKORO_BAZEL_AUTH_CREDENTIAL}\"\n-)\n-\n-####################\n-# Helper Functions #\n-####################\n-\n-sanity_checks() {\n- cd ${WORKSPACE_DIR}\n- bazel run //:gazelle -- update-repos -from_file=go.mod\n- git diff --exit-code WORKSPACE\n-}\n-\n-build_everything() {\n- FLAVOR=\"${1}\"\n-\n- cd ${WORKSPACE_DIR}\n- bazel build \\\n- -c \"${FLAVOR}\" \"${BAZEL_BUILD_RBE_FLAGS[@]}\" \\\n- \"${BUILD_PACKAGES[@]}\"\n-}\n-\n-# Run simple tests runs the tests that require no special setup or\n-# configuration.\n-run_simple_tests() {\n- cd ${WORKSPACE_DIR}\n- bazel test \\\n- \"${BAZEL_BUILD_FLAGS[@]}\" \\\n- \"${TEST_PACKAGES[@]}\"\n-}\n-\n-install_runtime() {\n- cd ${WORKSPACE_DIR}\n- sudo -n ${WORKSPACE_DIR}/runsc/test/install.sh --runtime ${RUNTIME}\n-}\n-\n-# Install dependencies for the crictl tests.\n-install_crictl_test_deps() {\n- # Install containerd.\n- sudo -n -E apt-get update\n- sudo -n -E apt-get install -y btrfs-tools libseccomp-dev\n- # go get will exit with a status of 1 despite succeeding, so ignore errors.\n- go get -d github.com/containerd/containerd || true\n- cd ${GOPATH}/src/github.com/containerd/containerd\n- git checkout v1.2.2\n- make\n- sudo -n -E make install\n-\n- # Install crictl.\n- # go get will exit with a status of 1 despite succeeding, so ignore errors.\n- go get -d github.com/kubernetes-sigs/cri-tools || true\n- cd ${GOPATH}/src/github.com/kubernetes-sigs/cri-tools\n- git checkout tags/v1.11.0\n- make\n- sudo -n -E make install\n-\n- # Install gvisor-containerd-shim.\n- local latest=/tmp/gvisor-containerd-shim-latest\n- local 
shim_path=/tmp/gvisor-containerd-shim\n- wget --no-verbose https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/latest -O ${latest}\n- wget --no-verbose https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path}\n- chmod +x ${shim_path}\n- sudo -n -E mv ${shim_path} /usr/local/bin\n-\n- # Configure containerd-shim.\n- local shim_config_path=/etc/containerd\n- local shim_config_tmp_path=/tmp/gvisor-containerd-shim.toml\n- sudo -n -E mkdir -p ${shim_config_path}\n- cat > ${shim_config_tmp_path} <<-EOF\n- runc_shim = \"/usr/local/bin/containerd-shim\"\n-\n- [runsc_config]\n- debug = \"true\"\n- debug-log = \"/tmp/runsc-logs/\"\n- strace = \"true\"\n- file-access = \"shared\"\n-EOF\n- sudo mv ${shim_config_tmp_path} ${shim_config_path}\n-\n- # Configure CNI.\n- sudo -n -E env PATH=${PATH} ${GOPATH}/src/github.com/containerd/containerd/script/setup/install-cni\n-}\n-\n-# Run the tests that require docker.\n-run_docker_tests() {\n- cd ${WORKSPACE_DIR}\n-\n- # Run tests with a default runtime (runc).\n- bazel test \\\n- \"${BAZEL_BUILD_FLAGS[@]}\" \\\n- --test_env=RUNSC_RUNTIME=\"\" \\\n- --test_output=all \\\n- //runsc/test/image:image_test\n-\n- # These names are used to exclude tests not supported in certain\n- # configuration, e.g. save/restore not supported with hostnet.\n- declare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\n- for v in \"${variations[@]}\"; do\n- # Run runsc tests with docker that are tagged manual.\n- bazel test \\\n- \"${BAZEL_BUILD_FLAGS[@]}\" \\\n- --test_env=RUNSC_RUNTIME=\"${RUNTIME}${v}\" \\\n- --test_output=all \\\n- //runsc/test/image:image_test \\\n- //runsc/test/integration:integration_test\n- done\n-}\n-\n-# Run the tests that require root.\n-run_root_tests() {\n- cd ${WORKSPACE_DIR}\n- bazel build //runsc/test/root:root_test\n- local root_test=$(find -L ./bazel-bin/ -executable -type f -name root_test | grep __main__)\n- if [[ ! -f \"${root_test}\" ]]; then\n- echo \"root_test executable not found\"\n- exit 1\n- fi\n- sudo -n -E RUNSC_RUNTIME=\"${RUNTIME}\" RUNSC_EXEC=/tmp/\"${RUNTIME}\"/runsc ${root_test}\n-}\n-\n-# Run syscall unit tests.\n-run_syscall_tests() {\n- cd ${WORKSPACE_DIR}\n- bazel test \"${BAZEL_BUILD_RBE_FLAGS[@]}\" \\\n- --test_tag_filters=runsc_ptrace //test/syscalls/...\n-}\n-\n-run_runsc_do_tests() {\n- local runsc=$(find bazel-bin/runsc -type f -executable -name \"runsc\" | head -n1)\n-\n- # run runsc do without root privileges.\n- unshare -Ur ${runsc} --network=none --TESTONLY-unsafe-nonroot do true\n- unshare -Ur ${runsc} --TESTONLY-unsafe-nonroot --network=host do --netns=false true\n-\n- # run runsc do with root privileges.\n- sudo -n -E ${runsc} do true\n-}\n-\n-# Find and rename all test xml and log files so that Sponge can pick them up.\n-# XML files must be named sponge_log.xml, and log files must be named\n-# sponge_log.log. 
We move all such files into KOKORO_ARTIFACTS_DIR, in a\n-# subdirectory named with the test name.\n-upload_test_artifacts() {\n- cd ${WORKSPACE_DIR}\n- find -L \"bazel-testlogs\" -name \"test.xml\" -o -name \"test.log\" -o -name \"outputs.zip\" |\n- tar --create --files-from - --transform 's/test\\./sponge_log./' |\n- tar --extract --directory ${KOKORO_ARTIFACTS_DIR}\n- if [[ -d \"/tmp/${RUNTIME}/logs\" ]]; then\n- tar --create --gzip \"--file=${KOKORO_ARTIFACTS_DIR}/runsc-logs.tar.gz\" -C /tmp/ ${RUNTIME}/logs\n- fi\n-}\n-\n-# Finish runs at exit, even in the event of an error, and uploads all test\n-# artifacts.\n-finish() {\n- # Grab the last exit code, we will return it.\n- local exit_code=${?}\n- upload_test_artifacts\n- exit ${exit_code}\n-}\n-\n-# Run bazel in a docker container\n-build_in_docker() {\n- cd ${WORKSPACE_DIR}\n- bazel clean\n- bazel shutdown\n- make\n- make runsc\n- make bazel-shutdown\n-}\n-\n-########\n-# MAIN #\n-########\n-\n-main() {\n- # Register finish to run at exit.\n- trap finish EXIT\n-\n- # Build and run the simple tests.\n- sanity_checks\n- build_everything opt\n- run_simple_tests\n-\n- # So far so good. Install more deps and run the integration tests.\n- install_runtime\n- install_crictl_test_deps\n- run_docker_tests\n- run_root_tests\n-\n- run_syscall_tests\n- run_runsc_do_tests\n-\n- # Build other flavors too.\n- build_everything dbg\n-\n- build_in_docker\n- # No need to call \"finish\" here, it will happen at exit.\n-}\n-\n-# Kick it off.\n-main\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "kokoro/run_tests.sh",
"diff": "+../tools/run_tests.sh\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/run_build.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2018 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Fail on any error.\n+set -e\n+# Display commands to stderr.\n+set -x\n+\n+# Install the latest version of Bazel and log the version.\n+(which use_bazel.sh && use_bazel.sh latest) || which bazel\n+bazel version\n+\n+# Switch into the workspace and checkout the appropriate commit.\n+if [[ -v KOKORO_GIT_COMMIT ]]; then\n+ cd git/repo && git checkout \"${KOKORO_GIT_COMMIT}\"\n+fi\n+\n+# Build runsc.\n+bazel build //runsc\n+\n+# Move the runsc binary into \"latest\" directory, and also a directory with the\n+# current date.\n+if [[ -v KOKORO_ARTIFACTS_DIR ]]; then\n+ latest_dir=\"${KOKORO_ARTIFACTS_DIR}\"/latest\n+ today_dir=\"${KOKORO_ARTIFACTS_DIR}\"/\"$(date -Idate)\"\n+ mkdir -p \"${latest_dir}\" \"${today_dir}\"\n+ cp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${latest_dir}\"\n+ sha512sum \"${latest_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${latest_dir}\"/runsc.sha512\n+ cp bazel-bin/runsc/linux_amd64_pure_stripped/runsc \"${today_dir}\"\n+ sha512sum \"${today_dir}\"/runsc | awk '{print $1 \" runsc\"}' > \"${today_dir}\"/runsc.sha512\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/run_tests.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2018 The gVisor Authors.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Fail on any error. Treat unset variables as error. Print commands as executed.\n+set -eux\n+\n+###################\n+# GLOBAL ENV VARS #\n+###################\n+\n+if [[ -v KOKORO_GIT_COMMIT ]]; then\n+ readonly WORKSPACE_DIR=\"${PWD}/git/repo\"\n+else\n+ readonly WORKSPACE_DIR=\"${PWD}\"\n+fi\n+\n+# Used to configure RBE.\n+readonly CLOUD_PROJECT_ID=\"gvisor-rbe\"\n+readonly RBE_PROJECT_ID=\"projects/${CLOUD_PROJECT_ID}/instances/default_instance\"\n+\n+# Random runtime name to avoid collisions.\n+readonly RUNTIME=\"runsc_test_$((RANDOM))\"\n+\n+# Packages that will be built and tested.\n+readonly BUILD_PACKAGES=(\"//...\")\n+readonly TEST_PACKAGES=(\"//pkg/...\" \"//runsc/...\" \"//tools/...\")\n+\n+#######################\n+# BAZEL CONFIGURATION #\n+#######################\n+\n+# Install the latest version of Bazel and log the version.\n+(which use_bazel.sh && use_bazel.sh latest) || which bazel\n+bazel version\n+\n+# Checkout the appropriate commit.\n+if [[ -v KOKORO_GIT_COMMIT ]]; then\n+ (cd \"${WORKSPACE_DIR}\" && git checkout \"${KOKORO_GIT_COMMIT}\")\n+fi\n+\n+# Load the kvm module.\n+sudo -n -E modprobe kvm\n+\n+# General Bazel build/test flags.\n+BAZEL_BUILD_FLAGS=(\n+ \"--show_timestamps\"\n+ \"--test_output=errors\"\n+ \"--keep_going\"\n+ \"--verbose_failures=true\"\n+)\n+\n+# Bazel build/test for RBE, a super-set of BAZEL_BUILD_FLAGS.\n+BAZEL_BUILD_RBE_FLAGS=(\n+ \"${BAZEL_BUILD_FLAGS[@]}\"\n+ \"--config=remote\"\n+ \"--project_id=${CLOUD_PROJECT_ID}\"\n+ \"--remote_instance_name=${RBE_PROJECT_ID}\"\n+)\n+if [[ -v KOKORO_BAZEL_AUTH_CREDENTIAL ]]; then\n+ BAZEL_BUILD_RBE_FLAGS=(\n+ \"${BAZEL_BUILD_RBE_FLAGS[@]}\"\n+ \"--auth_credentials=${KOKORO_BAZEL_AUTH_CREDENTIAL}\"\n+ )\n+fi\n+\n+####################\n+# Helper Functions #\n+####################\n+\n+sanity_checks() {\n+ cd ${WORKSPACE_DIR}\n+ bazel run //:gazelle -- update-repos -from_file=go.mod\n+ git diff --exit-code WORKSPACE\n+}\n+\n+build_everything() {\n+ FLAVOR=\"${1}\"\n+\n+ cd ${WORKSPACE_DIR}\n+ bazel build \\\n+ -c \"${FLAVOR}\" \"${BAZEL_BUILD_RBE_FLAGS[@]}\" \\\n+ \"${BUILD_PACKAGES[@]}\"\n+}\n+\n+# Run simple tests runs the tests that require no special setup or\n+# configuration.\n+run_simple_tests() {\n+ cd ${WORKSPACE_DIR}\n+ bazel test \\\n+ \"${BAZEL_BUILD_FLAGS[@]}\" \\\n+ \"${TEST_PACKAGES[@]}\"\n+}\n+\n+install_runtime() {\n+ cd ${WORKSPACE_DIR}\n+ sudo -n ${WORKSPACE_DIR}/runsc/test/install.sh --runtime ${RUNTIME}\n+}\n+\n+# Install dependencies for the crictl tests.\n+install_crictl_test_deps() {\n+ sudo -n -E apt-get update\n+ sudo -n -E apt-get install -y btrfs-tools libseccomp-dev\n+\n+ # Install containerd.\n+ [[ -d containerd ]] || git clone https://github.com/containerd/containerd\n+ (cd containerd && git checkout v1.2.2 && make && sudo -n -E make install)\n+\n+ # Install crictl.\n+ [[ -d cri-tools ]] || git clone https://github.com/kubernetes-sigs/cri-tools\n+ 
(cd cri-tools && git checkout tags/v1.11.0 && make && sudo -n -E make install)\n+\n+ # Install gvisor-containerd-shim.\n+ local latest=/tmp/gvisor-containerd-shim-latest\n+ local shim_path=/tmp/gvisor-containerd-shim\n+ wget --no-verbose https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/latest -O ${latest}\n+ wget --no-verbose https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path}\n+ chmod +x ${shim_path}\n+ sudo -n -E mv ${shim_path} /usr/local/bin\n+\n+ # Configure containerd-shim.\n+ local shim_config_path=/etc/containerd\n+ local shim_config_tmp_path=/tmp/gvisor-containerd-shim.toml\n+ sudo -n -E mkdir -p ${shim_config_path}\n+ cat > ${shim_config_tmp_path} <<-EOF\n+ runc_shim = \"/usr/local/bin/containerd-shim\"\n+\n+ [runsc_config]\n+ debug = \"true\"\n+ debug-log = \"/tmp/runsc-logs/\"\n+ strace = \"true\"\n+ file-access = \"shared\"\n+EOF\n+ sudo mv ${shim_config_tmp_path} ${shim_config_path}\n+\n+ # Configure CNI.\n+ sudo -n -E env PATH=${PATH} containerd/script/setup/install-cni\n+}\n+\n+# Run the tests that require docker.\n+run_docker_tests() {\n+ cd ${WORKSPACE_DIR}\n+\n+ # Run tests with a default runtime (runc).\n+ bazel test \\\n+ \"${BAZEL_BUILD_FLAGS[@]}\" \\\n+ --test_env=RUNSC_RUNTIME=\"\" \\\n+ --test_output=all \\\n+ //runsc/test/image:image_test\n+\n+ # These names are used to exclude tests not supported in certain\n+ # configuration, e.g. save/restore not supported with hostnet.\n+ declare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\n+ for v in \"${variations[@]}\"; do\n+ # Run runsc tests with docker that are tagged manual.\n+ bazel test \\\n+ \"${BAZEL_BUILD_FLAGS[@]}\" \\\n+ --test_env=RUNSC_RUNTIME=\"${RUNTIME}${v}\" \\\n+ --test_output=all \\\n+ //runsc/test/image:image_test \\\n+ //runsc/test/integration:integration_test\n+ done\n+}\n+\n+# Run the tests that require root.\n+run_root_tests() {\n+ cd ${WORKSPACE_DIR}\n+ bazel build //runsc/test/root:root_test\n+ local root_test=$(find -L ./bazel-bin/ -executable -type f -name root_test | grep __main__)\n+ if [[ ! -f \"${root_test}\" ]]; then\n+ echo \"root_test executable not found\"\n+ exit 1\n+ fi\n+ sudo -n -E RUNSC_RUNTIME=\"${RUNTIME}\" RUNSC_EXEC=/tmp/\"${RUNTIME}\"/runsc ${root_test}\n+}\n+\n+# Run syscall unit tests.\n+run_syscall_tests() {\n+ cd ${WORKSPACE_DIR}\n+ bazel test \"${BAZEL_BUILD_RBE_FLAGS[@]}\" \\\n+ --test_tag_filters=runsc_ptrace //test/syscalls/...\n+}\n+\n+run_runsc_do_tests() {\n+ local runsc=$(find bazel-bin/runsc -type f -executable -name \"runsc\" | head -n1)\n+\n+ # run runsc do without root privileges.\n+ unshare -Ur ${runsc} --network=none --TESTONLY-unsafe-nonroot do true\n+ unshare -Ur ${runsc} --TESTONLY-unsafe-nonroot --network=host do --netns=false true\n+\n+ # run runsc do with root privileges.\n+ sudo -n -E ${runsc} do true\n+}\n+\n+# Find and rename all test xml and log files so that Sponge can pick them up.\n+# XML files must be named sponge_log.xml, and log files must be named\n+# sponge_log.log. 
We move all such files into KOKORO_ARTIFACTS_DIR, in a\n+# subdirectory named with the test name.\n+upload_test_artifacts() {\n+ # Skip if no kokoro directory.\n+ [[ -v KOKORO_ARTIFACTS_DIR ]] || return\n+\n+ cd ${WORKSPACE_DIR}\n+ find -L \"bazel-testlogs\" -name \"test.xml\" -o -name \"test.log\" -o -name \"outputs.zip\" |\n+ tar --create --files-from - --transform 's/test\\./sponge_log./' |\n+ tar --extract --directory ${KOKORO_ARTIFACTS_DIR}\n+ if [[ -d \"/tmp/${RUNTIME}/logs\" ]]; then\n+ tar --create --gzip \"--file=${KOKORO_ARTIFACTS_DIR}/runsc-logs.tar.gz\" -C /tmp/ ${RUNTIME}/logs\n+ fi\n+}\n+\n+# Finish runs at exit, even in the event of an error, and uploads all test\n+# artifacts.\n+finish() {\n+ # Grab the last exit code, we will return it.\n+ local exit_code=${?}\n+ upload_test_artifacts\n+ exit ${exit_code}\n+}\n+\n+# Run bazel in a docker container\n+build_in_docker() {\n+ cd ${WORKSPACE_DIR}\n+ bazel clean\n+ bazel shutdown\n+ make\n+ make runsc\n+ make bazel-shutdown\n+}\n+\n+########\n+# MAIN #\n+########\n+\n+main() {\n+ # Register finish to run at exit.\n+ trap finish EXIT\n+\n+ # Build and run the simple tests.\n+ sanity_checks\n+ build_everything opt\n+ run_simple_tests\n+\n+ # So far so good. Install more deps and run the integration tests.\n+ install_runtime\n+ install_crictl_test_deps\n+ run_docker_tests\n+ run_root_tests\n+\n+ run_syscall_tests\n+ run_runsc_do_tests\n+\n+ # Build other flavors too.\n+ build_everything dbg\n+\n+ build_in_docker\n+ # No need to call \"finish\" here, it will happen at exit.\n+}\n+\n+# Kick it off.\n+main\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix Kokoro revision and 'go get usage'
As a convenience for debugging, also factor the scripts such that
they can be run without Kokoro. In the future, this may be used to add
additional presubmit hooks that run without Kokoro.
PiperOrigin-RevId: 251474868 |
259,858 | 04.06.2019 14:42:25 | 25,200 | 6f92038ce0d2062c3dfd84fe65141ee09deeabfc | Use github directory if it exists.
Unfortunately, kokoro names the top-level directory per the SCM type. This
means there's no way to make the job names match; we simply need to probe for
the existence of the correct directory. | [
{
"change_type": "MODIFY",
"old_path": "tools/run_build.sh",
"new_path": "tools/run_build.sh",
"diff": "@@ -23,9 +23,11 @@ set -x\n(which use_bazel.sh && use_bazel.sh latest) || which bazel\nbazel version\n-# Switch into the workspace and checkout the appropriate commit.\n-if [[ -v KOKORO_GIT_COMMIT ]]; then\n- cd git/repo && git checkout \"${KOKORO_GIT_COMMIT}\"\n+# Switch into the workspace.\n+if [[ -v KOKORO_GIT_COMMIT ]] && [[ -d git/repo ]]; then\n+ cd git/repo\n+elif [[ -v KOKORO_GIT_COMMIT ]] && [[ -d github/repo ]]; then\n+ cd github/repo\nfi\n# Build runsc.\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/run_tests.sh",
"new_path": "tools/run_tests.sh",
"diff": "@@ -21,8 +21,10 @@ set -eux\n# GLOBAL ENV VARS #\n###################\n-if [[ -v KOKORO_GIT_COMMIT ]]; then\n+if [[ -v KOKORO_GIT_COMMIT ]] && [[ -d git/repo ]]; then\nreadonly WORKSPACE_DIR=\"${PWD}/git/repo\"\n+elif [[ -v KOKORO_GIT_COMMIT ]] && [[ -d github/repo ]]; then\n+ readonly WORKSPACE_DIR=\"${PWD}/github/repo\"\nelse\nreadonly WORKSPACE_DIR=\"${PWD}\"\nfi\n@@ -46,11 +48,6 @@ readonly TEST_PACKAGES=(\"//pkg/...\" \"//runsc/...\" \"//tools/...\")\n(which use_bazel.sh && use_bazel.sh latest) || which bazel\nbazel version\n-# Checkout the appropriate commit.\n-if [[ -v KOKORO_GIT_COMMIT ]]; then\n- (cd \"${WORKSPACE_DIR}\" && git checkout \"${KOKORO_GIT_COMMIT}\")\n-fi\n-\n# Load the kvm module.\nsudo -n -E modprobe kvm\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use github directory if it exists.
Unfortunately, kokoro names the top-level directory per the SCM type. This
means there's no way to make the job names match; we simply need to probe for
the existence of the correct directory.
PiperOrigin-RevId: 251519409 |
259,858 | 04.06.2019 23:08:20 | 25,200 | cecb71dc37a77d8e4e88cdfada92a37a72c67602 | Building containerd with go modules is broken, use GOPATH. | [
{
"change_type": "MODIFY",
"old_path": "tools/run_tests.sh",
"new_path": "tools/run_tests.sh",
"diff": "@@ -106,18 +106,31 @@ install_runtime() {\nsudo -n ${WORKSPACE_DIR}/runsc/test/install.sh --runtime ${RUNTIME}\n}\n+install_helper() {\n+ PACKAGE=\"${1}\"\n+ TAG=\"${2}\"\n+ GOPATH=\"${3}\"\n+\n+ # Clone the repository.\n+ mkdir -p \"${GOPATH}\"/src/$(dirname \"${PACKAGE}\") && \\\n+ git clone https://\"${PACKAGE}\" \"${GOPATH}\"/src/\"${PACKAGE}\"\n+\n+ # Checkout and build the repository.\n+ (cd \"${GOPATH}\"/src/\"${PACKAGE}\" && \\\n+ git checkout \"${TAG}\" && \\\n+ GOPATH=\"${GOPATH}\" make && \\\n+ sudo -n -E env GOPATH=\"${GOPATH}\" make install)\n+}\n+\n# Install dependencies for the crictl tests.\ninstall_crictl_test_deps() {\nsudo -n -E apt-get update\nsudo -n -E apt-get install -y btrfs-tools libseccomp-dev\n- # Install containerd.\n- [[ -d containerd ]] || git clone https://github.com/containerd/containerd\n- (cd containerd && git checkout v1.2.2 && make && sudo -n -E make install)\n-\n- # Install crictl.\n- [[ -d cri-tools ]] || git clone https://github.com/kubernetes-sigs/cri-tools\n- (cd cri-tools && git checkout tags/v1.11.0 && make && sudo -n -E make install)\n+ # Install containerd & cri-tools.\n+ GOPATH=$(mktemp -d --tmpdir gopathXXXXX)\n+ install_helper github.com/containerd/containerd v1.2.2 \"${GOPATH}\"\n+ install_helper github.com/kubernetes-sigs/cri-tools v1.11.0 \"${GOPATH}\"\n# Install gvisor-containerd-shim.\nlocal latest=/tmp/gvisor-containerd-shim-latest\n@@ -143,7 +156,8 @@ EOF\nsudo mv ${shim_config_tmp_path} ${shim_config_path}\n# Configure CNI.\n- sudo -n -E env PATH=${PATH} containerd/script/setup/install-cni\n+ (cd \"${GOPATH}\" && sudo -n -E env PATH=\"${PATH}\" GOPATH=\"${GOPATH}\" \\\n+ src/github.com/containerd/containerd/script/setup/install-cni)\n}\n# Run the tests that require docker.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Building containerd with go modules is broken, use GOPATH.
PiperOrigin-RevId: 251583707 |
259,858 | 01.06.2019 00:42:25 | 25,200 | 483794db83cf238ba0825a7a0ec0b47b4e173bc3 | Intercept all ?go-get=1 requests. | [
{
"change_type": "MODIFY",
"old_path": "cmd/gvisor-website/main.go",
"new_path": "cmd/gvisor-website/main.go",
"diff": "@@ -49,7 +49,28 @@ var prefixHelpers = map[string]string{\n\"c/linux/amd64\": \"/docs/user_guide/compatibility/amd64/#%s\",\n}\n-var validId = regexp.MustCompile(`^[A-Za-z0-9-]*/?$`)\n+var (\n+ validId = regexp.MustCompile(`^[A-Za-z0-9-]*/?$`)\n+ goGetHeader = `<meta name=\"go-import\" content=\"gvisor.dev git https://github.com/google/gvisor\">`\n+ goGetHTML5 = `<!doctype html><html><head><meta charset=utf-8>` + goGetHeader + `<title>Go-get</title></head><body></html>`\n+)\n+\n+// wrappedHandler wraps an http.Handler.\n+//\n+// If the query parameters include go-get=1, then we redirect to a single\n+// static page that allows us to serve arbitrary Go packages.\n+func wrappedHandler(h http.Handler) http.Handler {\n+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ gg, ok := r.URL.Query()[\"go-get\"]\n+ if ok && len(gg) == 1 && gg[0] == \"1\" {\n+ // Serve a trivial html page.\n+ w.Write([]byte(goGetHTML5))\n+ return\n+ }\n+ // Fallthrough.\n+ h.ServeHTTP(w, r)\n+ })\n+}\n// redirectWithQuery redirects to the given target url preserving query parameters.\nfunc redirectWithQuery(w http.ResponseWriter, r *http.Request, target string) {\n@@ -107,11 +128,11 @@ func registerRedirects(mux *http.ServeMux) {\nfor prefix, baseURL := range prefixHelpers {\np := \"/\" + prefix + \"/\"\n- mux.Handle(p, hostRedirectHandler(prefixRedirectHandler(p, baseURL)))\n+ mux.Handle(p, hostRedirectHandler(wrappedHandler(prefixRedirectHandler(p, baseURL))))\n}\nfor path, redirect := range redirects {\n- mux.Handle(path, hostRedirectHandler(redirectHandler(redirect)))\n+ mux.Handle(path, hostRedirectHandler(wrappedHandler(redirectHandler(redirect))))\n}\n}\n@@ -120,7 +141,7 @@ func registerStatic(mux *http.ServeMux, staticDir string) {\nif mux == nil {\nmux = http.DefaultServeMux\n}\n- mux.Handle(\"/\", hostRedirectHandler(http.FileServer(http.Dir(staticDir))))\n+ mux.Handle(\"/\", hostRedirectHandler(wrappedHandler(http.FileServer(http.Dir(staticDir)))))\n}\n// registerRebuild registers the rebuild handler.\n@@ -129,7 +150,7 @@ func registerRebuild(mux *http.ServeMux) {\nmux = http.DefaultServeMux\n}\n- mux.Handle(\"/rebuild\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ mux.Handle(\"/rebuild\", wrappedHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\nctx := context.Background()\ncredentials, err := google.FindDefaultCredentials(ctx, cloudbuild.CloudPlatformScope)\nif err != nil {\n@@ -170,7 +191,7 @@ func registerRebuild(mux *http.ServeMux) {\nhttp.Error(w, \"run error: \"+err.Error(), 500)\nreturn\n}\n- }))\n+ })))\n}\nfunc envFlagString(name, def string) string {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Intercept all ?go-get=1 requests. |
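The wrappedHandler change serves a static go-import page whenever a request carries ?go-get=1, so `go get` resolution is answered before any of the real content handlers run. Below is a small self-contained sketch of the same pattern; the module path and page contents are placeholders rather than the real gvisor.dev values, and httptest is used only to exercise both branches.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// goGetPage stands in for the static go-import page served above; the module
// path is an example, not the real one.
const goGetPage = `<!doctype html><html><head><meta charset=utf-8>` +
	`<meta name="go-import" content="example.com/mod git https://github.com/example/mod">` +
	`<title>Go-get</title></head><body></html>`

// withGoGet mirrors the wrappedHandler pattern: requests carrying ?go-get=1
// get the static page, everything else falls through to h.
func withGoGet(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if gg := r.URL.Query()["go-get"]; len(gg) == 1 && gg[0] == "1" {
			fmt.Fprint(w, goGetPage)
			return
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	h := withGoGet(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "normal page")
	}))

	for _, url := range []string{"/pkg", "/pkg?go-get=1"} {
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, httptest.NewRequest("GET", url, nil))
		fmt.Printf("%s -> %q\n", url, rec.Body.String())
	}
}
```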
259,853 | 05.06.2019 22:50:48 | 25,200 | 79f7cb6c1c4c16e3aca44d7fdc8e9f2487a605cf | netstack/sniffer: log GSO attributes | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sniffer/sniffer.go",
"new_path": "pkg/tcpip/link/sniffer/sniffer.go",
"diff": "@@ -118,7 +118,7 @@ func NewWithFile(lower tcpip.LinkEndpointID, file *os.File, snapLen uint32) (tcp\n// logs the packet before forwarding to the actual dispatcher.\nfunc (e *endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nif atomic.LoadUint32(&LogPackets) == 1 && e.file == nil {\n- logPacket(\"recv\", protocol, vv.First())\n+ logPacket(\"recv\", protocol, vv.First(), nil)\n}\nif e.file != nil && atomic.LoadUint32(&LogPacketsToFile) == 1 {\nvs := vv.Views()\n@@ -198,7 +198,7 @@ func (e *endpoint) GSOMaxSize() uint32 {\n// the request to the lower endpoint.\nfunc (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, hdr buffer.Prependable, payload buffer.VectorisedView, protocol tcpip.NetworkProtocolNumber) *tcpip.Error {\nif atomic.LoadUint32(&LogPackets) == 1 && e.file == nil {\n- logPacket(\"send\", protocol, hdr.View())\n+ logPacket(\"send\", protocol, hdr.View(), gso)\n}\nif e.file != nil && atomic.LoadUint32(&LogPacketsToFile) == 1 {\nhdrBuf := hdr.View()\n@@ -240,7 +240,7 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, hdr buffer.Prepen\nreturn e.lower.WritePacket(r, gso, hdr, payload, protocol)\n}\n-func logPacket(prefix string, protocol tcpip.NetworkProtocolNumber, b buffer.View) {\n+func logPacket(prefix string, protocol tcpip.NetworkProtocolNumber, b buffer.View, gso *stack.GSO) {\n// Figure out the network layer info.\nvar transProto uint8\nsrc := tcpip.Address(\"unknown\")\n@@ -404,5 +404,9 @@ func logPacket(prefix string, protocol tcpip.NetworkProtocolNumber, b buffer.Vie\nreturn\n}\n+ if gso != nil {\n+ details += fmt.Sprintf(\" gso: %+v\", gso)\n+ }\n+\nlog.Infof(\"%s %s %v:%v -> %v:%v len:%d id:%04x %s\", prefix, transName, src, srcPort, dst, dstPort, size, id, details)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack/sniffer: log GSO attributes
PiperOrigin-RevId: 251788534 |
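The sniffer change threads the optional GSO pointer down to logPacket and appends its fields only when one is present. A tiny sketch of that nil-guarded %+v pattern follows; the gso struct here is a stand-in with made-up fields, not the real stack.GSO.

```go
package main

import "fmt"

// gso is an illustrative stand-in for stack.GSO.
type gso struct {
	Type      int
	MSS       uint16
	NeedsCsum bool
}

// logPacket mirrors the sniffer change: g is optional, and its attributes are
// appended to the details string only when a value is present.
func logPacket(prefix, details string, g *gso) string {
	if g != nil {
		details += fmt.Sprintf(" gso: %+v", g)
	}
	return fmt.Sprintf("%s tcp 10.0.0.1:1234 -> 10.0.0.2:80 len:1500 %s", prefix, details)
}

func main() {
	fmt.Println(logPacket("recv", "flags: S", nil))
	fmt.Println(logPacket("send", "flags: S", &gso{Type: 1, MSS: 1460, NeedsCsum: true}))
}
```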
259,962 | 06.06.2019 08:05:46 | 25,200 | 85be01b42d4ac48698d1e8f50a4cf2607a4fc50b | Add multi-fd support to fdbased endpoint.
This allows an fdbased endpoint to have multiple underlying FDs from which
packets can be read and dispatched, and to which packets can be written.
This should allow for higher throughput as well as better scalability of the
network stack as the number of connections increases.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "// FD based endpoints can be used in the networking stack by calling New() to\n// create a new endpoint, and then passing it as an argument to\n// Stack.CreateNIC().\n+//\n+// FD based endpoints can use more than one file descriptor to read incoming\n+// packets. If there are more than one FDs specified and the underlying FD is an\n+// AF_PACKET then the endpoint will enable FANOUT mode on the socket so that the\n+// host kernel will consistently hash the packets to the sockets. This ensures\n+// that packets for the same TCP streams are not reordered.\n+//\n+// Similarly if more than one FD's are specified where the underlying FD is not\n+// AF_PACKET then it's the caller's responsibility to ensure that all inbound\n+// packets on the descriptors are consistently 5 tuple hashed to one of the\n+// descriptors to prevent TCP reordering.\n+//\n+// Since netstack today does not compute 5 tuple hashes for outgoing packets we\n+// only use the first FD to write outbound packets. Once 5 tuple hashes for\n+// all outbound packets are available we will make use of all underlying FD's to\n+// write outbound packets.\npackage fdbased\nimport (\n\"fmt\"\n\"syscall\"\n+ \"golang.org/x/sys/unix\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/header\"\n@@ -65,8 +82,10 @@ const (\n)\ntype endpoint struct {\n- // fd is the file descriptor used to send and receive packets.\n- fd int\n+ // fds is the set of file descriptors each identifying one inbound/outbound\n+ // channel. The endpoint will dispatch from all inbound channels as well as\n+ // hash outbound packets to specific channels based on the packet hash.\n+ fds []int\n// mtu (maximum transmission unit) is the maximum size of a packet.\nmtu uint32\n@@ -85,7 +104,7 @@ type endpoint struct {\n// its end of the communication pipe.\nclosed func(*tcpip.Error)\n- inboundDispatcher linkDispatcher\n+ inboundDispatchers []linkDispatcher\ndispatcher stack.NetworkDispatcher\n// packetDispatchMode controls the packet dispatcher used by this\n@@ -99,16 +118,46 @@ type endpoint struct {\n// Options specify the details about the fd-based endpoint to be created.\ntype Options struct {\n- FD int\n+ // FDs is a set of FDs used to read/write packets.\n+ FDs []int\n+\n+ // MTU is the mtu to use for this endpoint.\nMTU uint32\n+\n+ // EthernetHeader if true, indicates that the endpoint should read/write\n+ // ethernet frames instead of IP packets.\nEthernetHeader bool\n+\n+ // ClosedFunc is a function to be called when an endpoint's peer (if\n+ // any) closes its end of the communication pipe.\nClosedFunc func(*tcpip.Error)\n+\n+ // Address is the link address for this endpoint. Only used if\n+ // EthernetHeader is true.\nAddress tcpip.LinkAddress\n+\n+ // SaveRestore if true, indicates that this NIC capability set should\n+ // include CapabilitySaveRestore\nSaveRestore bool\n+\n+ // DisconnectOk if true, indicates that this NIC capability set should\n+ // include CapabilityDisconnectOk.\nDisconnectOk bool\n+\n+ // GSOMaxSize is the maximum GSO packet size. 
It is zero if GSO is\n+ // disabled.\nGSOMaxSize uint32\n+\n+ // PacketDispatchMode specifies the type of inbound dispatcher to be\n+ // used for this endpoint.\nPacketDispatchMode PacketDispatchMode\n+\n+ // TXChecksumOffload if true, indicates that this endpoints capability\n+ // set should include CapabilityTXChecksumOffload.\nTXChecksumOffload bool\n+\n+ // RXChecksumOffload if true, indicates that this endpoints capability\n+ // set should include CapabilityRXChecksumOffload.\nRXChecksumOffload bool\n}\n@@ -117,10 +166,6 @@ type Options struct {\n// Makes fd non-blocking, but does not take ownership of fd, which must remain\n// open for the lifetime of the returned endpoint.\nfunc New(opts *Options) (tcpip.LinkEndpointID, error) {\n- if err := syscall.SetNonblock(opts.FD, true); err != nil {\n- return 0, fmt.Errorf(\"syscall.SetNonblock(%v) failed: %v\", opts.FD, err)\n- }\n-\ncaps := stack.LinkEndpointCapabilities(0)\nif opts.RXChecksumOffload {\ncaps |= stack.CapabilityRXChecksumOffload\n@@ -144,8 +189,12 @@ func New(opts *Options) (tcpip.LinkEndpointID, error) {\ncaps |= stack.CapabilityDisconnectOk\n}\n+ if len(opts.FDs) == 0 {\n+ return 0, fmt.Errorf(\"opts.FD is empty, at least one FD must be specified\")\n+ }\n+\ne := &endpoint{\n- fd: opts.FD,\n+ fds: opts.FDs,\nmtu: opts.MTU,\ncaps: caps,\nclosed: opts.ClosedFunc,\n@@ -154,7 +203,14 @@ func New(opts *Options) (tcpip.LinkEndpointID, error) {\npacketDispatchMode: opts.PacketDispatchMode,\n}\n- isSocket, err := isSocketFD(e.fd)\n+ // Create per channel dispatchers.\n+ for i := 0; i < len(e.fds); i++ {\n+ fd := e.fds[i]\n+ if err := syscall.SetNonblock(fd, true); err != nil {\n+ return 0, fmt.Errorf(\"syscall.SetNonblock(%v) failed: %v\", fd, err)\n+ }\n+\n+ isSocket, err := isSocketFD(fd)\nif err != nil {\nreturn 0, err\n}\n@@ -164,36 +220,54 @@ func New(opts *Options) (tcpip.LinkEndpointID, error) {\ne.gsoMaxSize = opts.GSOMaxSize\n}\n}\n- e.inboundDispatcher, err = createInboundDispatcher(e, isSocket)\n+ inboundDispatcher, err := createInboundDispatcher(e, fd, isSocket)\nif err != nil {\nreturn 0, fmt.Errorf(\"createInboundDispatcher(...) 
= %v\", err)\n}\n+ e.inboundDispatchers = append(e.inboundDispatchers, inboundDispatcher)\n+ }\nreturn stack.RegisterLinkEndpoint(e), nil\n}\n-func createInboundDispatcher(e *endpoint, isSocket bool) (linkDispatcher, error) {\n+func createInboundDispatcher(e *endpoint, fd int, isSocket bool) (linkDispatcher, error) {\n// By default use the readv() dispatcher as it works with all kinds of\n// FDs (tap/tun/unix domain sockets and af_packet).\n- inboundDispatcher, err := newReadVDispatcher(e.fd, e)\n+ inboundDispatcher, err := newReadVDispatcher(fd, e)\nif err != nil {\n- return nil, fmt.Errorf(\"newReadVDispatcher(%d, %+v) = %v\", e.fd, e, err)\n+ return nil, fmt.Errorf(\"newReadVDispatcher(%d, %+v) = %v\", fd, e, err)\n}\nif isSocket {\n+ sa, err := unix.Getsockname(fd)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"unix.Getsockname(%d) = %v\", fd, err)\n+ }\n+ switch sa.(type) {\n+ case *unix.SockaddrLinklayer:\n+ // enable PACKET_FANOUT mode is the underlying socket is\n+ // of type AF_PACKET.\n+ const fanoutID = 1\n+ const fanoutType = 0x8000 // PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG\n+ fanoutArg := fanoutID | fanoutType<<16\n+ if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_FANOUT, fanoutArg); err != nil {\n+ return nil, fmt.Errorf(\"failed to enable PACKET_FANOUT option: %v\", err)\n+ }\n+ }\n+\nswitch e.packetDispatchMode {\ncase PacketMMap:\n- inboundDispatcher, err = newPacketMMapDispatcher(e.fd, e)\n+ inboundDispatcher, err = newPacketMMapDispatcher(fd, e)\nif err != nil {\n- return nil, fmt.Errorf(\"newPacketMMapDispatcher(%d, %+v) = %v\", e.fd, e, err)\n+ return nil, fmt.Errorf(\"newPacketMMapDispatcher(%d, %+v) = %v\", fd, e, err)\n}\ncase RecvMMsg:\n// If the provided FD is a socket then we optimize\n// packet reads by using recvmmsg() instead of read() to\n// read packets in a batch.\n- inboundDispatcher, err = newRecvMMsgDispatcher(e.fd, e)\n+ inboundDispatcher, err = newRecvMMsgDispatcher(fd, e)\nif err != nil {\n- return nil, fmt.Errorf(\"newRecvMMsgDispatcher(%d, %+v) = %v\", e.fd, e, err)\n+ return nil, fmt.Errorf(\"newRecvMMsgDispatcher(%d, %+v) = %v\", fd, e, err)\n}\n}\n}\n@@ -215,7 +289,9 @@ func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\n// Link endpoints are not savable. 
When transportation endpoints are\n// saved, they stop sending outgoing packets and all incoming packets\n// are rejected.\n- go e.dispatchLoop() // S/R-SAFE: See above.\n+ for i := range e.inboundDispatchers {\n+ go e.dispatchLoop(e.inboundDispatchers[i]) // S/R-SAFE: See above.\n+ }\n}\n// IsAttached implements stack.LinkEndpoint.IsAttached.\n@@ -305,26 +381,26 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, hdr buffer.Prepen\n}\n}\n- return rawfile.NonBlockingWrite3(e.fd, vnetHdrBuf, hdr.View(), payload.ToView())\n+ return rawfile.NonBlockingWrite3(e.fds[0], vnetHdrBuf, hdr.View(), payload.ToView())\n}\nif payload.Size() == 0 {\n- return rawfile.NonBlockingWrite(e.fd, hdr.View())\n+ return rawfile.NonBlockingWrite(e.fds[0], hdr.View())\n}\n- return rawfile.NonBlockingWrite3(e.fd, hdr.View(), payload.ToView(), nil)\n+ return rawfile.NonBlockingWrite3(e.fds[0], hdr.View(), payload.ToView(), nil)\n}\n// WriteRawPacket writes a raw packet directly to the file descriptor.\nfunc (e *endpoint) WriteRawPacket(dest tcpip.Address, packet []byte) *tcpip.Error {\n- return rawfile.NonBlockingWrite(e.fd, packet)\n+ return rawfile.NonBlockingWrite(e.fds[0], packet)\n}\n// dispatchLoop reads packets from the file descriptor in a loop and dispatches\n// them to the network stack.\n-func (e *endpoint) dispatchLoop() *tcpip.Error {\n+func (e *endpoint) dispatchLoop(inboundDispatcher linkDispatcher) *tcpip.Error {\nfor {\n- cont, err := e.inboundDispatcher.dispatch()\n+ cont, err := inboundDispatcher.dispatch()\nif err != nil || !cont {\nif e.closed != nil {\ne.closed(err)\n@@ -363,7 +439,7 @@ func NewInjectable(fd int, mtu uint32, capabilities stack.LinkEndpointCapabiliti\nsyscall.SetNonblock(fd, true)\ne := &InjectableEndpoint{endpoint: endpoint{\n- fd: fd,\n+ fds: []int{fd},\nmtu: mtu,\ncaps: capabilities,\n}}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -67,7 +67,7 @@ func newContext(t *testing.T, opt *Options) *context {\ndone <- struct{}{}\n}\n- opt.FD = fds[1]\n+ opt.FDs = []int{fds[1]}\nepID, err := New(opt)\nif err != nil {\nt.Fatalf(\"Failed to create FD endpoint: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"diff": "@@ -137,7 +137,7 @@ func main() {\nlog.Fatal(err)\n}\n- linkID, err := fdbased.New(&fdbased.Options{FD: fd, MTU: mtu})\n+ linkID, err := fdbased.New(&fdbased.Options{FDs: []int{fd}, MTU: mtu})\nif err != nil {\nlog.Fatal(err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"diff": "@@ -129,7 +129,7 @@ func main() {\n}\nlinkID, err := fdbased.New(&fdbased.Options{\n- FD: fd,\n+ FDs: []int{fd},\nMTU: mtu,\nEthernetHeader: *tap,\nAddress: tcpip.LinkAddress(maddr),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/urpc/urpc.go",
"new_path": "pkg/urpc/urpc.go",
"diff": "@@ -35,7 +35,7 @@ import (\n)\n// maxFiles determines the maximum file payload.\n-const maxFiles = 16\n+const maxFiles = 32\n// ErrTooManyFiles is returned when too many file descriptors are mapped.\nvar ErrTooManyFiles = errors.New(\"too many files\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -221,6 +221,11 @@ type Config struct {\n// user, and without chrooting the sandbox process. This can be\n// necessary in test environments that have limited capabilities.\nTestOnlyAllowRunAsCurrentUserWithoutChroot bool\n+\n+ // NumNetworkChannels controls the number of AF_PACKET sockets that map\n+ // to the same underlying network device. This allows netstack to better\n+ // scale for high throughput use cases.\n+ NumNetworkChannels int\n}\n// ToFlags returns a slice of flags that correspond to the given Config.\n@@ -244,6 +249,7 @@ func (c *Config) ToFlags() []string {\n\"--panic-signal=\" + strconv.Itoa(c.PanicSignal),\n\"--profile=\" + strconv.FormatBool(c.ProfileEnable),\n\"--net-raw=\" + strconv.FormatBool(c.EnableRaw),\n+ \"--num-network-channels=\" + strconv.Itoa(c.NumNetworkChannels),\n}\nif c.TestOnlyAllowRunAsCurrentUserWithoutChroot {\n// Only include if set since it is never to be used by users.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/network.go",
"new_path": "runsc/boot/network.go",
"diff": "@@ -57,6 +57,10 @@ type FDBasedLink struct {\nRoutes []Route\nGSOMaxSize uint32\nLinkAddress []byte\n+\n+ // NumChannels controls how many underlying FD's are to be used to\n+ // create this endpoint.\n+ NumChannels int\n}\n// LoopbackLink configures a loopback li nk.\n@@ -69,7 +73,8 @@ type LoopbackLink struct {\n// CreateLinksAndRoutesArgs are arguments to CreateLinkAndRoutes.\ntype CreateLinksAndRoutesArgs struct {\n// FilePayload contains the fds associated with the FDBasedLinks. The\n- // two slices must have the same length.\n+ // number of fd's should match the sum of the NumChannels field of the\n+ // FDBasedLink entries below.\nurpc.FilePayload\nLoopbackLinks []LoopbackLink\n@@ -95,8 +100,12 @@ func (r *Route) toTcpipRoute(id tcpip.NICID) tcpip.Route {\n// CreateLinksAndRoutes creates links and routes in a network stack. It should\n// only be called once.\nfunc (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct{}) error {\n- if len(args.FilePayload.Files) != len(args.FDBasedLinks) {\n- return fmt.Errorf(\"FilePayload must be same length at FDBasedLinks\")\n+ wantFDs := 0\n+ for _, l := range args.FDBasedLinks {\n+ wantFDs += l.NumChannels\n+ }\n+ if got := len(args.FilePayload.Files); got != wantFDs {\n+ return fmt.Errorf(\"args.FilePayload.Files has %d FD's but we need %d entries based on FDBasedLinks\", got, wantFDs)\n}\nvar nicID tcpip.NICID\n@@ -123,20 +132,26 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\n}\n}\n- for i, link := range args.FDBasedLinks {\n+ fdOffset := 0\n+ for _, link := range args.FDBasedLinks {\nnicID++\nnicids[link.Name] = nicID\n+ FDs := []int{}\n+ for j := 0; j < link.NumChannels; j++ {\n// Copy the underlying FD.\n- oldFD := args.FilePayload.Files[i].Fd()\n+ oldFD := args.FilePayload.Files[fdOffset].Fd()\nnewFD, err := syscall.Dup(int(oldFD))\nif err != nil {\nreturn fmt.Errorf(\"failed to dup FD %v: %v\", oldFD, err)\n}\n+ FDs = append(FDs, newFD)\n+ fdOffset++\n+ }\nmac := tcpip.LinkAddress(link.LinkAddress)\nlinkEP, err := fdbased.New(&fdbased.Options{\n- FD: newFD,\n+ FDs: FDs,\nMTU: uint32(link.MTU),\nEthernetHeader: true,\nAddress: mac,\n@@ -148,7 +163,7 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\nreturn err\n}\n- log.Infof(\"Enabling interface %q with id %d on addresses %+v (%v)\", link.Name, nicID, link.Addresses, mac)\n+ log.Infof(\"Enabling interface %q with id %d on addresses %+v (%v) w/ %d channels\", link.Name, nicID, link.Addresses, mac, link.NumChannels)\nif err := n.createNICWithAddrs(nicID, link.Name, linkEP, link.Addresses, false /* loopback */); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -69,7 +69,7 @@ var (\npanicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\nprofile = flag.Bool(\"profile\", false, \"prepares the sandbox to use Golang profiler. Note that enabling profiler loosens the seccomp protection added to the sandbox (DO NOT USE IN PRODUCTION).\")\nnetRaw = flag.Bool(\"net-raw\", false, \"enable raw sockets. When false, raw sockets are disabled by removing CAP_NET_RAW from containers (`runsc exec` will still be able to utilize raw sockets). Raw sockets allow malicious containers to craft packets and potentially attack the network.\")\n-\n+ numNetworkChannels = flag.Int(\"num-network-channels\", 1, \"number of underlying channels(FDs) to use for network link endpoints.\")\ntestOnlyAllowRunAsCurrentUserWithoutChroot = flag.Bool(\"TESTONLY-unsafe-nonroot\", false, \"TEST ONLY; do not ever use! This skips many security measures that isolate the host from the sandbox.\")\n)\n@@ -141,6 +141,10 @@ func main() {\ncmd.Fatalf(\"%v\", err)\n}\n+ if *numNetworkChannels <= 0 {\n+ cmd.Fatalf(\"num_network_channels must be > 0, got: %d\", *numNetworkChannels)\n+ }\n+\n// Create a new Config from the flags.\nconf := &boot.Config{\nRootDir: *rootDir,\n@@ -162,6 +166,7 @@ func main() {\nProfileEnable: *profile,\nEnableRaw: *netRaw,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: *testOnlyAllowRunAsCurrentUserWithoutChroot,\n+ NumNetworkChannels: *numNetworkChannels,\n}\nif len(*straceSyscalls) != 0 {\nconf.StraceSyscalls = strings.Split(*straceSyscalls, \",\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -68,7 +68,7 @@ func setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *boot.Confi\n// Build the path to the net namespace of the sandbox process.\n// This is what we will copy.\nnsPath := filepath.Join(\"/proc\", strconv.Itoa(pid), \"ns/net\")\n- if err := createInterfacesAndRoutesFromNS(conn, nsPath, conf.GSO); err != nil {\n+ if err := createInterfacesAndRoutesFromNS(conn, nsPath, conf.GSO, conf.NumNetworkChannels); err != nil {\nreturn fmt.Errorf(\"creating interfaces from net namespace %q: %v\", nsPath, err)\n}\ncase boot.NetworkHost:\n@@ -138,7 +138,7 @@ func isRootNS() (bool, error) {\n// createInterfacesAndRoutesFromNS scrapes the interface and routes from the\n// net namespace with the given path, creates them in the sandbox, and removes\n// them from the host.\n-func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO bool) error {\n+func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO bool, numNetworkChannels int) error {\n// Join the network namespace that we will be copying.\nrestore, err := joinNetNS(nsPath)\nif err != nil {\n@@ -202,25 +202,6 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\ncontinue\n}\n- // Create the socket.\n- const protocol = 0x0300 // htons(ETH_P_ALL)\n- fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)\n- if err != nil {\n- return fmt.Errorf(\"unable to create raw socket: %v\", err)\n- }\n- deviceFile := os.NewFile(uintptr(fd), \"raw-device-fd\")\n-\n- // Bind to the appropriate device.\n- ll := syscall.SockaddrLinklayer{\n- Protocol: protocol,\n- Ifindex: iface.Index,\n- Hatype: 0, // No ARP type.\n- Pkttype: syscall.PACKET_OTHERHOST,\n- }\n- if err := syscall.Bind(fd, &ll); err != nil {\n- return fmt.Errorf(\"unable to bind to %q: %v\", iface.Name, err)\n- }\n-\n// Scrape the routes before removing the address, since that\n// will remove the routes as well.\nroutes, def, err := routesForIface(iface)\n@@ -239,6 +220,7 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\nName: iface.Name,\nMTU: iface.MTU,\nRoutes: routes,\n+ NumChannels: numNetworkChannels,\n}\n// Get the link for the interface.\n@@ -248,30 +230,23 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\n}\nlink.LinkAddress = []byte(ifaceLink.Attrs().HardwareAddr)\n- if enableGSO {\n- gso, err := isGSOEnabled(fd, iface.Name)\n+ log.Debugf(\"Setting up network channels\")\n+ // Create the socket for the device.\n+ for i := 0; i < link.NumChannels; i++ {\n+ log.Debugf(\"Creating Channel %d\", i)\n+ socketEntry, err := createSocket(iface, ifaceLink, enableGSO)\nif err != nil {\n- return fmt.Errorf(\"getting GSO for interface %q: %v\", iface.Name, err)\n+ return fmt.Errorf(\"failed to createSocket for %s : %v\", iface.Name, err)\n}\n- if gso {\n- if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {\n- return fmt.Errorf(\"unable to enable the PACKET_VNET_HDR option: %v\", err)\n- }\n- link.GSOMaxSize = ifaceLink.Attrs().GSOMaxSize\n+ if i == 0 {\n+ link.GSOMaxSize = socketEntry.gsoMaxSize\n} else {\n- log.Infof(\"GSO not available in host.\")\n+ if link.GSOMaxSize != socketEntry.gsoMaxSize {\n+ return fmt.Errorf(\"inconsistent gsoMaxSize %d and %d when creating multiple channels for same interface: %s\",\n+ link.GSOMaxSize, socketEntry.gsoMaxSize, iface.Name)\n}\n}\n-\n- // Use SO_RCVBUFFORCE because on linux the receive buffer for an\n- // AF_PACKET socket 
is capped by \"net.core.rmem_max\". rmem_max\n- // defaults to a unusually low value of 208KB. This is too low\n- // for gVisor to be able to receive packets at high throughputs\n- // without incurring packet drops.\n- const rcvBufSize = 4 << 20 // 4MB.\n-\n- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, rcvBufSize); err != nil {\n- return fmt.Errorf(\"failed to increase socket rcv buffer to %d: %v\", rcvBufSize, err)\n+ args.FilePayload.Files = append(args.FilePayload.Files, socketEntry.deviceFile)\n}\n// Collect the addresses for the interface, enable forwarding,\n@@ -285,7 +260,6 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\n}\n}\n- args.FilePayload.Files = append(args.FilePayload.Files, deviceFile)\nargs.FDBasedLinks = append(args.FDBasedLinks, link)\n}\n@@ -296,6 +270,61 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\nreturn nil\n}\n+type socketEntry struct {\n+ deviceFile *os.File\n+ gsoMaxSize uint32\n+}\n+\n+// createSocket creates an underlying AF_PACKET socket and configures it for use by\n+// the sentry and returns an *os.File that wraps the underlying socket fd.\n+func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (*socketEntry, error) {\n+ // Create the socket.\n+ const protocol = 0x0300 // htons(ETH_P_ALL)\n+ fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"unable to create raw socket: %v\", err)\n+ }\n+ deviceFile := os.NewFile(uintptr(fd), \"raw-device-fd\")\n+ // Bind to the appropriate device.\n+ ll := syscall.SockaddrLinklayer{\n+ Protocol: protocol,\n+ Ifindex: iface.Index,\n+ Hatype: 0, // No ARP type.\n+ Pkttype: syscall.PACKET_OTHERHOST,\n+ }\n+ if err := syscall.Bind(fd, &ll); err != nil {\n+ return nil, fmt.Errorf(\"unable to bind to %q: %v\", iface.Name, err)\n+ }\n+\n+ gsoMaxSize := uint32(0)\n+ if enableGSO {\n+ gso, err := isGSOEnabled(fd, iface.Name)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"getting GSO for interface %q: %v\", iface.Name, err)\n+ }\n+ if gso {\n+ if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {\n+ return nil, fmt.Errorf(\"unable to enable the PACKET_VNET_HDR option: %v\", err)\n+ }\n+ gsoMaxSize = ifaceLink.Attrs().GSOMaxSize\n+ } else {\n+ log.Infof(\"GSO not available in host.\")\n+ }\n+ }\n+\n+ // Use SO_RCVBUFFORCE because on linux the receive buffer for an\n+ // AF_PACKET socket is capped by \"net.core.rmem_max\". rmem_max\n+ // defaults to a unusually low value of 208KB. This is too low\n+ // for gVisor to be able to receive packets at high throughputs\n+ // without incurring packet drops.\n+ const rcvBufSize = 4 << 20 // 4MB.\n+\n+ if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, rcvBufSize); err != nil {\n+ return nil, fmt.Errorf(\"failed to increase socket rcv buffer to %d: %v\", rcvBufSize, err)\n+ }\n+ return &socketEntry{deviceFile, gsoMaxSize}, nil\n+}\n+\n// loopbackLinks collects the links for a loopback interface.\nfunc loopbackLinks(iface net.Interface, addrs []net.Addr) ([]boot.LoopbackLink, error) {\nvar links []boot.LoopbackLink\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -136,6 +136,7 @@ func TestConfig() *boot.Config {\nStrace: true,\nFileAccess: boot.FileAccessExclusive,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: true,\n+ NumNetworkChannels: 1,\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add multi-fd support to fdbased endpoint.
This allows an fdbased endpoint to have multiple underlying FDs from which
packets can be read and dispatched, and to which packets can be written.
This should allow for higher throughput as well as better scalability of the
network stack as the number of connections increases.
Updates #231
PiperOrigin-RevId: 251852825 |
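The key host-side mechanism in this change is PACKET_FANOUT: several AF_PACKET sockets bound to the same interface join one fanout group, and the kernel hashes each flow to exactly one of them, which keeps TCP segments for a connection in order. Below is a standalone sketch of that setup (Linux/amd64 only, needs CAP_NET_RAW); the interface name, fanout ID, and locally defined option constants are assumptions that mirror the values in the diff rather than names imported from x/sys/unix.

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

const (
	protoAll     = 0x0300 // htons(ETH_P_ALL), as in the endpoint above.
	packetFanout = 18     // PACKET_FANOUT socket option number on Linux.
	fanoutHash   = 0x8000 // PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG
)

// openFanoutSockets opens n AF_PACKET sockets bound to iface and joins them
// to one fanout group, so the kernel hashes each flow to a single socket.
func openFanoutSockets(iface string, n, fanoutID int) ([]int, error) {
	nic, err := net.InterfaceByName(iface)
	if err != nil {
		return nil, err
	}
	var fds []int
	for i := 0; i < n; i++ {
		fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protoAll)
		if err != nil {
			return nil, err
		}
		sll := syscall.SockaddrLinklayer{Protocol: protoAll, Ifindex: nic.Index}
		if err := syscall.Bind(fd, &sll); err != nil {
			return nil, err
		}
		// Same encoding as the diff: low 16 bits are the group ID, high 16
		// bits are the fanout type flags.
		arg := fanoutID | fanoutHash<<16
		if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, packetFanout, arg); err != nil {
			return nil, err
		}
		fds = append(fds, fd)
	}
	return fds, nil
}

func main() {
	fds, err := openFanoutSockets("eth0", 4, 1)
	if err != nil {
		fmt.Println("setup failed (likely needs root):", err)
		return
	}
	fmt.Println("fanout sockets:", fds)
}
```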
259,985 | 06.06.2019 14:30:50 | 25,200 | 8b8bd8d5b28a8e41f59fc3465c38964986bfb084 | Try to increase the listen backlog. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_inet_loopback.cc",
"new_path": "test/syscalls/linux/socket_inet_loopback.cc",
"diff": "@@ -198,7 +198,7 @@ TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread) {\nASSERT_THAT(\nbind(fd, reinterpret_cast<sockaddr*>(&listen_addr), listener.addr_len),\nSyscallSucceeds());\n- ASSERT_THAT(listen(fd, 40), SyscallSucceeds());\n+ ASSERT_THAT(listen(fd, 512), SyscallSucceeds());\n// On the first bind we need to determine which port was bound.\nif (i != 0) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Try to increase the listen backlog.
PiperOrigin-RevId: 251928000 |
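For context on the one-line change above: the backlog passed to listen(2) caps the accept queue, and a small value makes bursts of concurrent connects (as in the reuseport test) fail or retransmit. Go's net.Listen does not expose the backlog, so a sketch that wants an explicit value has to drop to the syscall layer; the snippet below is only an illustration of choosing backlog 512 and is not taken from the C++ test.

```go
package main

import (
	"fmt"
	"syscall"
)

// listenWithBacklog builds a TCP listener at the syscall level so the backlog
// can be chosen explicitly, mirroring the listen(fd, 512) change above.
func listenWithBacklog(port, backlog int) (int, error) {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		return -1, err
	}
	syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
	if err := syscall.Bind(fd, &syscall.SockaddrInet4{Port: port}); err != nil {
		syscall.Close(fd)
		return -1, err
	}
	// A backlog that is too small drops or delays bursts of concurrent
	// connects; 512 gives much more headroom than 40.
	if err := syscall.Listen(fd, backlog); err != nil {
		syscall.Close(fd)
		return -1, err
	}
	return fd, nil
}

func main() {
	fd, err := listenWithBacklog(0, 512)
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	fmt.Println("listening on fd", fd)
	syscall.Close(fd)
}
```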
259,992 | 06.06.2019 14:37:12 | 25,200 | bf0b1b9d767736e632fa56b90d904fee968d8d3d | Add overlay dimension to FS related syscall tests | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -13,11 +13,17 @@ syscall_test(\ntest = \"//test/syscalls/linux:accept_bind_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:access_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:access_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:affinity_test\")\n-syscall_test(test = \"//test/syscalls/linux:aio_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:aio_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -30,6 +36,7 @@ syscall_test(test = \"//test/syscalls/linux:bad_test\")\nsyscall_test(\nsize = \"large\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:bind_test\",\n)\n@@ -37,17 +44,27 @@ syscall_test(test = \"//test/syscalls/linux:brk_test\")\nsyscall_test(test = \"//test/syscalls/linux:socket_test\")\n-syscall_test(test = \"//test/syscalls/linux:chdir_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:chdir_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:chmod_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:chmod_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:chown_test\",\nuse_tmpfs = True, # chwon tests require gofer to be running as root.\n)\n-syscall_test(test = \"//test/syscalls/linux:chroot_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:chroot_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:clock_getres_test\")\n@@ -60,11 +77,17 @@ syscall_test(test = \"//test/syscalls/linux:clock_nanosleep_test\")\nsyscall_test(test = \"//test/syscalls/linux:concurrency_test\")\n-syscall_test(test = \"//test/syscalls/linux:creat_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:creat_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:dev_test\")\n-syscall_test(test = \"//test/syscalls/linux:dup_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:dup_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:epoll_test\")\n@@ -74,23 +97,34 @@ syscall_test(test = \"//test/syscalls/linux:exceptions_test\")\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:exec_test\",\n)\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:exec_binary_test\",\n)\nsyscall_test(test = \"//test/syscalls/linux:exit_test\")\n-syscall_test(test = \"//test/syscalls/linux:fadvise64_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:fadvise64_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:fallocate_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:fallocate_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:fault_test\")\n-syscall_test(test = \"//test/syscalls/linux:fchdir_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:fchdir_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -99,6 +133,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:flock_test\",\n)\n@@ -108,7 +143,10 @@ syscall_test(test = \"//test/syscalls/linux:fpsig_fork_test\")\nsyscall_test(test = \"//test/syscalls/linux:fpsig_nested_test\")\n-syscall_test(test = \"//test/syscalls/linux:fsync_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:fsync_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -120,7 +158,10 @@ syscall_test(test = 
\"//test/syscalls/linux:getcpu_host_test\")\nsyscall_test(test = \"//test/syscalls/linux:getcpu_test\")\n-syscall_test(test = \"//test/syscalls/linux:getdents_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:getdents_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:getrandom_test\")\n@@ -128,11 +169,13 @@ syscall_test(test = \"//test/syscalls/linux:getrusage_test\")\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = False, # TODO(gvisor.dev/issue/317): enable when fixed.\ntest = \"//test/syscalls/linux:inotify_test\",\n)\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:ioctl_test\",\n)\n@@ -144,11 +187,15 @@ syscall_test(\nsyscall_test(test = \"//test/syscalls/linux:kill_test\")\nsyscall_test(\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:link_test\",\nuse_tmpfs = True, # gofer needs CAP_DAC_READ_SEARCH to use AT_EMPTY_PATH with linkat(2)\n)\n-syscall_test(test = \"//test/syscalls/linux:lseek_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:lseek_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:madvise_test\")\n@@ -158,9 +205,13 @@ syscall_test(test = \"//test/syscalls/linux:mempolicy_test\")\nsyscall_test(test = \"//test/syscalls/linux:mincore_test\")\n-syscall_test(test = \"//test/syscalls/linux:mkdir_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:mkdir_test\",\n+)\nsyscall_test(\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:mknod_test\",\nuse_tmpfs = True, # mknod is not supported over gofer.\n)\n@@ -171,7 +222,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:mmap_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:mount_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:mount_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -185,9 +239,15 @@ syscall_test(\nsyscall_test(test = \"//test/syscalls/linux:munmap_test\")\n-syscall_test(test = \"//test/syscalls/linux:open_create_test\")\n+syscall_test(\n+ add_overlay = False, # TODO(gvisor.dev/issue/316): enable when fixed.\n+ test = \"//test/syscalls/linux:open_create_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:open_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:open_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:partial_bad_buffer_test\")\n@@ -195,6 +255,7 @@ syscall_test(test = \"//test/syscalls/linux:pause_test\")\nsyscall_test(\nsize = \"large\",\n+ add_overlay = False, # TODO(gvisor.dev/issue/318): enable when fixed.\nshard_count = 5,\ntest = \"//test/syscalls/linux:pipe_test\",\n)\n@@ -210,11 +271,20 @@ syscall_test(test = \"//test/syscalls/linux:prctl_setuid_test\")\nsyscall_test(test = \"//test/syscalls/linux:prctl_test\")\n-syscall_test(test = \"//test/syscalls/linux:pread64_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:pread64_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:preadv_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:preadv_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:preadv2_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:preadv2_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:priority_test\")\n@@ -239,13 +309,22 @@ syscall_test(\ntest = \"//test/syscalls/linux:pty_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:pwritev2_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = 
\"//test/syscalls/linux:pwritev2_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:pwrite64_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:pwrite64_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:raw_socket_ipv4_test\")\n-syscall_test(test = \"//test/syscalls/linux:read_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:read_test\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -254,11 +333,13 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:readv_test\",\n)\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:rename_test\",\n)\n@@ -279,11 +360,20 @@ syscall_test(\ntest = \"//test/syscalls/linux:semaphore_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:sendfile_socket_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:sendfile_socket_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sendfile_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:sendfile_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:splice_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:splice_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:sigaction_test\")\n@@ -330,11 +420,13 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:socket_filesystem_non_blocking_test\",\n)\nsyscall_test(\nsize = \"large\",\n+ add_overlay = True,\nshard_count = 10,\ntest = \"//test/syscalls/linux:socket_filesystem_test\",\n)\n@@ -430,6 +522,7 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\n+ add_overlay = True,\nshard_count = 10,\ntest = \"//test/syscalls/linux:socket_unix_pair_test\",\n)\n@@ -472,19 +565,40 @@ syscall_test(\ntest = \"//test/syscalls/linux:socket_unix_unbound_stream_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:statfs_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:statfs_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:stat_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:stat_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:stat_times_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:stat_times_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sticky_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:sticky_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:symlink_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:symlink_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sync_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:sync_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sync_file_range_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:sync_file_range_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:sysinfo_test\")\n@@ -508,7 +622,10 @@ syscall_test(test = \"//test/syscalls/linux:time_test\")\nsyscall_test(test = \"//test/syscalls/linux:tkill_test\")\n-syscall_test(test = \"//test/syscalls/linux:truncate_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:truncate_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:udp_bind_test\")\n@@ -522,7 +639,10 @@ syscall_test(test = \"//test/syscalls/linux:uidgid_test\")\nsyscall_test(test = 
\"//test/syscalls/linux:uname_test\")\n-syscall_test(test = \"//test/syscalls/linux:unlink_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:unlink_test\",\n+)\nsyscall_test(test = \"//test/syscalls/linux:unshare_test\")\n@@ -544,7 +664,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:wait_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:write_test\")\n+syscall_test(\n+ add_overlay = True,\n+ test = \"//test/syscalls/linux:write_test\",\n+)\nsyscall_test(\ntest = \"//test/syscalls/linux:proc_net_unix_test\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/build_defs.bzl",
"new_path": "test/syscalls/build_defs.bzl",
"diff": "@@ -7,6 +7,7 @@ def syscall_test(\nshard_count = 1,\nsize = \"small\",\nuse_tmpfs = False,\n+ add_overlay = False,\ntags = None,\nparallel = True):\n_syscall_test(\n@@ -39,6 +40,18 @@ def syscall_test(\nparallel = parallel,\n)\n+ if add_overlay:\n+ _syscall_test(\n+ test = test,\n+ shard_count = shard_count,\n+ size = size,\n+ platform = \"ptrace\",\n+ use_tmpfs = False, # overlay is adding a writable tmpfs on top of root.\n+ tags = tags,\n+ parallel = parallel,\n+ overlay = True,\n+ )\n+\nif not use_tmpfs:\n# Also test shared gofer access.\n_syscall_test(\n@@ -60,7 +73,8 @@ def _syscall_test(\nuse_tmpfs,\ntags,\nparallel,\n- file_access = \"exclusive\"):\n+ file_access = \"exclusive\",\n+ overlay = False):\ntest_name = test.split(\":\")[1]\n# Prepend \"runsc\" to non-native platform names.\n@@ -69,6 +83,8 @@ def _syscall_test(\nname = test_name + \"_\" + full_platform\nif file_access == \"shared\":\nname += \"_shared\"\n+ if overlay:\n+ name += \"_overlay\"\nif tags == None:\ntags = []\n@@ -92,6 +108,7 @@ def _syscall_test(\n\"--platform=\" + platform,\n\"--use-tmpfs=\" + str(use_tmpfs),\n\"--file-access=\" + file_access,\n+ \"--overlay=\" + str(overlay),\n]\nif parallel:\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/syscall_test_runner.go",
"new_path": "test/syscalls/syscall_test_runner.go",
"diff": "@@ -47,6 +47,7 @@ var (\nplatform = flag.String(\"platform\", \"ptrace\", \"platform to run on\")\nuseTmpfs = flag.Bool(\"use-tmpfs\", false, \"mounts tmpfs for /tmp\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\n+ overlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\nparallel = flag.Bool(\"parallel\", false, \"run tests in parallel\")\nrunscPath = flag.String(\"runsc\", \"\", \"path to runsc binary\")\n)\n@@ -184,10 +185,13 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n\"-platform\", *platform,\n\"-root\", rootDir,\n\"-file-access\", *fileAccess,\n- \"--network=none\",\n+ \"-network=none\",\n\"-log-format=text\",\n\"-TESTONLY-unsafe-nonroot=true\",\n- \"--net-raw=true\",\n+ \"-net-raw=true\",\n+ }\n+ if *overlay {\n+ args = append(args, \"-overlay\")\n}\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add overlay dimension to FS related syscall tests
PiperOrigin-RevId: 251929314 |
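The build_defs change effectively adds another axis to the test matrix: each syscall test expands into per-platform variants plus optional _shared and _overlay flavors. The Go sketch below shows how that name expansion multiplies; it deliberately simplifies the real rules (which add the overlay variant only for ptrace and the shared variant only when tmpfs is not used) and exists purely to illustrate the dimension idea.

```go
package main

import "fmt"

// variantNames mirrors the naming scheme in _syscall_test: the base test name
// gets a platform suffix (prefixed with "runsc_" for non-native platforms)
// and optional "_shared"/"_overlay" suffixes as new dimensions are added.
func variantNames(test string, platforms []string, shared, overlay bool) []string {
	var names []string
	for _, p := range platforms {
		full := p
		if p != "native" {
			full = "runsc_" + p
		}
		name := test + "_" + full
		names = append(names, name)
		if shared {
			names = append(names, name+"_shared")
		}
		if overlay {
			names = append(names, name+"_overlay")
		}
	}
	return names
}

func main() {
	for _, n := range variantNames("open_test", []string{"native", "ptrace", "kvm"}, true, true) {
		fmt.Println(n)
	}
}
```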
259,992 | 06.06.2019 15:54:54 | 25,200 | 93aa7d11673392ca51ba69122ff5fe1aad7331b9 | Remove tmpfs restriction from test
runsc supports UDS over gofer mounts and tmpfs is
not needed for this test. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -669,12 +669,7 @@ syscall_test(\ntest = \"//test/syscalls/linux:write_test\",\n)\n-syscall_test(\n- test = \"//test/syscalls/linux:proc_net_unix_test\",\n- # Unix domain socket creation isn't supported on all file systems. The\n- # sentry-internal tmpfs is known to support it.\n- use_tmpfs = True,\n-)\n+syscall_test(test = \"//test/syscalls/linux:proc_net_unix_test\")\ngo_binary(\nname = \"syscall_test_runner\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove tmpfs restriction from test
runsc supports UDS over gofer mounts and tmpfs is
not needed for this test.
PiperOrigin-RevId: 251944870 |
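The rationale above is that binding a Unix-domain socket at a filesystem path now works on gofer-backed mounts, not only on the sentry-internal tmpfs. A minimal illustration of the operation the test exercises is below; the socket path is arbitrary and the snippet is not taken from the test itself.

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

func main() {
	// Bind a Unix-domain socket on an ordinary filesystem path. Inside runsc
	// this now works on a gofer-backed mount as well as on tmpfs, which is
	// why the test no longer forces use_tmpfs.
	sock := filepath.Join(os.TempDir(), "proc_net_unix_example.sock")
	l, err := net.Listen("unix", sock)
	if err != nil {
		fmt.Fprintln(os.Stderr, "bind failed:", err)
		return
	}
	defer l.Close()
	defer os.Remove(sock)
	fmt.Println("listening on", l.Addr())
}
```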
259,885 | 06.06.2019 16:26:00 | 25,200 | a26043ee53a2f38b81c9eaa098d115025e87f4c3 | Implement reclaim-driven MemoryFile eviction. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/hostmm/BUILD",
"diff": "+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"hostmm\",\n+ srcs = [\n+ \"cgroup.go\",\n+ \"hostmm.go\",\n+ ],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/hostmm\",\n+ visibility = [\"//pkg/sentry:internal\"],\n+ deps = [\n+ \"//pkg/fd\",\n+ \"//pkg/log\",\n+ \"//pkg/sentry/usermem\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/hostmm/cgroup.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package hostmm\n+\n+import (\n+ \"bufio\"\n+ \"fmt\"\n+ \"os\"\n+ \"path\"\n+ \"strings\"\n+)\n+\n+// currentCgroupDirectory returns the directory for the cgroup for the given\n+// controller in which the calling process resides.\n+func currentCgroupDirectory(ctrl string) (string, error) {\n+ root, err := cgroupRootDirectory(ctrl)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ cg, err := currentCgroup(ctrl)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ return path.Join(root, cg), nil\n+}\n+\n+// cgroupRootDirectory returns the root directory for the cgroup hierarchy in\n+// which the given cgroup controller is mounted in the calling process' mount\n+// namespace.\n+func cgroupRootDirectory(ctrl string) (string, error) {\n+ const path = \"/proc/self/mounts\"\n+ file, err := os.Open(path)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer file.Close()\n+\n+ // Per proc(5) -> fstab(5):\n+ // Each line of /proc/self/mounts describes a mount.\n+ scanner := bufio.NewScanner(file)\n+ for scanner.Scan() {\n+ // Each line consists of 6 space-separated fields. Find the line for\n+ // which the third field (fs_vfstype) is cgroup, and the fourth field\n+ // (fs_mntops, a comma-separated list of mount options) contains\n+ // ctrl.\n+ var spec, file, vfstype, mntopts, freq, passno string\n+ const nrfields = 6\n+ line := scanner.Text()\n+ n, err := fmt.Sscan(line, &spec, &file, &vfstype, &mntopts, &freq, &passno)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"failed to parse %s: %v\", path, err)\n+ }\n+ if n != nrfields {\n+ return \"\", fmt.Errorf(\"failed to parse %s: line %q: got %d fields, wanted %d\", path, line, n, nrfields)\n+ }\n+ if vfstype != \"cgroup\" {\n+ continue\n+ }\n+ for _, mntopt := range strings.Split(mntopts, \",\") {\n+ if mntopt == ctrl {\n+ return file, nil\n+ }\n+ }\n+ }\n+ return \"\", fmt.Errorf(\"no cgroup hierarchy mounted for controller %s\", ctrl)\n+}\n+\n+// currentCgroup returns the cgroup for the given controller in which the\n+// calling process resides. The returned string is a path that should be\n+// interpreted as relative to cgroupRootDirectory(ctrl).\n+func currentCgroup(ctrl string) (string, error) {\n+ const path = \"/proc/self/cgroup\"\n+ file, err := os.Open(path)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer file.Close()\n+\n+ // Per proc(5) -> cgroups(7):\n+ // Each line of /proc/self/cgroups describes a cgroup hierarchy.\n+ scanner := bufio.NewScanner(file)\n+ for scanner.Scan() {\n+ // Each line consists of 3 colon-separated fields. 
Find the line for\n+ // which the second field (controller-list, a comma-separated list of\n+ // cgroup controllers) contains ctrl.\n+ line := scanner.Text()\n+ const nrfields = 3\n+ fields := strings.Split(line, \":\")\n+ if len(fields) != nrfields {\n+ return \"\", fmt.Errorf(\"failed to parse %s: line %q: got %d fields, wanted %d\", path, line, len(fields), nrfields)\n+ }\n+ for _, controller := range strings.Split(fields[1], \",\") {\n+ if controller == ctrl {\n+ return fields[2], nil\n+ }\n+ }\n+ }\n+ return \"\", fmt.Errorf(\"not a member of a cgroup hierarchy for controller %s\", ctrl)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/hostmm/hostmm.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package hostmm provides tools for interacting with the host Linux kernel's\n+// virtual memory management subsystem.\n+package hostmm\n+\n+import (\n+ \"fmt\"\n+ \"os\"\n+ \"path\"\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/fd\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+)\n+\n+// NotifyCurrentMemcgPressureCallback requests that f is called whenever the\n+// calling process' memory cgroup indicates memory pressure of the given level,\n+// as specified by Linux's Documentation/cgroup-v1/memory.txt.\n+//\n+// If NotifyCurrentMemcgPressureCallback succeeds, it returns a function that\n+// terminates the requested memory pressure notifications. This function may be\n+// called at most once.\n+func NotifyCurrentMemcgPressureCallback(f func(), level string) (func(), error) {\n+ cgdir, err := currentCgroupDirectory(\"memory\")\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ pressurePath := path.Join(cgdir, \"memory.pressure_level\")\n+ pressureFile, err := os.Open(pressurePath)\n+ if err != nil {\n+ return nil, err\n+ }\n+ defer pressureFile.Close()\n+\n+ eventControlPath := path.Join(cgdir, \"cgroup.event_control\")\n+ eventControlFile, err := os.OpenFile(eventControlPath, os.O_WRONLY, 0)\n+ if err != nil {\n+ return nil, err\n+ }\n+ defer eventControlFile.Close()\n+\n+ eventFD, err := newEventFD()\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ // Don't use fmt.Fprintf since the whole string needs to be written in a\n+ // single syscall.\n+ eventControlStr := fmt.Sprintf(\"%d %d %s\", eventFD.FD(), pressureFile.Fd(), level)\n+ if n, err := eventControlFile.Write([]byte(eventControlStr)); n != len(eventControlStr) || err != nil {\n+ eventFD.Close()\n+ return nil, fmt.Errorf(\"error writing %q to %s: got (%d, %v), wanted (%d, nil)\", eventControlStr, eventControlPath, n, err, len(eventControlStr))\n+ }\n+\n+ log.Debugf(\"Receiving memory pressure level notifications from %s at level %q\", pressurePath, level)\n+ const sizeofUint64 = 8\n+ // The most significant bit of the eventfd value is set by the stop\n+ // function, which is practically unambiguous since it's not plausible for\n+ // 2**63 pressure events to occur between eventfd reads.\n+ const stopVal = 1 << 63\n+ stopCh := make(chan struct{})\n+ go func() { // S/R-SAFE: f provides synchronization if necessary\n+ rw := fd.NewReadWriter(eventFD.FD())\n+ var buf [sizeofUint64]byte\n+ for {\n+ n, err := rw.Read(buf[:])\n+ if err != nil {\n+ if err == syscall.EINTR {\n+ continue\n+ }\n+ panic(fmt.Sprintf(\"failed to read from memory pressure level eventfd: %v\", err))\n+ }\n+ if n != sizeofUint64 {\n+ panic(fmt.Sprintf(\"short read from memory pressure level eventfd: got %d bytes, wanted %d\", n, sizeofUint64))\n+ }\n+ val := usermem.ByteOrder.Uint64(buf[:])\n+ if val >= stopVal {\n+ // Assume this was due to the notifier's \"destructor\" 
(the\n+ // function returned by NotifyCurrentMemcgPressureCallback\n+ // below) being called.\n+ eventFD.Close()\n+ close(stopCh)\n+ return\n+ }\n+ f()\n+ }\n+ }()\n+ return func() {\n+ rw := fd.NewReadWriter(eventFD.FD())\n+ var buf [sizeofUint64]byte\n+ usermem.ByteOrder.PutUint64(buf[:], stopVal)\n+ for {\n+ n, err := rw.Write(buf[:])\n+ if err != nil {\n+ if err == syscall.EINTR {\n+ continue\n+ }\n+ panic(fmt.Sprintf(\"failed to write to memory pressure level eventfd: %v\", err))\n+ }\n+ if n != sizeofUint64 {\n+ panic(fmt.Sprintf(\"short write to memory pressure level eventfd: got %d bytes, wanted %d\", n, sizeofUint64))\n+ }\n+ break\n+ }\n+ <-stopCh\n+ }, nil\n+}\n+\n+func newEventFD() (*fd.FD, error) {\n+ f, _, e := syscall.Syscall(syscall.SYS_EVENTFD2, 0, 0, 0)\n+ if e != 0 {\n+ return nil, fmt.Errorf(\"failed to create eventfd: %v\", e)\n+ }\n+ return fd.New(int(f)), nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/pgalloc/BUILD",
"new_path": "pkg/sentry/pgalloc/BUILD",
"diff": "@@ -65,6 +65,7 @@ go_library(\n\"//pkg/log\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n+ \"//pkg/sentry/hostmm\",\n\"//pkg/sentry/memutil\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/safemem\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/pgalloc/pgalloc.go",
"new_path": "pkg/sentry/pgalloc/pgalloc.go",
"diff": "@@ -32,6 +32,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/hostmm\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/safemem\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usage\"\n@@ -162,6 +163,11 @@ type MemoryFile struct {\n// evictionWG counts the number of goroutines currently performing evictions.\nevictionWG sync.WaitGroup\n+\n+ // stopNotifyPressure stops memory cgroup pressure level\n+ // notifications used to drive eviction. stopNotifyPressure is\n+ // immutable.\n+ stopNotifyPressure func()\n}\n// MemoryFileOpts provides options to NewMemoryFile.\n@@ -169,6 +175,11 @@ type MemoryFileOpts struct {\n// DelayedEviction controls the extent to which the MemoryFile may delay\n// eviction of evictable allocations.\nDelayedEviction DelayedEvictionType\n+\n+ // If UseHostMemcgPressure is true, use host memory cgroup pressure level\n+ // notifications to determine when eviction is necessary. This option has\n+ // no effect unless DelayedEviction is DelayedEvictionEnabled.\n+ UseHostMemcgPressure bool\n}\n// DelayedEvictionType is the type of MemoryFileOpts.DelayedEviction.\n@@ -186,9 +197,14 @@ const (\n// evictable allocations until doing so is considered necessary to avoid\n// performance degradation due to host memory pressure, or OOM kills.\n//\n- // As of this writing, DelayedEvictionEnabled delays evictions until the\n- // reclaimer goroutine is out of work (pages to reclaim), then evicts all\n- // pending evictable allocations immediately.\n+ // As of this writing, the behavior of DelayedEvictionEnabled depends on\n+ // whether or not MemoryFileOpts.UseHostMemcgPressure is enabled:\n+ //\n+ // - If UseHostMemcgPressure is true, evictions are delayed until memory\n+ // pressure is indicated.\n+ //\n+ // - Otherwise, evictions are only delayed until the reclaimer goroutine\n+ // is out of work (pages to reclaim).\nDelayedEvictionEnabled\n// DelayedEvictionManual requires that evictable allocations are only\n@@ -292,6 +308,22 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) {\n}\nf.mappings.Store(make([]uintptr, initialSize/chunkSize))\nf.reclaimCond.L = &f.mu\n+\n+ if f.opts.DelayedEviction == DelayedEvictionEnabled && f.opts.UseHostMemcgPressure {\n+ stop, err := hostmm.NotifyCurrentMemcgPressureCallback(func() {\n+ f.mu.Lock()\n+ startedAny := f.startEvictionsLocked()\n+ f.mu.Unlock()\n+ if startedAny {\n+ log.Debugf(\"pgalloc.MemoryFile performing evictions due to memcg pressure\")\n+ }\n+ }, \"low\")\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to configure memcg pressure level notifications: %v\", err)\n+ }\n+ f.stopNotifyPressure = stop\n+ }\n+\ngo f.runReclaim() // S/R-SAFE: f.mu\n// The Linux kernel contains an optional feature called \"Integrity\n@@ -692,12 +724,14 @@ func (f *MemoryFile) MarkEvictable(user EvictableMemoryUser, er EvictableRange)\n// Kick off eviction immediately.\nf.startEvictionGoroutineLocked(user, info)\ncase DelayedEvictionEnabled:\n- // Ensure that the reclaimer goroutine is running, so that it can\n- // start eviction when necessary.\n+ if !f.opts.UseHostMemcgPressure {\n+ // Ensure that the reclaimer goroutine is running, so that it\n+ // can start eviction when necessary.\nf.reclaimCond.Signal()\n}\n}\n}\n+}\n// MarkUnevictable informs f that user no longer considers er to be evictable,\n// so the MemoryFile should no longer call 
user.Evict(er). Note that, per\n@@ -992,11 +1026,12 @@ func (f *MemoryFile) runReclaim() {\n}\nf.markReclaimed(fr)\n}\n+\n// We only get here if findReclaimable finds f.destroyed set and returns\n// false.\nf.mu.Lock()\n- defer f.mu.Unlock()\nif !f.destroyed {\n+ f.mu.Unlock()\npanic(\"findReclaimable broke out of reclaim loop, but destroyed is no longer set\")\n}\nf.file.Close()\n@@ -1016,6 +1051,13 @@ func (f *MemoryFile) runReclaim() {\n}\n// Similarly, invalidate f.mappings. (atomic.Value.Store(nil) panics.)\nf.mappings.Store([]uintptr{})\n+ f.mu.Unlock()\n+\n+ // This must be called without holding f.mu to avoid circular lock\n+ // ordering.\n+ if f.stopNotifyPressure != nil {\n+ f.stopNotifyPressure()\n+ }\n}\nfunc (f *MemoryFile) findReclaimable() (platform.FileRange, bool) {\n@@ -1029,7 +1071,7 @@ func (f *MemoryFile) findReclaimable() (platform.FileRange, bool) {\nif f.reclaimable {\nbreak\n}\n- if f.opts.DelayedEviction == DelayedEvictionEnabled {\n+ if f.opts.DelayedEviction == DelayedEvictionEnabled && !f.opts.UseHostMemcgPressure {\n// No work to do. Evict any pending evictable allocations to\n// get more reclaimable pages before going to sleep.\nf.startEvictionsLocked()\n@@ -1089,14 +1131,17 @@ func (f *MemoryFile) StartEvictions() {\n}\n// Preconditions: f.mu must be locked.\n-func (f *MemoryFile) startEvictionsLocked() {\n+func (f *MemoryFile) startEvictionsLocked() bool {\n+ startedAny := false\nfor user, info := range f.evictable {\n// Don't start multiple goroutines to evict the same user's\n// allocations.\nif !info.evicting {\nf.startEvictionGoroutineLocked(user, info)\n+ startedAny = true\n}\n}\n+ return startedAny\n}\n// Preconditions: info == f.evictable[user]. !info.evicting. f.mu must be\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -424,6 +424,9 @@ func createMemoryFile() (*pgalloc.MemoryFile, error) {\nreturn nil, fmt.Errorf(\"error creating memfd: %v\", err)\n}\nmemfile := os.NewFile(uintptr(memfd), memfileName)\n+ // We can't enable pgalloc.MemoryFileOpts.UseHostMemcgPressure even if\n+ // there are memory cgroups specified, because at this point we're already\n+ // in a mount namespace in which the relevant cgroupfs is not visible.\nmf, err := pgalloc.NewMemoryFile(memfile, pgalloc.MemoryFileOpts{})\nif err != nil {\nmemfile.Close()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement reclaim-driven MemoryFile eviction.
PiperOrigin-RevId: 251950660 |
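The record above ties MemoryFile eviction to host memory-cgroup pressure notifications. As a rough illustration of the underlying host mechanism only (not gVisor's hostmm package), the sketch below arms a cgroup v1 "low" pressure notification through an eventfd and runs a callback whenever the kernel signals pressure. The cgroup path, file permissions, and helper names are assumptions for the example.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// notifyMemcgPressure arms a pressure notification on a cgroup v1 memory
// controller directory and calls f every time the kernel reports the
// requested level ("low", "medium", or "critical").
func notifyMemcgPressure(memcgDir, level string, f func()) error {
	efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
	if err != nil {
		return fmt.Errorf("eventfd: %v", err)
	}
	pressure, err := os.Open(filepath.Join(memcgDir, "memory.pressure_level"))
	if err != nil {
		return err
	}
	// Writing "<eventfd> <pressure_level fd> <level>" to cgroup.event_control
	// registers the eventfd for pressure notifications.
	arm := fmt.Sprintf("%d %d %s", efd, pressure.Fd(), level)
	if err := os.WriteFile(filepath.Join(memcgDir, "cgroup.event_control"), []byte(arm), 0700); err != nil {
		return err
	}
	go func() {
		buf := make([]byte, 8)
		for {
			// Each successful 8-byte read means at least one pressure event.
			if _, err := unix.Read(efd, buf); err != nil {
				return
			}
			f()
		}
	}()
	return nil
}

func main() {
	err := notifyMemcgPressure("/sys/fs/cgroup/memory", "low", func() {
		fmt.Println("memcg pressure: time to start evictions")
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	select {} // block; notifications are handled on the goroutine above
}
```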
259,885 | 06.06.2019 16:27:09 | 25,200 | b3f104507d7a04c0ca058cbcacc5ff78d853f4ba | "Implement" mbind(2).
We still only advertise a single NUMA node, and ignore mempolicy
accordingly, but mbind() at least now succeeds and has effects reflected
by get_mempolicy().
Also fix handling of nodemasks: round sizes to unsigned long (as
documented and done by Linux), and zero trailing bits when copying them
out. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/mm.go",
"new_path": "pkg/abi/linux/mm.go",
"diff": "@@ -114,3 +114,12 @@ const (\nMPOL_MODE_FLAGS = (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)\n)\n+\n+// Flags for mbind(2).\n+const (\n+ MPOL_MF_STRICT = 1 << 0\n+ MPOL_MF_MOVE = 1 << 1\n+ MPOL_MF_MOVE_ALL = 1 << 2\n+\n+ MPOL_MF_VALID = MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -455,12 +455,13 @@ type Task struct {\n// single numa node, all policies are no-ops. We only track this information\n// so that we can return reasonable values if the application calls\n// get_mempolicy(2) after setting a non-default policy. Note that in the\n- // real syscall, nodemask can be longer than 4 bytes, but we always report a\n- // single node so never need to save more than a single bit.\n+ // real syscall, nodemask can be longer than a single unsigned long, but we\n+ // always report a single node so never need to save more than a single\n+ // bit.\n//\n// numaPolicy and numaNodeMask are protected by mu.\nnumaPolicy int32\n- numaNodeMask uint32\n+ numaNodeMask uint64\n// If netns is true, the task is in a non-root network namespace. Network\n// namespaces aren't currently implemented in full; being in a network\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_sched.go",
"new_path": "pkg/sentry/kernel/task_sched.go",
"diff": "@@ -622,14 +622,14 @@ func (t *Task) SetNiceness(n int) {\n}\n// NumaPolicy returns t's current numa policy.\n-func (t *Task) NumaPolicy() (policy int32, nodeMask uint32) {\n+func (t *Task) NumaPolicy() (policy int32, nodeMask uint64) {\nt.mu.Lock()\ndefer t.mu.Unlock()\nreturn t.numaPolicy, t.numaNodeMask\n}\n// SetNumaPolicy sets t's numa policy.\n-func (t *Task) SetNumaPolicy(policy int32, nodeMask uint32) {\n+func (t *Task) SetNumaPolicy(policy int32, nodeMask uint64) {\nt.mu.Lock()\ndefer t.mu.Unlock()\nt.numaPolicy = policy\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/mm.go",
"new_path": "pkg/sentry/mm/mm.go",
"diff": "@@ -276,6 +276,12 @@ type vma struct {\nmlockMode memmap.MLockMode\n+ // numaPolicy is the NUMA policy for this vma set by mbind().\n+ numaPolicy int32\n+\n+ // numaNodemask is the NUMA nodemask for this vma set by mbind().\n+ numaNodemask uint64\n+\n// If id is not nil, it controls the lifecycle of mappable and provides vma\n// metadata shown in /proc/[pid]/maps, and the vma holds a reference.\nid memmap.MappingIdentity\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -973,6 +973,59 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error\nreturn nil\n}\n+// NumaPolicy implements the semantics of Linux's get_mempolicy(MPOL_F_ADDR).\n+func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (int32, uint64, error) {\n+ mm.mappingMu.RLock()\n+ defer mm.mappingMu.RUnlock()\n+ vseg := mm.vmas.FindSegment(addr)\n+ if !vseg.Ok() {\n+ return 0, 0, syserror.EFAULT\n+ }\n+ vma := vseg.ValuePtr()\n+ return vma.numaPolicy, vma.numaNodemask, nil\n+}\n+\n+// SetNumaPolicy implements the semantics of Linux's mbind().\n+func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy int32, nodemask uint64) error {\n+ if !addr.IsPageAligned() {\n+ return syserror.EINVAL\n+ }\n+ // Linux allows this to overflow.\n+ la, _ := usermem.Addr(length).RoundUp()\n+ ar, ok := addr.ToRange(uint64(la))\n+ if !ok {\n+ return syserror.EINVAL\n+ }\n+ if ar.Length() == 0 {\n+ return nil\n+ }\n+\n+ mm.mappingMu.Lock()\n+ defer mm.mappingMu.Unlock()\n+ defer func() {\n+ mm.vmas.MergeRange(ar)\n+ mm.vmas.MergeAdjacent(ar)\n+ }()\n+ vseg := mm.vmas.LowerBoundSegment(ar.Start)\n+ lastEnd := ar.Start\n+ for {\n+ if !vseg.Ok() || lastEnd < vseg.Start() {\n+ // \"EFAULT: ... there was an unmapped hole in the specified memory\n+ // range specified [sic] by addr and len.\" - mbind(2)\n+ return syserror.EFAULT\n+ }\n+ vseg = mm.vmas.Isolate(vseg, ar)\n+ vma := vseg.ValuePtr()\n+ vma.numaPolicy = policy\n+ vma.numaNodemask = nodemask\n+ lastEnd = vseg.End()\n+ if ar.End <= lastEnd {\n+ return nil\n+ }\n+ vseg, _ = vseg.NextNonEmpty()\n+ }\n+}\n+\n// Decommit implements the semantics of Linux's madvise(MADV_DONTNEED).\nfunc (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {\nar, ok := addr.ToRange(length)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/vma.go",
"new_path": "pkg/sentry/mm/vma.go",
"diff": "@@ -107,6 +107,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp\nprivate: opts.Private,\ngrowsDown: opts.GrowsDown,\nmlockMode: opts.MLockMode,\n+ numaPolicy: linux.MPOL_DEFAULT,\nid: opts.MappingIdentity,\nhint: opts.Hint,\n}\n@@ -436,6 +437,8 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa\nvma1.private != vma2.private ||\nvma1.growsDown != vma2.growsDown ||\nvma1.mlockMode != vma2.mlockMode ||\n+ vma1.numaPolicy != vma2.numaPolicy ||\n+ vma1.numaNodemask != vma2.numaNodemask ||\nvma1.id != vma2.id ||\nvma1.hint != vma2.hint {\nreturn vma{}, false\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -19,6 +19,7 @@ go_library(\n\"sys_identity.go\",\n\"sys_inotify.go\",\n\"sys_lseek.go\",\n+ \"sys_mempolicy.go\",\n\"sys_mmap.go\",\n\"sys_mount.go\",\n\"sys_pipe.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "@@ -360,8 +360,7 @@ var AMD64 = &kernel.SyscallTable{\n235: Utimes,\n// @Syscall(Vserver, note:Not implemented by Linux)\n236: syscalls.Error(syscall.ENOSYS), // Vserver, not implemented by Linux\n- // @Syscall(Mbind, returns:EPERM or ENOSYS, note:Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise), TODO(b/117792295)\n- 237: syscalls.CapError(linux.CAP_SYS_NICE), // may require cap_sys_nice\n+ 237: Mbind,\n238: SetMempolicy,\n239: GetMempolicy,\n// 240: @Syscall(MqOpen), TODO(b/29354921)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/syscalls/linux/sys_mempolicy.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+import (\n+ \"fmt\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n+)\n+\n+// We unconditionally report a single NUMA node. This also means that our\n+// \"nodemask_t\" is a single unsigned long (uint64).\n+const (\n+ maxNodes = 1\n+ allowedNodemask = (1 << maxNodes) - 1\n+)\n+\n+func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, error) {\n+ // \"nodemask points to a bit mask of node IDs that contains up to maxnode\n+ // bits. The bit mask size is rounded to the next multiple of\n+ // sizeof(unsigned long), but the kernel will use bits only up to maxnode.\n+ // A NULL value of nodemask or a maxnode value of zero specifies the empty\n+ // set of nodes. If the value of maxnode is zero, the nodemask argument is\n+ // ignored.\" - set_mempolicy(2). Unfortunately, most of this is inaccurate\n+ // because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses\n+ // maxnode-1, not maxnode, as the number of bits.\n+ bits := maxnode - 1\n+ if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0\n+ return 0, syserror.EINVAL\n+ }\n+ if bits == 0 {\n+ return 0, nil\n+ }\n+ // Copy in the whole nodemask.\n+ numUint64 := (bits + 63) / 64\n+ buf := t.CopyScratchBuffer(int(numUint64) * 8)\n+ if _, err := t.CopyInBytes(addr, buf); err != nil {\n+ return 0, err\n+ }\n+ val := usermem.ByteOrder.Uint64(buf)\n+ // Check that only allowed bits in the first unsigned long in the nodemask\n+ // are set.\n+ if val&^allowedNodemask != 0 {\n+ return 0, syserror.EINVAL\n+ }\n+ // Check that all remaining bits in the nodemask are 0.\n+ for i := 8; i < len(buf); i++ {\n+ if buf[i] != 0 {\n+ return 0, syserror.EINVAL\n+ }\n+ }\n+ return val, nil\n+}\n+\n+func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint64) error {\n+ // mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of\n+ // bits.\n+ bits := maxnode - 1\n+ if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0\n+ return syserror.EINVAL\n+ }\n+ if bits == 0 {\n+ return nil\n+ }\n+ // Copy out the first unsigned long in the nodemask.\n+ buf := t.CopyScratchBuffer(8)\n+ usermem.ByteOrder.PutUint64(buf, val)\n+ if _, err := t.CopyOutBytes(addr, buf); err != nil {\n+ return err\n+ }\n+ // Zero out remaining unsigned longs in the nodemask.\n+ if bits > 64 {\n+ remAddr, ok := addr.AddLength(8)\n+ if !ok {\n+ return syserror.EFAULT\n+ }\n+ remUint64 := (bits - 1) / 64\n+ if _, err := t.MemoryManager().ZeroOut(t, remAddr, int64(remUint64)*8, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ }); err != nil {\n+ return err\n+ }\n+ }\n+ return nil\n+}\n+\n+// GetMempolicy implements the syscall 
get_mempolicy(2).\n+func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ mode := args[0].Pointer()\n+ nodemask := args[1].Pointer()\n+ maxnode := args[2].Uint()\n+ addr := args[3].Pointer()\n+ flags := args[4].Uint()\n+\n+ if flags&^(linux.MPOL_F_NODE|linux.MPOL_F_ADDR|linux.MPOL_F_MEMS_ALLOWED) != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ nodeFlag := flags&linux.MPOL_F_NODE != 0\n+ addrFlag := flags&linux.MPOL_F_ADDR != 0\n+ memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0\n+\n+ // \"EINVAL: The value specified by maxnode is less than the number of node\n+ // IDs supported by the system.\" - get_mempolicy(2)\n+ if nodemask != 0 && maxnode < maxNodes {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ // \"If flags specifies MPOL_F_MEMS_ALLOWED [...], the mode argument is\n+ // ignored and the set of nodes (memories) that the thread is allowed to\n+ // specify in subsequent calls to mbind(2) or set_mempolicy(2) (in the\n+ // absence of any mode flags) is returned in nodemask.\"\n+ if memsAllowed {\n+ // \"It is not permitted to combine MPOL_F_MEMS_ALLOWED with either\n+ // MPOL_F_ADDR or MPOL_F_NODE.\"\n+ if nodeFlag || addrFlag {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if err := copyOutNodemask(t, nodemask, maxnode, allowedNodemask); err != nil {\n+ return 0, nil, err\n+ }\n+ return 0, nil, nil\n+ }\n+\n+ // \"If flags specifies MPOL_F_ADDR, then information is returned about the\n+ // policy governing the memory address given in addr. ... If the mode\n+ // argument is not NULL, then get_mempolicy() will store the policy mode\n+ // and any optional mode flags of the requested NUMA policy in the location\n+ // pointed to by this argument. If nodemask is not NULL, then the nodemask\n+ // associated with the policy will be stored in the location pointed to by\n+ // this argument.\"\n+ if addrFlag {\n+ policy, nodemaskVal, err := t.MemoryManager().NumaPolicy(addr)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ if nodeFlag {\n+ // \"If flags specifies both MPOL_F_NODE and MPOL_F_ADDR,\n+ // get_mempolicy() will return the node ID of the node on which the\n+ // address addr is allocated into the location pointed to by mode.\n+ // If no page has yet been allocated for the specified address,\n+ // get_mempolicy() will allocate a page as if the thread had\n+ // performed a read (load) access to that address, and return the\n+ // ID of the node where that page was allocated.\"\n+ buf := t.CopyScratchBuffer(1)\n+ _, err := t.CopyInBytes(addr, buf)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ policy = 0 // maxNodes == 1\n+ }\n+ if mode != 0 {\n+ if _, err := t.CopyOut(mode, policy); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+ if nodemask != 0 {\n+ if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+ return 0, nil, nil\n+ }\n+\n+ // \"EINVAL: ... flags specified MPOL_F_ADDR and addr is NULL, or flags did\n+ // not specify MPOL_F_ADDR and addr is not NULL.\" This is partially\n+ // inaccurate: if flags specifies MPOL_F_ADDR,\n+ // mm/mempolicy.c:do_get_mempolicy() doesn't special-case NULL; it will\n+ // just (usually) fail to find a VMA at address 0 and return EFAULT.\n+ if addr != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ // \"If flags is specified as 0, then information about the calling thread's\n+ // default policy (as set by set_mempolicy(2)) is returned, in the buffers\n+ // pointed to by mode and nodemask. ... 
If flags specifies MPOL_F_NODE, but\n+ // not MPOL_F_ADDR, and the thread's current policy is MPOL_INTERLEAVE,\n+ // then get_mempolicy() will return in the location pointed to by a\n+ // non-NULL mode argument, the node ID of the next node that will be used\n+ // for interleaving of internal kernel pages allocated on behalf of the\n+ // thread.\"\n+ policy, nodemaskVal := t.NumaPolicy()\n+ if nodeFlag {\n+ if policy&^linux.MPOL_MODE_FLAGS != linux.MPOL_INTERLEAVE {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ policy = 0 // maxNodes == 1\n+ }\n+ if mode != 0 {\n+ if _, err := t.CopyOut(mode, policy); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+ if nodemask != 0 {\n+ if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+ return 0, nil, nil\n+}\n+\n+// SetMempolicy implements the syscall set_mempolicy(2).\n+func SetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ modeWithFlags := args[0].Int()\n+ nodemask := args[1].Pointer()\n+ maxnode := args[2].Uint()\n+\n+ modeWithFlags, nodemaskVal, err := copyInMempolicyNodemask(t, modeWithFlags, nodemask, maxnode)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+\n+ t.SetNumaPolicy(modeWithFlags, nodemaskVal)\n+ return 0, nil, nil\n+}\n+\n+// Mbind implements the syscall mbind(2).\n+func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ addr := args[0].Pointer()\n+ length := args[1].Uint64()\n+ mode := args[2].Int()\n+ nodemask := args[3].Pointer()\n+ maxnode := args[4].Uint()\n+ flags := args[5].Uint()\n+\n+ if flags&^linux.MPOL_MF_VALID != 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ // \"If MPOL_MF_MOVE_ALL is passed in flags ... [the] calling thread must be\n+ // privileged (CAP_SYS_NICE) to use this flag.\" - mbind(2)\n+ if flags&linux.MPOL_MF_MOVE_ALL != 0 && !t.HasCapability(linux.CAP_SYS_NICE) {\n+ return 0, nil, syserror.EPERM\n+ }\n+\n+ mode, nodemaskVal, err := copyInMempolicyNodemask(t, mode, nodemask, maxnode)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+\n+ // Since we claim to have only a single node, all flags can be ignored\n+ // (since all pages must already be on that single node).\n+ err = t.MemoryManager().SetNumaPolicy(addr, length, mode, nodemaskVal)\n+ return 0, nil, err\n+}\n+\n+func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags int32, nodemask usermem.Addr, maxnode uint32) (int32, uint64, error) {\n+ flags := modeWithFlags & linux.MPOL_MODE_FLAGS\n+ mode := modeWithFlags &^ linux.MPOL_MODE_FLAGS\n+ if flags == linux.MPOL_MODE_FLAGS {\n+ // Can't specify both mode flags simultaneously.\n+ return 0, 0, syserror.EINVAL\n+ }\n+ if mode < 0 || mode >= linux.MPOL_MAX {\n+ // Must specify a valid mode.\n+ return 0, 0, syserror.EINVAL\n+ }\n+\n+ var nodemaskVal uint64\n+ if nodemask != 0 {\n+ var err error\n+ nodemaskVal, err = copyInNodemask(t, nodemask, maxnode)\n+ if err != nil {\n+ return 0, 0, err\n+ }\n+ }\n+\n+ switch mode {\n+ case linux.MPOL_DEFAULT:\n+ // \"nodemask must be specified as NULL.\" - set_mempolicy(2). 
This is inaccurate;\n+ // Linux allows a nodemask to be specified, as long as it is empty.\n+ if nodemaskVal != 0 {\n+ return 0, 0, syserror.EINVAL\n+ }\n+ case linux.MPOL_BIND, linux.MPOL_INTERLEAVE:\n+ // These require a non-empty nodemask.\n+ if nodemaskVal == 0 {\n+ return 0, 0, syserror.EINVAL\n+ }\n+ case linux.MPOL_PREFERRED:\n+ // This permits an empty nodemask, as long as no flags are set.\n+ if nodemaskVal == 0 && flags != 0 {\n+ return 0, 0, syserror.EINVAL\n+ }\n+ case linux.MPOL_LOCAL:\n+ // This requires an empty nodemask and no flags set ...\n+ if nodemaskVal != 0 || flags != 0 {\n+ return 0, 0, syserror.EINVAL\n+ }\n+ // ... and is implemented as MPOL_PREFERRED.\n+ mode = linux.MPOL_PREFERRED\n+ default:\n+ // Unknown mode, which we should have rejected above.\n+ panic(fmt.Sprintf(\"unknown mode: %v\", mode))\n+ }\n+\n+ return mode | flags, nodemaskVal, nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"new_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"diff": "@@ -204,151 +204,6 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\n}\n}\n-func copyOutIfNotNull(t *kernel.Task, ptr usermem.Addr, val interface{}) (int, error) {\n- if ptr != 0 {\n- return t.CopyOut(ptr, val)\n- }\n- return 0, nil\n-}\n-\n-// GetMempolicy implements the syscall get_mempolicy(2).\n-func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- mode := args[0].Pointer()\n- nodemask := args[1].Pointer()\n- maxnode := args[2].Uint()\n- addr := args[3].Pointer()\n- flags := args[4].Uint()\n-\n- memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0\n- nodeFlag := flags&linux.MPOL_F_NODE != 0\n- addrFlag := flags&linux.MPOL_F_ADDR != 0\n-\n- // TODO(rahat): Once sysfs is implemented, report a single numa node in\n- // /sys/devices/system/node.\n- if nodemask != 0 && maxnode < 1 {\n- return 0, nil, syserror.EINVAL\n- }\n-\n- // 'addr' provided iff 'addrFlag' set.\n- if addrFlag == (addr == 0) {\n- return 0, nil, syserror.EINVAL\n- }\n-\n- // Default policy for the thread.\n- if flags == 0 {\n- policy, nodemaskVal := t.NumaPolicy()\n- if _, err := copyOutIfNotNull(t, mode, policy); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- if _, err := copyOutIfNotNull(t, nodemask, nodemaskVal); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- return 0, nil, nil\n- }\n-\n- // Report all nodes available to caller.\n- if memsAllowed {\n- // MPOL_F_NODE and MPOL_F_ADDR not allowed with MPOL_F_MEMS_ALLOWED.\n- if nodeFlag || addrFlag {\n- return 0, nil, syserror.EINVAL\n- }\n-\n- // Report a single numa node.\n- if _, err := copyOutIfNotNull(t, nodemask, uint32(0x1)); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- return 0, nil, nil\n- }\n-\n- if addrFlag {\n- if nodeFlag {\n- // Return the id for the node where 'addr' resides, via 'mode'.\n- //\n- // The real get_mempolicy(2) allocates the page referenced by 'addr'\n- // by simulating a read, if it is unallocated before the call. It\n- // then returns the node the page is allocated on through the mode\n- // pointer.\n- b := t.CopyScratchBuffer(1)\n- _, err := t.CopyInBytes(addr, b)\n- if err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- } else {\n- storedPolicy, _ := t.NumaPolicy()\n- // Return the policy governing the memory referenced by 'addr'.\n- if _, err := copyOutIfNotNull(t, mode, int32(storedPolicy)); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- }\n- return 0, nil, nil\n- }\n-\n- storedPolicy, _ := t.NumaPolicy()\n- if nodeFlag && (storedPolicy&^linux.MPOL_MODE_FLAGS == linux.MPOL_INTERLEAVE) {\n- // Policy for current thread is to interleave memory between\n- // nodes. Return the next node we'll allocate on. 
Since we only have a\n- // single node, this is always node 0.\n- if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- return 0, nil, nil\n- }\n-\n- return 0, nil, syserror.EINVAL\n-}\n-\n-func allowedNodesMask() uint32 {\n- const maxNodes = 1\n- return ^uint32((1 << maxNodes) - 1)\n-}\n-\n-// SetMempolicy implements the syscall set_mempolicy(2).\n-func SetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- modeWithFlags := args[0].Int()\n- nodemask := args[1].Pointer()\n- maxnode := args[2].Uint()\n-\n- if nodemask != 0 && maxnode < 1 {\n- return 0, nil, syserror.EINVAL\n- }\n-\n- if modeWithFlags&linux.MPOL_MODE_FLAGS == linux.MPOL_MODE_FLAGS {\n- // Can't specify multiple modes simultaneously.\n- return 0, nil, syserror.EINVAL\n- }\n-\n- mode := modeWithFlags &^ linux.MPOL_MODE_FLAGS\n- if mode < 0 || mode >= linux.MPOL_MAX {\n- // Must specify a valid mode.\n- return 0, nil, syserror.EINVAL\n- }\n-\n- var nodemaskVal uint32\n- // Nodemask may be empty for some policy modes.\n- if nodemask != 0 && maxnode > 0 {\n- if _, err := t.CopyIn(nodemask, &nodemaskVal); err != nil {\n- return 0, nil, syserror.EFAULT\n- }\n- }\n-\n- if (mode == linux.MPOL_INTERLEAVE || mode == linux.MPOL_BIND) && nodemaskVal == 0 {\n- // Mode requires a non-empty nodemask, but got an empty nodemask.\n- return 0, nil, syserror.EINVAL\n- }\n-\n- if nodemaskVal&allowedNodesMask() != 0 {\n- // Invalid node specified.\n- return 0, nil, syserror.EINVAL\n- }\n-\n- t.SetNumaPolicy(int32(modeWithFlags), nodemaskVal)\n-\n- return 0, nil, nil\n-}\n-\n// Mincore implements the syscall mincore(2).\nfunc Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\naddr := args[0].Pointer()\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -999,6 +999,7 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\"//test/util:cleanup\",\n+ \"//test/util:memory_util\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n\"//test/util:thread_util\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mempolicy.cc",
"new_path": "test/syscalls/linux/mempolicy.cc",
"diff": "#include \"gtest/gtest.h\"\n#include \"absl/memory/memory.h\"\n#include \"test/util/cleanup.h\"\n+#include \"test/util/memory_util.h\"\n#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n@@ -34,7 +35,7 @@ namespace {\n#define MPOL_PREFERRED 1\n#define MPOL_BIND 2\n#define MPOL_INTERLEAVE 3\n-#define MPOL_MAX MPOL_INTERLEAVE\n+#define MPOL_LOCAL 4\n#define MPOL_F_NODE (1 << 0)\n#define MPOL_F_ADDR (1 << 1)\n#define MPOL_F_MEMS_ALLOWED (1 << 2)\n@@ -44,11 +45,17 @@ namespace {\nint get_mempolicy(int *policy, uint64_t *nmask, uint64_t maxnode, void *addr,\nint flags) {\n- return syscall(__NR_get_mempolicy, policy, nmask, maxnode, addr, flags);\n+ return syscall(SYS_get_mempolicy, policy, nmask, maxnode, addr, flags);\n}\nint set_mempolicy(int mode, uint64_t *nmask, uint64_t maxnode) {\n- return syscall(__NR_set_mempolicy, mode, nmask, maxnode);\n+ return syscall(SYS_set_mempolicy, mode, nmask, maxnode);\n+}\n+\n+int mbind(void *addr, unsigned long len, int mode,\n+ const unsigned long *nodemask, unsigned long maxnode,\n+ unsigned flags) {\n+ return syscall(SYS_mbind, addr, len, mode, nodemask, maxnode, flags);\n}\n// Creates a cleanup object that resets the calling thread's mempolicy to the\n@@ -252,6 +259,30 @@ TEST(MempolicyTest, GetMempolicyNextInterleaveNode) {\nEXPECT_EQ(0, mode);\n}\n+TEST(MempolicyTest, Mbind) {\n+ // Temporarily set the thread policy to MPOL_PREFERRED.\n+ const auto cleanup_thread_policy =\n+ ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy(MPOL_PREFERRED, nullptr, 0));\n+\n+ const auto mapping = ASSERT_NO_ERRNO_AND_VALUE(\n+ MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS));\n+\n+ // vmas default to MPOL_DEFAULT irrespective of the thread policy (currently\n+ // MPOL_PREFERRED).\n+ int mode;\n+ ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR),\n+ SyscallSucceeds());\n+ EXPECT_EQ(mode, MPOL_DEFAULT);\n+\n+ // Set MPOL_PREFERRED for the vma and read it back.\n+ ASSERT_THAT(\n+ mbind(mapping.ptr(), mapping.len(), MPOL_PREFERRED, nullptr, 0, 0),\n+ SyscallSucceeds());\n+ ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR),\n+ SyscallSucceeds());\n+ EXPECT_EQ(mode, MPOL_PREFERRED);\n+}\n+\n} // namespace\n} // namespace testing\n"
}
] | Go | Apache License 2.0 | google/gvisor | "Implement" mbind(2).
We still only advertise a single NUMA node, and ignore mempolicy
accordingly, but mbind() at least now succeeds and has effects reflected
by get_mempolicy().
Also fix handling of nodemasks: round sizes to unsigned long (as
documented and done by Linux), and zero trailing bits when copying them
out.
PiperOrigin-RevId: 251950859 |
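The nodemask fix described above hinges on the kernel using maxnode-1 bits and copying whole unsigned longs, so trailing bits in the last word must be validated on copy-in and zeroed on copy-out. A small standalone sketch of that rounding arithmetic, with invented names and not the sentry code itself:

```go
package main

import "fmt"

// nodemaskWords mirrors the rounding rule: the kernel looks at maxnode-1
// bits and works in whole 64-bit words, so only some bits of the final
// word are meaningful.
func nodemaskWords(maxnode uint32) (words uint32, lastWordMask uint64) {
	if maxnode == 0 {
		return 0, 0 // treated as an empty mask
	}
	bits := maxnode - 1 // the kernel uses maxnode-1, not maxnode
	if bits == 0 {
		return 0, 0
	}
	words = (bits + 63) / 64
	if rem := bits % 64; rem != 0 {
		lastWordMask = (uint64(1) << rem) - 1
	} else {
		lastWordMask = ^uint64(0)
	}
	return words, lastWordMask
}

func main() {
	for _, m := range []uint32{1, 2, 65, 129} {
		w, mask := nodemaskWords(m)
		fmt.Printf("maxnode=%-3d -> words=%d, valid bits in last word=%#x\n", m, w, mask)
	}
}
```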
259,884 | 06.06.2019 16:57:18 | 25,200 | 6a4c0065642922c157511fa2cd3feea85cb7c44b | Add the gVisor gitter badge to the README
Moves the build badge to just below the logo and adds the gitter badge next to
it for consistency. | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "\n+[](https://storage.googleapis.com/gvisor-build-badges/build.html)\n+[](https://gitter.im/gvisor/community)\n+\n## What is gVisor?\n**gVisor** is a user-space kernel, written in Go, that implements a substantial\n@@ -36,8 +39,6 @@ be found at [gvisor.dev][gvisor-dev].\n## Installing from source\n-[](https://storage.googleapis.com/gvisor-build-badges/build.html)\n-\ngVisor currently requires x86\\_64 Linux to build, though support for other\narchitectures may become available in the future.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add the gVisor gitter badge to the README
Moves the build badge to just below the logo and adds the gitter badge next to
it for consistency.
PiperOrigin-RevId: 251956383 |
259,992 | 06.06.2019 17:48:53 | 25,200 | 2e43dcb26b4ccbc4d4f314be61806a82f073a50e | Add alsologtostderr option
When set, also sends log messages to stderr:
sudo ./runsc --logtostderr do ls
I0531 17:59:58.105064 144564 x:0] ***************************
I0531 17:59:58.105087 144564 x:0] Args: [runsc --logtostderr do ls]
I0531 17:59:58.105112 144564 x:0] PID: 144564
I0531 17:59:58.105125 144564 x:0] UID: 0, GID: 0
[...] | [
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -53,6 +53,7 @@ var (\nlogFD = flag.Int(\"log-fd\", -1, \"file descriptor to log to. If set, the 'log' flag is ignored.\")\ndebugLogFD = flag.Int(\"debug-log-fd\", -1, \"file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.\")\ndebugLogFormat = flag.String(\"debug-log-format\", \"text\", \"log format: text (default), json, or json-k8s\")\n+ alsoLogToStderr = flag.Bool(\"alsologtostderr\", false, \"send log messages to stderr\")\n// Debugging flags: strace related\nstrace = flag.Bool(\"strace\", false, \"enable strace\")\n@@ -228,6 +229,10 @@ func main() {\ne = newEmitter(\"text\", ioutil.Discard)\n}\n+ if *alsoLogToStderr {\n+ e = log.MultiEmitter{e, newEmitter(*debugLogFormat, os.Stderr)}\n+ }\n+\nlog.SetTarget(e)\nlog.Infof(\"***************************\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add alsologtostderr option
When set sends log messages to the error log:
sudo ./runsc --logtostderr do ls
I0531 17:59:58.105064 144564 x:0] ***************************
I0531 17:59:58.105087 144564 x:0] Args: [runsc --logtostderr do ls]
I0531 17:59:58.105112 144564 x:0] PID: 144564
I0531 17:59:58.105125 144564 x:0] UID: 0, GID: 0
[...]
PiperOrigin-RevId: 251964377 |
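The flag above duplicates log output to stderr by wrapping the configured emitter in a multi-emitter. Outside the sentry's log package, the same fan-out pattern can be sketched with the standard library's io.MultiWriter; the file path and message below are placeholders.

```go
package main

import (
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Create("/tmp/runsc-debug.log") // placeholder log destination
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Every message written through logger lands in both the file and stderr,
	// the same fan-out a multi-emitter performs.
	logger := log.New(io.MultiWriter(f, os.Stderr), "", log.LstdFlags)
	logger.Printf("PID: %d", os.Getpid())
}
```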
259,885 | 06.06.2019 17:57:42 | 25,200 | c933f3eede5634bf778dfb757fb68d927a43a7a8 | Change visibility of //pkg/sentry/time. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/time/BUILD",
"new_path": "pkg/sentry/time/BUILD",
"diff": "@@ -32,7 +32,7 @@ go_library(\n\"tsc_arm64.s\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/time\",\n- visibility = [\"//pkg/sentry:internal\"],\n+ visibility = [\"//:sandbox\"],\ndeps = [\n\"//pkg/log\",\n\"//pkg/metric\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Change visibility of //pkg/sentry/time.
PiperOrigin-RevId: 251965598 |
259,858 | 06.06.2019 22:08:49 | 25,200 | e5fb3aab122c546441c595c2135a273468c5a997 | BUILD: Use runsc to generate version
This also ensures BUILD files are correctly formatted. | [
{
"change_type": "MODIFY",
"old_path": "runsc/BUILD",
"new_path": "runsc/BUILD",
"diff": "-package(\n- licenses = [\"notice\"], # Apache 2.0\n-)\n+package(licenses = [\"notice\"]) # Apache 2.0\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\")\nload(\"@bazel_tools//tools/build_defs/pkg:pkg.bzl\", \"pkg_deb\", \"pkg_tar\")\n@@ -84,8 +82,9 @@ pkg_tar(\ngenrule(\nname = \"deb-version\",\nouts = [\"version.txt\"],\n- cmd = \"cat bazel-out/volatile-status.txt | grep VERSION | sed 's/^[^0-9]*//' >$@\",\n+ cmd = \"$(location :runsc) -version | head -n 1 | sed 's/^[^0-9]*//' > $@\",\nstamp = 1,\n+ tools = [\":runsc\"],\n)\npkg_deb(\n@@ -98,4 +97,7 @@ pkg_deb(\npackage = \"runsc\",\npostinst = \"debian/postinst.sh\",\nversion_file = \":version.txt\",\n+ visibility = [\n+ \"//visibility:public\",\n+ ],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/BUILD",
"new_path": "test/BUILD",
"diff": "-# gVisor is a general-purpose sandbox.\n-\n-package(licenses = [\"notice\"])\n-\n-exports_files([\"LICENSE\"])\n+package(licenses = [\"notice\"]) # Apache 2.0\n# We need to define a bazel platform and toolchain to specify dockerPrivileged\n# and dockerRunAsRoot options, they are required to run tests on the RBE\n"
}
] | Go | Apache License 2.0 | google/gvisor | BUILD: Use runsc to generate version
This also ensures BUILD files are correctly formatted.
PiperOrigin-RevId: 251990267 |
259,891 | 07.06.2019 12:54:53 | 25,200 | 8afbd974da2483d8f81e3abde5c9d689719263cb | Address Ian's comments. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/iptables/iptables.go",
"new_path": "pkg/tcpip/iptables/iptables.go",
"diff": "@@ -34,9 +34,9 @@ const (\n// all packets.\nfunc DefaultTables() *IPTables {\ntables := IPTables{\n- Tables: map[string]*Table{\n- tablenameNat: &Table{\n- BuiltinChains: map[Hook]*Chain{\n+ Tables: map[string]Table{\n+ tablenameNat: Table{\n+ BuiltinChains: map[Hook]Chain{\nPrerouting: unconditionalAcceptChain(chainNamePrerouting),\nInput: unconditionalAcceptChain(chainNameInput),\nOutput: unconditionalAcceptChain(chainNameOutput),\n@@ -48,10 +48,10 @@ func DefaultTables() *IPTables {\nOutput: UnconditionalAcceptTarget{},\nPostrouting: UnconditionalAcceptTarget{},\n},\n- UserChains: map[string]*Chain{},\n+ UserChains: map[string]Chain{},\n},\n- tablenameMangle: &Table{\n- BuiltinChains: map[Hook]*Chain{\n+ tablenameMangle: Table{\n+ BuiltinChains: map[Hook]Chain{\nPrerouting: unconditionalAcceptChain(chainNamePrerouting),\nOutput: unconditionalAcceptChain(chainNameOutput),\n},\n@@ -59,7 +59,7 @@ func DefaultTables() *IPTables {\nPrerouting: UnconditionalAcceptTarget{},\nOutput: UnconditionalAcceptTarget{},\n},\n- UserChains: map[string]*Chain{},\n+ UserChains: map[string]Chain{},\n},\n},\nPriorities: map[Hook][]string{\n@@ -68,28 +68,14 @@ func DefaultTables() *IPTables {\n},\n}\n- // Initialize each table's Chains field.\n- tables.Tables[tablenameNat].Chains = map[string]*Chain{\n- chainNamePrerouting: tables.Tables[tablenameNat].BuiltinChains[Prerouting],\n- chainNameInput: tables.Tables[tablenameNat].BuiltinChains[Input],\n- chainNameOutput: tables.Tables[tablenameNat].BuiltinChains[Output],\n- chainNamePostrouting: tables.Tables[tablenameNat].BuiltinChains[Postrouting],\n- }\n- tables.Tables[tablenameMangle].Chains = map[string]*Chain{\n- chainNamePrerouting: tables.Tables[tablenameMangle].BuiltinChains[Prerouting],\n- chainNameInput: tables.Tables[tablenameMangle].BuiltinChains[Input],\n- chainNameOutput: tables.Tables[tablenameMangle].BuiltinChains[Output],\n- chainNamePostrouting: tables.Tables[tablenameMangle].BuiltinChains[Postrouting],\n- }\n-\nreturn &tables\n}\n-func unconditionalAcceptChain(name string) *Chain {\n- return &Chain{\n+func unconditionalAcceptChain(name string) Chain {\n+ return Chain{\nName: name,\n- Rules: []*Rule{\n- &Rule{\n+ Rules: []Rule{\n+ Rule{\nTarget: UnconditionalAcceptTarget{},\n},\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/iptables/types.go",
"new_path": "pkg/tcpip/iptables/types.go",
"diff": "@@ -98,11 +98,11 @@ const (\n// IPTables holds all the tables for a netstack.\ntype IPTables struct {\n- // mu protects the entire struct.\n- mu sync.RWMutex\n+ // Mu protects the entire struct.\n+ Mu sync.RWMutex\n// Tables maps table names to tables. User tables have arbitrary names.\n- Tables map[string]*Table\n+ Tables map[string]Table\n// Priorities maps each hook to a list of table names. The order of the\n// list is the order in which each table should be visited for that\n@@ -118,7 +118,7 @@ type Table struct {\n// BuiltinChains holds the un-deletable chains built into netstack. If\n// a hook isn't present in the map, this table doesn't utilize that\n// hook.\n- BuiltinChains map[Hook]*Chain\n+ BuiltinChains map[Hook]Chain\n// DefaultTargets holds a target for each hook that will be executed if\n// chain traversal doesn't yield a verdict.\n@@ -126,7 +126,7 @@ type Table struct {\n// UserChains holds user-defined chains for the keyed by name. Users\n// can give their chains arbitrary names.\n- UserChains map[string]*Chain\n+ UserChains map[string]Chain\n// Chains maps names to chains for both builtin and user-defined chains.\n// Its entries point to Chains already either in BuiltinChains and\n@@ -158,7 +158,7 @@ type Chain struct {\nName string\n// Rules is the list of rules to traverse.\n- Rules []*Rule\n+ Rules []Rule\n}\n// Rule is a packet processing rule. It consists of two pieces. First it\n"
}
] | Go | Apache License 2.0 | google/gvisor | Address Ian's comments.
Change-Id: I7445033b1970cbba3f2ed0682fe520dce02d8fad |
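The refactor above switches the iptables maps from pointer types to plain values. One practical consequence of Go value semantics worth keeping in mind: a struct stored by value in a map cannot be mutated in place and has to be copied out, modified, and written back. A tiny illustration with a simplified stand-in type, not the netstack definitions:

```go
package main

import "fmt"

// Chain is a stand-in for the netstack type; only the shape matters here.
type Chain struct {
	Name  string
	Rules []string
}

func main() {
	chains := map[string]Chain{"PREROUTING": {Name: "PREROUTING"}}

	// chains["PREROUTING"].Rules = append(...) would not compile: map values
	// stored by value are not addressable. Copy out, modify, store back.
	c := chains["PREROUTING"]
	c.Rules = append(c.Rules, "ACCEPT")
	chains["PREROUTING"] = c

	fmt.Println(chains["PREROUTING"].Rules) // [ACCEPT]
}
```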
259,885 | 07.06.2019 14:51:18 | 25,200 | 48961d27a8bcc76b3783a7cc4a4a5ebcd5532d25 | Move //pkg/sentry/memutil to //pkg/memutil. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/memutil/BUILD",
"diff": "+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"memutil\",\n+ srcs = [\"memutil_unsafe.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/memutil\",\n+ visibility = [\"//visibility:public\"],\n+ deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n+)\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/memutil/memutil_unsafe.go",
"new_path": "pkg/memutil/memutil_unsafe.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build linux\n+\n+// Package memutil provides a wrapper for the memfd_create() system call.\npackage memutil\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/context/contexttest/BUILD",
"new_path": "pkg/sentry/context/contexttest/BUILD",
"diff": "@@ -9,11 +9,11 @@ go_library(\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest\",\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n+ \"//pkg/memutil\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/limits\",\n- \"//pkg/sentry/memutil\",\n\"//pkg/sentry/pgalloc\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/ptrace\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/context/contexttest/contexttest.go",
"new_path": "pkg/sentry/context/contexttest/contexttest.go",
"diff": "@@ -21,11 +21,11 @@ import (\n\"testing\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/memutil\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\nktime \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/limits\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/memutil\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ptrace\"\n"
},
{
"change_type": "DELETE",
"old_path": "pkg/sentry/memutil/BUILD",
"new_path": null,
"diff": "-load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n-\n-package(licenses = [\"notice\"])\n-\n-go_library(\n- name = \"memutil\",\n- srcs = [\n- \"memutil.go\",\n- \"memutil_unsafe.go\",\n- ],\n- importpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/memutil\",\n- visibility = [\"//pkg/sentry:internal\"],\n- deps = [\"@org_golang_x_sys//unix:go_default_library\"],\n-)\n"
},
{
"change_type": "DELETE",
"old_path": "pkg/sentry/memutil/memutil.go",
"new_path": null,
"diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Package memutil contains the utility functions for memory operations.\n-package memutil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/pgalloc/BUILD",
"new_path": "pkg/sentry/pgalloc/BUILD",
"diff": "@@ -63,10 +63,10 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/log\",\n+ \"//pkg/memutil\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/hostmm\",\n- \"//pkg/sentry/memutil\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/safemem\",\n\"//pkg/sentry/usage\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/usage/BUILD",
"new_path": "pkg/sentry/usage/BUILD",
"diff": "@@ -17,6 +17,6 @@ go_library(\n],\ndeps = [\n\"//pkg/bits\",\n- \"//pkg/sentry/memutil\",\n+ \"//pkg/memutil\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/usage/memory.go",
"new_path": "pkg/sentry/usage/memory.go",
"diff": "@@ -22,7 +22,7 @@ import (\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/bits\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/memutil\"\n+ \"gvisor.googlesource.com/gvisor/pkg/memutil\"\n)\n// MemoryKind represents a type of memory used by the application.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -30,6 +30,7 @@ go_library(\n\"//pkg/cpuid\",\n\"//pkg/eventchannel\",\n\"//pkg/log\",\n+ \"//pkg/memutil\",\n\"//pkg/rand\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/arch:registers_go_proto\",\n@@ -51,7 +52,6 @@ go_library(\n\"//pkg/sentry/kernel/kdefs\",\n\"//pkg/sentry/limits\",\n\"//pkg/sentry/loader\",\n- \"//pkg/sentry/memutil\",\n\"//pkg/sentry/pgalloc\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/kvm\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -29,6 +29,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/memutil\"\n\"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n@@ -37,7 +38,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/loader\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/memutil\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/kvm\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move //pkg/sentry/memutil to //pkg/memutil.
PiperOrigin-RevId: 252124156 |
259,885 | 10.06.2019 15:46:17 | 25,200 | 589f36ac4ae31b1f7f35a74d982398e48c28aa31 | Move //pkg/sentry/platform/procid to //pkg/procid. | [
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/BUILD",
"new_path": "pkg/procid/BUILD",
"diff": "@@ -9,8 +9,8 @@ go_library(\n\"procid_amd64.s\",\n\"procid_arm64.s\",\n],\n- importpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\",\n- visibility = [\"//pkg/sentry:internal\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/procid\",\n+ visibility = [\"//visibility:public\"],\n)\ngo_test(\n"
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/procid.go",
"new_path": "pkg/procid/procid.go",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/procid_amd64.s",
"new_path": "pkg/procid/procid_amd64.s",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/procid_arm64.s",
"new_path": "pkg/procid/procid_arm64.s",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/procid_net_test.go",
"new_path": "pkg/procid/procid_net_test.go",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "pkg/sentry/platform/procid/procid_test.go",
"new_path": "pkg/procid/procid_test.go",
"diff": ""
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/BUILD",
"new_path": "pkg/sentry/platform/kvm/BUILD",
"diff": "@@ -32,10 +32,10 @@ go_library(\n\"//pkg/atomicbitops\",\n\"//pkg/cpuid\",\n\"//pkg/log\",\n+ \"//pkg/procid\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/interrupt\",\n- \"//pkg/sentry/platform/procid\",\n\"//pkg/sentry/platform/ring0\",\n\"//pkg/sentry/platform/ring0/pagetables\",\n\"//pkg/sentry/platform/safecopy\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -23,7 +23,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/atomicbitops\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n+ \"gvisor.googlesource.com/gvisor/pkg/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/BUILD",
"new_path": "pkg/sentry/platform/ptrace/BUILD",
"diff": "@@ -20,11 +20,11 @@ go_library(\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/log\",\n+ \"//pkg/procid\",\n\"//pkg/seccomp\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/interrupt\",\n- \"//pkg/sentry/platform/procid\",\n\"//pkg/sentry/platform/safecopy\",\n\"//pkg/sentry/usermem\",\n\"@org_golang_x_sys//unix:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -21,9 +21,9 @@ import (\n\"sync\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"diff": "@@ -22,9 +22,9 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n)\nconst syscallEvent syscall.Signal = 0x80\n@@ -142,7 +142,7 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro\n// down available calls only to what is needed.\nrules := []seccomp.RuleSet{\n// Rules for trapping vsyscall access.\n- seccomp.RuleSet{\n+ {\nRules: seccomp.SyscallRules{\nsyscall.SYS_GETTIMEOFDAY: {},\nsyscall.SYS_TIME: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move //pkg/sentry/platform/procid to //pkg/procid.
PiperOrigin-RevId: 252501653 |
259,853 | 11.06.2019 09:37:01 | 25,200 | 307a9854edd4a1257b0393d39d8b4fca7b4223e4 | gvisor/test: create a per-testcase directory for runsc logs
Otherwise it's hard to find a directory for a specific test case. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/syscall_test_runner.go",
"new_path": "test/syscalls/syscall_test_runner.go",
"diff": "@@ -200,7 +200,11 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\nargs = append(args, \"-strace\")\n}\nif outDir, ok := syscall.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\"); ok {\n- debugLogDir, err := ioutil.TempDir(outDir, \"runsc\")\n+ tdir := filepath.Join(outDir, strings.Replace(tc.FullName(), \"/\", \"_\", -1))\n+ if err := os.MkdirAll(tdir, 0755); err != nil {\n+ t.Fatalf(\"could not create test dir: %v\", err)\n+ }\n+ debugLogDir, err := ioutil.TempDir(tdir, \"runsc\")\nif err != nil {\nt.Fatalf(\"could not create temp dir: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/test: create a per-testcase directory for runsc logs
Otherwise it's hard to find a directory for a specific test case.
PiperOrigin-RevId: 252636901 |
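The change above derives a per-test-case log directory from the gtest name by replacing the "/" separators. A minimal standalone sketch of that path construction, with an illustrative directory layout and test name:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// perTestLogDir turns a gtest-style name such as "Suite/Case/Param" into a
// single directory under outDir, since "/" cannot appear in a path element.
func perTestLogDir(outDir, testName string) (string, error) {
	dir := filepath.Join(outDir, strings.ReplaceAll(testName, "/", "_"))
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", err
	}
	return dir, nil
}

func main() {
	dir, err := perTestLogDir(os.TempDir(), "SendFileTest/Shutdown/0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("runsc debug logs would go under:", dir)
}
```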
259,992 | 11.06.2019 13:55:17 | 25,200 | a775ae82fed4f703db28edc3d9d58b5652d6979d | Fix broken pipe error building version file
(11:34:09) ERROR: /tmpfs/src/github/repo/runsc/BUILD:82:1: Couldn't build file runsc/version.txt: Executing genrule //runsc:deb-version failed (Broken pipe): bash failed: error executing command | [
{
"change_type": "MODIFY",
"old_path": "runsc/BUILD",
"new_path": "runsc/BUILD",
"diff": "@@ -82,7 +82,7 @@ pkg_tar(\ngenrule(\nname = \"deb-version\",\nouts = [\"version.txt\"],\n- cmd = \"$(location :runsc) -version | head -n 1 | sed 's/^[^0-9]*//' > $@\",\n+ cmd = \"$(location :runsc) -version | grep 'runsc version' | sed 's/^[^0-9]*//' > $@\",\nstamp = 1,\ntools = [\":runsc\"],\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix broken pipe error building version file
(11:34:09) ERROR: /tmpfs/src/github/repo/runsc/BUILD:82:1: Couldn't build file runsc/version.txt: Executing genrule //runsc:deb-version failed (Broken pipe): bash failed: error executing command
PiperOrigin-RevId: 252691902 |
259,992 | 11.06.2019 14:30:34 | 25,200 | 847c4b9759c49cb30728579cfb0f4a69f1987b94 | Use net.HardwareAddr for FDBasedLink.LinkAddress
It prints formatted to the log. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/network.go",
"new_path": "runsc/boot/network.go",
"diff": "@@ -56,7 +56,7 @@ type FDBasedLink struct {\nAddresses []net.IP\nRoutes []Route\nGSOMaxSize uint32\n- LinkAddress []byte\n+ LinkAddress net.HardwareAddr\n// NumChannels controls how many underlying FD's are to be used to\n// create this endpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -228,7 +228,7 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\nif err != nil {\nreturn fmt.Errorf(\"getting link for interface %q: %v\", iface.Name, err)\n}\n- link.LinkAddress = []byte(ifaceLink.Attrs().HardwareAddr)\n+ link.LinkAddress = ifaceLink.Attrs().HardwareAddr\nlog.Debugf(\"Setting up network channels\")\n// Create the socket for the device.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use net.HardwareAddr for FDBasedLink.LinkAddress
It prints formatted to the log.
PiperOrigin-RevId: 252699551 |
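The change above matters mostly for logging: net.HardwareAddr satisfies fmt.Stringer, so a MAC address renders as colon-separated hex instead of a raw byte slice. A quick demonstration with arbitrary address bytes:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	raw := []byte{0x02, 0x42, 0xac, 0x11, 0x00, 0x02} // arbitrary MAC bytes

	fmt.Printf("as []byte:           %v\n", raw)                   // [2 66 172 17 0 2]
	fmt.Printf("as net.HardwareAddr: %v\n", net.HardwareAddr(raw)) // 02:42:ac:11:00:02
}
```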
259,853 | 11.06.2019 16:35:42 | 25,200 | 69c8657a66ac1a7e3bfd388de0a7cd28ac4b51cd | kokoro: don't overwrite test results for different runtimes | [
{
"change_type": "MODIFY",
"old_path": "tools/run_tests.sh",
"new_path": "tools/run_tests.sh",
"diff": "@@ -175,13 +175,17 @@ run_docker_tests() {\n# configuration, e.g. save/restore not supported with hostnet.\ndeclare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\nfor v in \"${variations[@]}\"; do\n+ # Change test names otherwise each run of tests will overwrite logs and\n+ # results of the previous run.\n+ sed -i \"s/name = \\\"integration_test.*\\\"/name = \\\"integration_test${v}\\\"/\" runsc/test/integration/BUILD\n+ sed -i \"s/name = \\\"image_test.*\\\"/name = \\\"image_test${v}\\\"/\" runsc/test/image/BUILD\n# Run runsc tests with docker that are tagged manual.\nbazel test \\\n\"${BAZEL_BUILD_FLAGS[@]}\" \\\n--test_env=RUNSC_RUNTIME=\"${RUNTIME}${v}\" \\\n--test_output=all \\\n- //runsc/test/image:image_test \\\n- //runsc/test/integration:integration_test\n+ //runsc/test/image:image_test${v} \\\n+ //runsc/test/integration:integration_test${v}\ndone\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | kokoro: don't overwrite test results for different runtimes
PiperOrigin-RevId: 252724255 |
259,858 | 11.06.2019 19:23:27 | 25,200 | df110ad4fe571721a7eb4a5a1f9ce92584ef7809 | Eat sendfile partial error
For sendfile(2), we propagate a TCP error through the system call layer.
This should be eaten if there is a partial result. This change also adds
a test to ensure that there is no panic in this case, for both TCP sockets
and unix domain sockets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/error.go",
"new_path": "pkg/sentry/syscalls/linux/error.go",
"diff": "@@ -92,6 +92,10 @@ func handleIOError(t *kernel.Task, partialResult bool, err, intr error, op strin\n// TODO(gvisor.dev/issue/161): In some cases SIGPIPE should\n// also be sent to the application.\nreturn nil\n+ case syserror.ECONNRESET:\n+ // For TCP sendfile connections, we may have a reset. But we\n+ // should just return n as the result.\n+ return nil\ncase syserror.ErrWouldBlock:\n// Syscall would block, but completed a partial read/write.\n// This case should only be returned by IssueIO for nonblocking\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/sendfile_socket.cc",
"new_path": "test/syscalls/linux/sendfile_socket.cc",
"diff": "@@ -33,9 +33,69 @@ namespace gvisor {\nnamespace testing {\nnamespace {\n+class SendFileTest : public ::testing::TestWithParam<int> {\n+ protected:\n+ PosixErrorOr<std::tuple<int, int>> Sockets() {\n+ // Bind a server socket.\n+ int family = GetParam();\n+ struct sockaddr server_addr = {};\n+ switch (family) {\n+ case AF_INET: {\n+ struct sockaddr_in *server_addr_in =\n+ reinterpret_cast<struct sockaddr_in *>(&server_addr);\n+ server_addr_in->sin_family = family;\n+ server_addr_in->sin_addr.s_addr = INADDR_ANY;\n+ break;\n+ }\n+ case AF_UNIX: {\n+ struct sockaddr_un *server_addr_un =\n+ reinterpret_cast<struct sockaddr_un *>(&server_addr);\n+ server_addr_un->sun_family = family;\n+ server_addr_un->sun_path[0] = '\\0';\n+ break;\n+ }\n+ default:\n+ return PosixError(EINVAL);\n+ }\n+ int server = socket(family, SOCK_STREAM, 0);\n+ if (bind(server, &server_addr, sizeof(server_addr)) < 0) {\n+ return PosixError(errno);\n+ }\n+ if (listen(server, 1) < 0) {\n+ close(server);\n+ return PosixError(errno);\n+ }\n+\n+ // Fetch the address; both are anonymous.\n+ socklen_t length = sizeof(server_addr);\n+ if (getsockname(server, &server_addr, &length) < 0) {\n+ close(server);\n+ return PosixError(errno);\n+ }\n+\n+ // Connect the client.\n+ int client = socket(family, SOCK_STREAM, 0);\n+ if (connect(client, &server_addr, length) < 0) {\n+ close(server);\n+ close(client);\n+ return PosixError(errno);\n+ }\n+\n+ // Accept on the server.\n+ int server_client = accept(server, nullptr, 0);\n+ if (server_client < 0) {\n+ close(server);\n+ close(client);\n+ return PosixError(errno);\n+ }\n+ close(server);\n+ return std::make_tuple(client, server_client);\n+ }\n+};\n+\n// Sends large file to exercise the path that read and writes data multiple\n// times, esp. 
when more data is read than can be written.\n-TEST(SendFileTest, SendMultiple) {\n+TEST_P(SendFileTest, SendMultiple) {\nstd::vector<char> data(5 * 1024 * 1024);\nRandomizeBuffer(data.data(), data.size());\n@@ -45,34 +105,20 @@ TEST(SendFileTest, SendMultiple) {\nTempPath::kDefaultFileMode));\nconst TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n- // Use a socket for target file to make the write window small.\n- const FileDescriptor server(socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));\n- ASSERT_THAT(server.get(), SyscallSucceeds());\n-\n- struct sockaddr_in server_addr = {};\n- server_addr.sin_family = AF_INET;\n- server_addr.sin_addr.s_addr = INADDR_ANY;\n- ASSERT_THAT(\n- bind(server.get(), reinterpret_cast<struct sockaddr *>(&server_addr),\n- sizeof(server_addr)),\n- SyscallSucceeds());\n- ASSERT_THAT(listen(server.get(), 1), SyscallSucceeds());\n+ // Create sockets.\n+ std::tuple<int, int> fds = ASSERT_NO_ERRNO_AND_VALUE(Sockets());\n+ const FileDescriptor server(std::get<0>(fds));\n+ FileDescriptor client(std::get<1>(fds)); // non-const, reset is used.\n// Thread that reads data from socket and dumps to a file.\n- ScopedThread th([&server, &out_file, &server_addr] {\n- socklen_t addrlen = sizeof(server_addr);\n- const FileDescriptor fd(RetryEINTR(accept)(\n- server.get(), reinterpret_cast<struct sockaddr *>(&server_addr),\n- &addrlen));\n- ASSERT_THAT(fd.get(), SyscallSucceeds());\n-\n+ ScopedThread th([&] {\nFileDescriptor outf =\nASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY));\n// Read until socket is closed.\nchar buf[10240];\nfor (int cnt = 0;; cnt++) {\n- int r = RetryEINTR(read)(fd.get(), buf, sizeof(buf));\n+ int r = RetryEINTR(read)(server.get(), buf, sizeof(buf));\n// We cannot afford to save on every read() call.\nif (cnt % 1000 == 0) {\nASSERT_THAT(r, SyscallSucceeds());\n@@ -99,25 +145,6 @@ TEST(SendFileTest, SendMultiple) {\nconst FileDescriptor inf =\nASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));\n- FileDescriptor outf(socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));\n- ASSERT_THAT(outf.get(), SyscallSucceeds());\n-\n- // Get the port bound by the listening socket.\n- socklen_t addrlen = sizeof(server_addr);\n- ASSERT_THAT(getsockname(server.get(),\n- reinterpret_cast<sockaddr *>(&server_addr), &addrlen),\n- SyscallSucceeds());\n-\n- struct sockaddr_in addr = {};\n- addr.sin_family = AF_INET;\n- addr.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n- addr.sin_port = server_addr.sin_port;\n- std::cout << \"Connecting on port=\" << server_addr.sin_port;\n- ASSERT_THAT(\n- RetryEINTR(connect)(\n- outf.get(), reinterpret_cast<struct sockaddr *>(&addr), sizeof(addr)),\n- SyscallSucceeds());\n-\nint cnt = 0;\nfor (size_t sent = 0; sent < data.size(); cnt++) {\nconst size_t remain = data.size() - sent;\n@@ -125,7 +152,7 @@ TEST(SendFileTest, SendMultiple) {\n<< \", remain=\" << remain;\n// Send data and verify that sendfile returns the correct value.\n- int res = sendfile(outf.get(), inf.get(), nullptr, remain);\n+ int res = sendfile(client.get(), inf.get(), nullptr, remain);\n// We cannot afford to save on every sendfile() call.\nif (cnt % 120 == 0) {\nMaybeSave();\n@@ -142,17 +169,74 @@ TEST(SendFileTest, SendMultiple) {\n}\n// Close socket to stop thread.\n- outf.reset();\n+ client.reset();\nth.Join();\n// Verify that the output file has the correct data.\n- outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));\n+ const FileDescriptor outf =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY));\nstd::vector<char> 
actual(data.size(), '\\0');\nASSERT_THAT(RetryEINTR(read)(outf.get(), actual.data(), actual.size()),\nSyscallSucceedsWithValue(actual.size()));\nASSERT_EQ(memcmp(data.data(), actual.data(), data.size()), 0);\n}\n+TEST_P(SendFileTest, Shutdown) {\n+ // Create a socket.\n+ std::tuple<int, int> fds = ASSERT_NO_ERRNO_AND_VALUE(Sockets());\n+ const FileDescriptor client(std::get<0>(fds));\n+ FileDescriptor server(std::get<1>(fds)); // non-const, released below.\n+\n+ // If this is a TCP socket, then turn off linger.\n+ if (GetParam() == AF_INET) {\n+ struct linger sl;\n+ sl.l_onoff = 1;\n+ sl.l_linger = 0;\n+ ASSERT_THAT(\n+ setsockopt(server.get(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)),\n+ SyscallSucceeds());\n+ }\n+\n+ // Create a 1m file with random data.\n+ std::vector<char> data(1024 * 1024);\n+ RandomizeBuffer(data.data(), data.size());\n+ const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ GetAbsoluteTestTmpdir(), absl::string_view(data.data(), data.size()),\n+ TempPath::kDefaultFileMode));\n+ const FileDescriptor inf =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY));\n+\n+ // Read some data, then shutdown the socket. We don't actually care about\n+ // checking the contents (other tests do that), so we just re-use the same\n+ // buffer as above.\n+ ScopedThread t([&]() {\n+ int done = 0;\n+ while (done < data.size()) {\n+ int n = read(server.get(), data.data(), data.size());\n+ ASSERT_THAT(n, SyscallSucceeds());\n+ done += n;\n+ }\n+ // Close the server side socket.\n+ ASSERT_THAT(close(server.release()), SyscallSucceeds());\n+ });\n+\n+ // Continuously stream from the file to the socket. Note we do not assert\n+ // that a specific amount of data has been written at any time, just that some\n+ // data is written. Eventually, we should get a connection reset error.\n+ while (1) {\n+ off_t offset = 0; // Always read from the start.\n+ int n = sendfile(client.get(), inf.get(), &offset, data.size());\n+ EXPECT_THAT(n, AnyOf(SyscallFailsWithErrno(ECONNRESET),\n+ SyscallFailsWithErrno(EPIPE), SyscallSucceeds()));\n+ if (n <= 0) {\n+ break;\n+ }\n+ }\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(AddressFamily, SendFileTest,\n+ ::testing::Values(AF_UNIX, AF_INET));\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Eat sendfile partial error
For sendfile(2), we propagate a TCP error through the system call layer.
This should be eaten if there is a partial result. This change also adds
a test to ensure that there is no panic in this case, for both TCP sockets
and unix domain sockets.
PiperOrigin-RevId: 252746192 |
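
As context for the record above: the behaviour under test is that a sendfile(2) call which has already transferred some bytes reports the byte count instead of the transport error. The following is a small, self-contained Go sketch of that contract as seen from userspace; it is not gVisor code, and streamFile plus the socketpair/temp-file setup are invented for the illustration.

package main

import (
    "log"
    "os"
    "syscall"
)

// Illustrative sketch only; not part of the gVisor change above.
//
// streamFile copies up to count bytes from infd to outfd with sendfile(2).
// A partial transfer followed by ECONNRESET or EPIPE is treated as a normal
// end of stream: the call reports how many bytes moved rather than the
// transport error, which is the behaviour the commit enforces.
func streamFile(outfd, infd, count int) (int64, error) {
    var total int64
    for int(total) < count {
        n, err := syscall.Sendfile(outfd, infd, nil, count-int(total))
        if n > 0 {
            total += int64(n)
        }
        switch {
        case err == syscall.ECONNRESET || err == syscall.EPIPE:
            return total, nil // peer went away; keep the partial result
        case err != nil:
            return total, err
        case n == 0:
            return total, nil // EOF on the input file
        }
    }
    return total, nil
}

func main() {
    f, err := os.CreateTemp("", "sendfile")
    if err != nil {
        log.Fatal(err)
    }
    defer os.Remove(f.Name())
    f.Write(make([]byte, 1<<20))
    f.Seek(0, 0)

    // A Unix socketpair stands in for the TCP/Unix sockets in the tests.
    fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
    if err != nil {
        log.Fatal(err)
    }
    go func() { // drain the receiving end of the pair
        buf := make([]byte, 64<<10)
        for {
            if n, _ := syscall.Read(fds[1], buf); n <= 0 {
                return
            }
        }
    }()

    n, err := streamFile(fds[0], int(f.Fd()), 1<<20)
    log.Printf("copied %d bytes, err=%v", n, err)
}
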
259,853 | 12.06.2019 10:47:38 | 25,200 | 0d05a12fd394e464d44d8d39c58b22249358ed19 | gvisor/ptrace: print guest registers if a stub stopped with unexpected code | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"sync\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n@@ -300,6 +301,18 @@ const (\nkilled\n)\n+func (t *thread) dumpAndPanic(message string) {\n+ var regs syscall.PtraceRegs\n+ message += \"\\n\"\n+ if err := t.getRegs(®s); err == nil {\n+ message += dumpRegs(®s)\n+ } else {\n+ log.Warningf(\"unable to get registers: %v\", err)\n+ }\n+ message += fmt.Sprintf(\"stubStart\\t = %016x\\n\", stubStart)\n+ panic(message)\n+}\n+\n// wait waits for a stop event.\n//\n// Precondition: outcome is a valid waitOutcome.\n@@ -320,7 +333,7 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {\nswitch outcome {\ncase stopped:\nif !status.Stopped() {\n- panic(fmt.Sprintf(\"ptrace status unexpected: got %v, wanted stopped\", status))\n+ t.dumpAndPanic(fmt.Sprintf(\"ptrace status unexpected: got %v, wanted stopped\", status))\n}\nstopSig := status.StopSignal()\nif stopSig == 0 {\n@@ -334,12 +347,12 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {\nreturn stopSig\ncase killed:\nif !status.Exited() && !status.Signaled() {\n- panic(fmt.Sprintf(\"ptrace status unexpected: got %v, wanted exited\", status))\n+ t.dumpAndPanic(fmt.Sprintf(\"ptrace status unexpected: got %v, wanted exited\", status))\n}\nreturn syscall.Signal(status.ExitStatus())\ndefault:\n// Should not happen.\n- panic(fmt.Sprintf(\"unknown outcome: %v\", outcome))\n+ t.dumpAndPanic(fmt.Sprintf(\"unknown outcome: %v\", outcome))\n}\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_amd64.go",
"diff": "package ptrace\nimport (\n+ \"fmt\"\n+ \"strings\"\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n@@ -102,3 +104,38 @@ func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) {\n}\nreturn uintptr(rval), nil\n}\n+\n+func dumpRegs(regs *syscall.PtraceRegs) string {\n+ var m strings.Builder\n+\n+ fmt.Fprintf(&m, \"Registers:\\n\")\n+ fmt.Fprintf(&m, \"\\tR15\\t = %016x\\n\", regs.R15)\n+ fmt.Fprintf(&m, \"\\tR14\\t = %016x\\n\", regs.R14)\n+ fmt.Fprintf(&m, \"\\tR13\\t = %016x\\n\", regs.R13)\n+ fmt.Fprintf(&m, \"\\tR12\\t = %016x\\n\", regs.R12)\n+ fmt.Fprintf(&m, \"\\tRbp\\t = %016x\\n\", regs.Rbp)\n+ fmt.Fprintf(&m, \"\\tRbx\\t = %016x\\n\", regs.Rbx)\n+ fmt.Fprintf(&m, \"\\tR11\\t = %016x\\n\", regs.R11)\n+ fmt.Fprintf(&m, \"\\tR10\\t = %016x\\n\", regs.R10)\n+ fmt.Fprintf(&m, \"\\tR9\\t = %016x\\n\", regs.R9)\n+ fmt.Fprintf(&m, \"\\tR8\\t = %016x\\n\", regs.R8)\n+ fmt.Fprintf(&m, \"\\tRax\\t = %016x\\n\", regs.Rax)\n+ fmt.Fprintf(&m, \"\\tRcx\\t = %016x\\n\", regs.Rcx)\n+ fmt.Fprintf(&m, \"\\tRdx\\t = %016x\\n\", regs.Rdx)\n+ fmt.Fprintf(&m, \"\\tRsi\\t = %016x\\n\", regs.Rsi)\n+ fmt.Fprintf(&m, \"\\tRdi\\t = %016x\\n\", regs.Rdi)\n+ fmt.Fprintf(&m, \"\\tOrig_rax = %016x\\n\", regs.Orig_rax)\n+ fmt.Fprintf(&m, \"\\tRip\\t = %016x\\n\", regs.Rip)\n+ fmt.Fprintf(&m, \"\\tCs\\t = %016x\\n\", regs.Cs)\n+ fmt.Fprintf(&m, \"\\tEflags\\t = %016x\\n\", regs.Eflags)\n+ fmt.Fprintf(&m, \"\\tRsp\\t = %016x\\n\", regs.Rsp)\n+ fmt.Fprintf(&m, \"\\tSs\\t = %016x\\n\", regs.Ss)\n+ fmt.Fprintf(&m, \"\\tFs_base\\t = %016x\\n\", regs.Fs_base)\n+ fmt.Fprintf(&m, \"\\tGs_base\\t = %016x\\n\", regs.Gs_base)\n+ fmt.Fprintf(&m, \"\\tDs\\t = %016x\\n\", regs.Ds)\n+ fmt.Fprintf(&m, \"\\tEs\\t = %016x\\n\", regs.Es)\n+ fmt.Fprintf(&m, \"\\tFs\\t = %016x\\n\", regs.Fs)\n+ fmt.Fprintf(&m, \"\\tGs\\t = %016x\\n\", regs.Gs)\n+\n+ return m.String()\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/ptrace: print guest registers if a stub stopped with unexpected code
PiperOrigin-RevId: 252855280 |
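
The record above adds a register dump when a ptrace stub stops with an unexpected wait status. For readers unfamiliar with the mechanism, here is a rough standalone sketch (not the sentry code) that attaches to an existing thread and prints a handful of registers in a similar style; it assumes linux/amd64, sufficient ptrace permissions, and a pid passed on the command line.

package main

import (
    "fmt"
    "log"
    "os"
    "runtime"
    "strconv"
    "syscall"
)

// Illustrative sketch only; the real dumpRegs in the diff prints every register.
func dumpRegs(regs *syscall.PtraceRegs) string {
    return fmt.Sprintf("Rip\t= %016x\nRsp\t= %016x\nRax\t= %016x\nOrig_rax = %016x\n",
        regs.Rip, regs.Rsp, regs.Rax, regs.Orig_rax)
}

func main() {
    // ptrace requests must come from the thread that attached.
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()

    if len(os.Args) != 2 {
        log.Fatal("usage: dumpregs <pid>")
    }
    pid, err := strconv.Atoi(os.Args[1])
    if err != nil {
        log.Fatal(err)
    }

    if err := syscall.PtraceAttach(pid); err != nil {
        log.Fatalf("attach: %v", err)
    }
    defer syscall.PtraceDetach(pid)

    // Wait for the attach-induced stop before reading registers.
    var status syscall.WaitStatus
    if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil || !status.Stopped() {
        log.Fatalf("wait: err=%v, stopped=%v", err, status.Stopped())
    }

    var regs syscall.PtraceRegs
    if err := syscall.PtraceGetRegs(pid, &regs); err != nil {
        log.Fatalf("getregs: %v", err)
    }
    fmt.Print(dumpRegs(&regs))
}
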
259,853 | 12.06.2019 11:54:15 | 25,200 | bb849bad296f372670c2d2cf97424f74cf750ce2 | gvisor/runsc: apply seccomp filters before parsing a state file | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -16,6 +16,7 @@ go_library(\n\"limits.go\",\n\"loader.go\",\n\"network.go\",\n+ \"pprof.go\",\n\"strace.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/runsc/boot\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -359,6 +359,17 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nreturn fmt.Errorf(\"file cannot be empty\")\n}\n+ if cm.l.conf.ProfileEnable {\n+ // initializePProf opens /proc/self/maps, so has to be\n+ // called before installing seccomp filters.\n+ initializePProf()\n+ }\n+\n+ // Seccomp filters have to be applied before parsing the state file.\n+ if err := cm.l.installSeccompFilters(); err != nil {\n+ return err\n+ }\n+\n// Load the state.\nloadOpts := state.LoadOpts{Source: specFile}\nif err := loadOpts.Load(k, networkStack); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -445,6 +445,23 @@ func createMemoryFile() (*pgalloc.MemoryFile, error) {\nreturn mf, nil\n}\n+func (l *Loader) installSeccompFilters() error {\n+ if l.conf.DisableSeccomp {\n+ filter.Report(\"syscall filter is DISABLED. Running in less secure mode.\")\n+ } else {\n+ opts := filter.Options{\n+ Platform: l.k.Platform,\n+ HostNetwork: l.conf.Network == NetworkHost,\n+ ProfileEnable: l.conf.ProfileEnable,\n+ ControllerFD: l.ctrl.srv.FD(),\n+ }\n+ if err := filter.Install(opts); err != nil {\n+ return fmt.Errorf(\"installing seccomp filters: %v\", err)\n+ }\n+ }\n+ return nil\n+}\n+\n// Run runs the root container.\nfunc (l *Loader) Run() error {\nerr := l.run()\n@@ -480,25 +497,19 @@ func (l *Loader) run() error {\nreturn fmt.Errorf(\"trying to start deleted container %q\", l.sandboxID)\n}\n+ // If we are restoring, we do not want to create a process.\n+ // l.restore is set by the container manager when a restore call is made.\n+ if !l.restore {\n+ if l.conf.ProfileEnable {\n+ initializePProf()\n+ }\n+\n// Finally done with all configuration. Setup filters before user code\n// is loaded.\n- if l.conf.DisableSeccomp {\n- filter.Report(\"syscall filter is DISABLED. Running in less secure mode.\")\n- } else {\n- opts := filter.Options{\n- Platform: l.k.Platform,\n- HostNetwork: l.conf.Network == NetworkHost,\n- ProfileEnable: l.conf.ProfileEnable,\n- ControllerFD: l.ctrl.srv.FD(),\n- }\n- if err := filter.Install(opts); err != nil {\n- return fmt.Errorf(\"installing seccomp filters: %v\", err)\n- }\n+ if err := l.installSeccompFilters(); err != nil {\n+ return err\n}\n- // If we are restoring, we do not want to create a process.\n- // l.restore is set by the container manager when a restore call is made.\n- if !l.restore {\n// Create the FD map, which will set stdin, stdout, and stderr. If console\n// is true, then ioctl calls will be passed through to the host fd.\nctx := l.rootProcArgs.NewContext(l.k)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/pprof.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package boot\n+\n+func initializePProf() {\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/runsc: apply seccomp filters before parsing a state file
PiperOrigin-RevId: 252869983 |
259,962 | 12.06.2019 13:34:47 | 25,200 | 70578806e8d3e01fae2249b3e602cd5b05d378a0 | Add support for TCP_CONGESTION socket option.
This CL also cleans up the error returned for setting congestion
control which was incorrectly returning EINVAL instead of ENOENT. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -920,6 +920,30 @@ func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfa\nt.Kernel().EmitUnimplementedEvent(t)\n+ case linux.TCP_CONGESTION:\n+ if outLen <= 0 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ var v tcpip.CongestionControlOption\n+ if err := ep.GetSockOpt(&v); err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ // We match linux behaviour here where it returns the lower of\n+ // TCP_CA_NAME_MAX bytes or the value of the option length.\n+ //\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const tcpCANameMax = 16\n+\n+ toCopy := tcpCANameMax\n+ if outLen < tcpCANameMax {\n+ toCopy = outLen\n+ }\n+ b := make([]byte, toCopy)\n+ copy(b, v)\n+ return b, nil\n+\ndefault:\nemitUnimplementedEventTCP(t, name)\n}\n@@ -1222,6 +1246,12 @@ func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *\n}\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.KeepaliveIntervalOption(time.Second * time.Duration(v))))\n+ case linux.TCP_CONGESTION:\n+ v := tcpip.CongestionControlOption(optVal)\n+ if err := ep.SetSockOpt(v); err != nil {\n+ return syserr.TranslateNetstackError(err)\n+ }\n+ return nil\ncase linux.TCP_REPAIR_OPTIONS:\nt.Kernel().EmitUnimplementedEvent(t)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -472,6 +472,14 @@ type KeepaliveIntervalOption time.Duration\n// closed.\ntype KeepaliveCountOption int\n+// CongestionControlOption is used by SetSockOpt/GetSockOpt to set/get\n+// the current congestion control algorithm.\n+type CongestionControlOption string\n+\n+// AvailableCongestionControlOption is used to query the supported congestion\n+// control algorithms.\n+type AvailableCongestionControlOption string\n+\n// MulticastTTLOption is used by SetSockOpt/GetSockOpt to control the default\n// TTL value for multicast messages. The default is 1.\ntype MulticastTTLOption uint8\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -21,6 +21,7 @@ go_library(\n\"accept.go\",\n\"connect.go\",\n\"cubic.go\",\n+ \"cubic_state.go\",\n\"endpoint.go\",\n\"endpoint_state.go\",\n\"forwarder.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/cubic.go",
"new_path": "pkg/tcpip/transport/tcp/cubic.go",
"diff": "@@ -23,6 +23,7 @@ import (\n// control algorithm state.\n//\n// See: https://tools.ietf.org/html/rfc8312.\n+// +stateify savable\ntype cubicState struct {\n// wLastMax is the previous wMax value.\nwLastMax float64\n@@ -33,7 +34,7 @@ type cubicState struct {\n// t denotes the time when the current congestion avoidance\n// was entered.\n- t time.Time\n+ t time.Time `state:\".(unixTime)\"`\n// numCongestionEvents tracks the number of congestion events since last\n// RTO.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/cubic_state.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+import (\n+ \"time\"\n+)\n+\n+// saveT is invoked by stateify.\n+func (c *cubicState) saveT() unixTime {\n+ return unixTime{c.t.Unix(), c.t.UnixNano()}\n+}\n+\n+// loadT is invoked by stateify.\n+func (c *cubicState) loadT(unix unixTime) {\n+ c.t = time.Unix(unix.second, unix.nano)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -17,6 +17,7 @@ package tcp\nimport (\n\"fmt\"\n\"math\"\n+ \"strings\"\n\"sync\"\n\"sync/atomic\"\n\"time\"\n@@ -286,7 +287,7 @@ type endpoint struct {\n// cc stores the name of the Congestion Control algorithm to use for\n// this endpoint.\n- cc CongestionControlOption\n+ cc tcpip.CongestionControlOption\n// The following are used when a \"packet too big\" control packet is\n// received. They are protected by sndBufMu. They are used to\n@@ -394,7 +395,7 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\ne.rcvBufSize = rs.Default\n}\n- var cs CongestionControlOption\n+ var cs tcpip.CongestionControlOption\nif err := stack.TransportProtocolOption(ProtocolNumber, &cs); err == nil {\ne.cc = cs\n}\n@@ -898,6 +899,40 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {\ne.mu.Unlock()\nreturn nil\n+ case tcpip.CongestionControlOption:\n+ // Query the available cc algorithms in the stack and\n+ // validate that the specified algorithm is actually\n+ // supported in the stack.\n+ var avail tcpip.AvailableCongestionControlOption\n+ if err := e.stack.TransportProtocolOption(ProtocolNumber, &avail); err != nil {\n+ return err\n+ }\n+ availCC := strings.Split(string(avail), \" \")\n+ for _, cc := range availCC {\n+ if v == tcpip.CongestionControlOption(cc) {\n+ // Acquire the work mutex as we may need to\n+ // reinitialize the congestion control state.\n+ e.mu.Lock()\n+ state := e.state\n+ e.cc = v\n+ e.mu.Unlock()\n+ switch state {\n+ case StateEstablished:\n+ e.workMu.Lock()\n+ e.mu.Lock()\n+ if e.state == state {\n+ e.snd.cc = e.snd.initCongestionControl(e.cc)\n+ }\n+ e.mu.Unlock()\n+ e.workMu.Unlock()\n+ }\n+ return nil\n+ }\n+ }\n+\n+ // Linux returns ENOENT when an invalid congestion\n+ // control algorithm is specified.\n+ return tcpip.ErrNoSuchFile\ndefault:\nreturn nil\n}\n@@ -1067,6 +1102,12 @@ func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error {\n}\nreturn nil\n+ case *tcpip.CongestionControlOption:\n+ e.mu.Lock()\n+ *o = e.cc\n+ e.mu.Unlock()\n+ return nil\n+\ndefault:\nreturn tcpip.ErrUnknownProtocolOption\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/protocol.go",
"new_path": "pkg/tcpip/transport/tcp/protocol.go",
"diff": "@@ -79,13 +79,6 @@ const (\nccCubic = \"cubic\"\n)\n-// CongestionControlOption sets the current congestion control algorithm.\n-type CongestionControlOption string\n-\n-// AvailableCongestionControlOption returns the supported congestion control\n-// algorithms.\n-type AvailableCongestionControlOption string\n-\ntype protocol struct {\nmu sync.Mutex\nsackEnabled bool\n@@ -93,7 +86,6 @@ type protocol struct {\nrecvBufferSize ReceiveBufferSizeOption\ncongestionControl string\navailableCongestionControl []string\n- allowedCongestionControl []string\n}\n// Number returns the tcp protocol number.\n@@ -188,7 +180,7 @@ func (p *protocol) SetOption(option interface{}) *tcpip.Error {\np.mu.Unlock()\nreturn nil\n- case CongestionControlOption:\n+ case tcpip.CongestionControlOption:\nfor _, c := range p.availableCongestionControl {\nif string(v) == c {\np.mu.Lock()\n@@ -197,7 +189,9 @@ func (p *protocol) SetOption(option interface{}) *tcpip.Error {\nreturn nil\n}\n}\n- return tcpip.ErrInvalidOptionValue\n+ // linux returns ENOENT when an invalid congestion control\n+ // is specified.\n+ return tcpip.ErrNoSuchFile\ndefault:\nreturn tcpip.ErrUnknownProtocolOption\n}\n@@ -223,14 +217,14 @@ func (p *protocol) Option(option interface{}) *tcpip.Error {\n*v = p.recvBufferSize\np.mu.Unlock()\nreturn nil\n- case *CongestionControlOption:\n+ case *tcpip.CongestionControlOption:\np.mu.Lock()\n- *v = CongestionControlOption(p.congestionControl)\n+ *v = tcpip.CongestionControlOption(p.congestionControl)\np.mu.Unlock()\nreturn nil\n- case *AvailableCongestionControlOption:\n+ case *tcpip.AvailableCongestionControlOption:\np.mu.Lock()\n- *v = AvailableCongestionControlOption(strings.Join(p.availableCongestionControl, \" \"))\n+ *v = tcpip.AvailableCongestionControlOption(strings.Join(p.availableCongestionControl, \" \"))\np.mu.Unlock()\nreturn nil\ndefault:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -194,8 +194,6 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\ns := &sender{\nep: ep,\n- sndCwnd: InitialCwnd,\n- sndSsthresh: math.MaxInt64,\nsndWnd: sndWnd,\nsndUna: iss + 1,\nsndNxt: iss + 1,\n@@ -238,7 +236,13 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\nreturn s\n}\n-func (s *sender) initCongestionControl(congestionControlName CongestionControlOption) congestionControl {\n+// initCongestionControl initializes the specified congestion control module and\n+// returns a handle to it. It also initializes the sndCwnd and sndSsThresh to\n+// their initial values.\n+func (s *sender) initCongestionControl(congestionControlName tcpip.CongestionControlOption) congestionControl {\n+ s.sndCwnd = InitialCwnd\n+ s.sndSsthresh = math.MaxInt64\n+\nswitch congestionControlName {\ncase ccCubic:\nreturn newCubicCC(s)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -3205,13 +3205,14 @@ func TestTCPEndpointProbe(t *testing.T) {\n}\n}\n-func TestSetCongestionControl(t *testing.T) {\n+func TestStackSetCongestionControl(t *testing.T) {\ntestCases := []struct {\n- cc tcp.CongestionControlOption\n- mustPass bool\n+ cc tcpip.CongestionControlOption\n+ err *tcpip.Error\n}{\n- {\"reno\", true},\n- {\"cubic\", true},\n+ {\"reno\", nil},\n+ {\"cubic\", nil},\n+ {\"blahblah\", tcpip.ErrNoSuchFile},\n}\nfor _, tc := range testCases {\n@@ -3221,62 +3222,135 @@ func TestSetCongestionControl(t *testing.T) {\ns := c.Stack()\n- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tc.cc); err != nil && tc.mustPass {\n- t.Fatalf(\"s.SetTransportProtocolOption(%v, %v) = %v, want not-nil\", tcp.ProtocolNumber, tc.cc, err)\n+ var oldCC tcpip.CongestionControlOption\n+ if err := s.TransportProtocolOption(tcp.ProtocolNumber, &oldCC); err != nil {\n+ t.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &oldCC, err)\n}\n- var cc tcp.CongestionControlOption\n+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tc.cc); err != tc.err {\n+ t.Fatalf(\"s.SetTransportProtocolOption(%v, %v) = %v, want %v\", tcp.ProtocolNumber, tc.cc, err, tc.err)\n+ }\n+\n+ var cc tcpip.CongestionControlOption\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n}\n- if got, want := cc, tc.cc; got != want {\n+\n+ got, want := cc, oldCC\n+ // If SetTransportProtocolOption is expected to succeed\n+ // then the returned value for congestion control should\n+ // match the one specified in the\n+ // SetTransportProtocolOption call above, else it should\n+ // be what it was before the call to\n+ // SetTransportProtocolOption.\n+ if tc.err == nil {\n+ want = tc.cc\n+ }\n+ if got != want {\nt.Fatalf(\"got congestion control: %v, want: %v\", got, want)\n}\n})\n}\n}\n-func TestAvailableCongestionControl(t *testing.T) {\n+func TestStackAvailableCongestionControl(t *testing.T) {\nc := context.New(t, 1500)\ndefer c.Cleanup()\ns := c.Stack()\n// Query permitted congestion control algorithms.\n- var aCC tcp.AvailableCongestionControlOption\n+ var aCC tcpip.AvailableCongestionControlOption\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &aCC, err)\n}\n- if got, want := aCC, tcp.AvailableCongestionControlOption(\"reno cubic\"); got != want {\n- t.Fatalf(\"got tcp.AvailableCongestionControlOption: %v, want: %v\", got, want)\n+ if got, want := aCC, tcpip.AvailableCongestionControlOption(\"reno cubic\"); got != want {\n+ t.Fatalf(\"got tcpip.AvailableCongestionControlOption: %v, want: %v\", got, want)\n}\n}\n-func TestSetAvailableCongestionControl(t *testing.T) {\n+func TestStackSetAvailableCongestionControl(t *testing.T) {\nc := context.New(t, 1500)\ndefer c.Cleanup()\ns := c.Stack()\n// Setting AvailableCongestionControlOption should fail.\n- aCC := tcp.AvailableCongestionControlOption(\"xyz\")\n+ aCC := tcpip.AvailableCongestionControlOption(\"xyz\")\nif err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &aCC); err == nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = nil, want non-nil\", tcp.ProtocolNumber, &aCC)\n}\n// Verify that we still get the expected list of congestion control options.\n- var cc tcp.AvailableCongestionControlOption\n+ var cc tcpip.AvailableCongestionControlOption\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != 
nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n}\n- if got, want := cc, tcp.AvailableCongestionControlOption(\"reno cubic\"); got != want {\n- t.Fatalf(\"got tcp.AvailableCongestionControlOption: %v, want: %v\", got, want)\n+ if got, want := cc, tcpip.AvailableCongestionControlOption(\"reno cubic\"); got != want {\n+ t.Fatalf(\"got tcpip.AvailableCongestionControlOption: %v, want: %v\", got, want)\n+ }\n+}\n+\n+func TestEndpointSetCongestionControl(t *testing.T) {\n+ testCases := []struct {\n+ cc tcpip.CongestionControlOption\n+ err *tcpip.Error\n+ }{\n+ {\"reno\", nil},\n+ {\"cubic\", nil},\n+ {\"blahblah\", tcpip.ErrNoSuchFile},\n+ }\n+\n+ for _, connected := range []bool{false, true} {\n+ for _, tc := range testCases {\n+ t.Run(fmt.Sprintf(\"SetSockOpt(.., %v) w/ connected = %v\", tc.cc, connected), func(t *testing.T) {\n+ c := context.New(t, 1500)\n+ defer c.Cleanup()\n+\n+ // Create TCP endpoint.\n+ var err *tcpip.Error\n+ c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)\n+ if err != nil {\n+ t.Fatalf(\"NewEndpoint failed: %v\", err)\n+ }\n+\n+ var oldCC tcpip.CongestionControlOption\n+ if err := c.EP.GetSockOpt(&oldCC); err != nil {\n+ t.Fatalf(\"c.EP.SockOpt(%v) = %v\", &oldCC, err)\n+ }\n+\n+ if connected {\n+ c.Connect(789 /* iss */, 32768 /* rcvWnd */, nil)\n+ }\n+\n+ if err := c.EP.SetSockOpt(tc.cc); err != tc.err {\n+ t.Fatalf(\"c.EP.SetSockOpt(%v) = %v, want %v\", tc.cc, err, tc.err)\n+ }\n+\n+ var cc tcpip.CongestionControlOption\n+ if err := c.EP.GetSockOpt(&cc); err != nil {\n+ t.Fatalf(\"c.EP.SockOpt(%v) = %v\", &cc, err)\n+ }\n+\n+ got, want := cc, oldCC\n+ // If SetSockOpt is expected to succeed then the\n+ // returned value for congestion control should match\n+ // the one specified in the SetSockOpt above, else it\n+ // should be what it was before the call to SetSockOpt.\n+ if tc.err == nil {\n+ want = tc.cc\n+ }\n+ if got != want {\n+ t.Fatalf(\"got congestion control: %v, want: %v\", got, want)\n+ }\n+ })\n+ }\n}\n}\nfunc enableCUBIC(t *testing.T, c *context.Context) {\nt.Helper()\n- opt := tcp.CongestionControlOption(\"cubic\")\n+ opt := tcpip.CongestionControlOption(\"cubic\")\nif err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, opt); err != nil {\nt.Fatalf(\"c.s.SetTransportProtocolOption(tcp.ProtocolNumber, %v = %v\", opt, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"new_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"diff": "@@ -520,35 +520,21 @@ func (c *Context) CreateConnected(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf\nc.CreateConnectedWithRawOptions(iss, rcvWnd, epRcvBuf, nil)\n}\n-// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends\n-// the specified option bytes as the Option field in the initial SYN packet.\n+// Connect performs the 3-way handshake for c.EP with the provided Initial\n+// Sequence Number (iss) and receive window(rcvWnd) and any options if\n+// specified.\n//\n// It also sets the receive buffer for the endpoint to the specified\n// value in epRcvBuf.\n-func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption, options []byte) {\n- // Create TCP endpoint.\n- var err *tcpip.Error\n- c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)\n- if err != nil {\n- c.t.Fatalf(\"NewEndpoint failed: %v\", err)\n- }\n- if got, want := tcp.EndpointState(c.EP.State()), tcp.StateInitial; got != want {\n- c.t.Errorf(\"Unexpected endpoint state: want %v, got %v\", want, got)\n- }\n-\n- if epRcvBuf != nil {\n- if err := c.EP.SetSockOpt(*epRcvBuf); err != nil {\n- c.t.Fatalf(\"SetSockOpt failed failed: %v\", err)\n- }\n- }\n-\n+//\n+// PreCondition: c.EP must already be created.\n+func (c *Context) Connect(iss seqnum.Value, rcvWnd seqnum.Size, options []byte) {\n// Start connection attempt.\nwaitEntry, notifyCh := waiter.NewChannelEntry(nil)\nc.WQ.EventRegister(&waitEntry, waiter.EventOut)\ndefer c.WQ.EventUnregister(&waitEntry)\n- err = c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort})\n- if err != tcpip.ErrConnectStarted {\n+ if err := c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort}); err != tcpip.ErrConnectStarted {\nc.t.Fatalf(\"Unexpected return value from Connect: %v\", err)\n}\n@@ -590,8 +576,7 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.\n// Wait for connection to be established.\nselect {\ncase <-notifyCh:\n- err = c.EP.GetSockOpt(tcpip.ErrorOption{})\n- if err != nil {\n+ if err := c.EP.GetSockOpt(tcpip.ErrorOption{}); err != nil {\nc.t.Fatalf(\"Unexpected error when connecting: %v\", err)\n}\ncase <-time.After(1 * time.Second):\n@@ -604,6 +589,27 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.\nc.Port = tcpHdr.SourcePort()\n}\n+// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends\n+// the specified option bytes as the Option field in the initial SYN packet.\n+//\n+// It also sets the receive buffer for the endpoint to the specified\n+// value in epRcvBuf.\n+func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption, options []byte) {\n+ // Create TCP endpoint.\n+ var err *tcpip.Error\n+ c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ)\n+ if err != nil {\n+ c.t.Fatalf(\"NewEndpoint failed: %v\", err)\n+ }\n+\n+ if epRcvBuf != nil {\n+ if err := c.EP.SetSockOpt(*epRcvBuf); err != nil {\n+ c.t.Fatalf(\"SetSockOpt failed failed: %v\", err)\n+ }\n+ }\n+ c.Connect(iss, rcvWnd, options)\n+}\n+\n// RawEndpoint is just a small wrapper around a TCP endpoint's state to make\n// sending data and ACK packets easy while being able to manipulate the sequence\n// numbers and timestamp values as needed.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"diff": "@@ -592,5 +592,109 @@ TEST_P(TCPSocketPairTest, MsgTruncMsgPeek) {\nEXPECT_EQ(0, memcmp(received_data2, sent_data, sizeof(sent_data)));\n}\n+TEST_P(TCPSocketPairTest, SetCongestionControlSucceedsForSupported) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ // Netstack only supports reno & cubic so we only test these two values here.\n+ {\n+ const char kSetCC[kTcpCaNameMax] = \"reno\";\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &kSetCC, strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax];\n+ memset(got_cc, '1', sizeof(got_cc));\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));\n+ }\n+ {\n+ const char kSetCC[kTcpCaNameMax] = \"cubic\";\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &kSetCC, strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax];\n+ memset(got_cc, '1', sizeof(got_cc));\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));\n+ }\n+}\n+\n+TEST_P(TCPSocketPairTest, SetGetTCPCongestionShortReadBuffer) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ {\n+ // Verify that getsockopt/setsockopt work with buffers smaller than\n+ // kTcpCaNameMax.\n+ const char kSetCC[] = \"cubic\";\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &kSetCC, strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[sizeof(kSetCC)];\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc)));\n+ }\n+}\n+\n+TEST_P(TCPSocketPairTest, SetGetTCPCongestionLargeReadBuffer) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ {\n+ // Verify that getsockopt works with buffers larger than\n+ // kTcpCaNameMax.\n+ const char kSetCC[] = \"cubic\";\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &kSetCC, strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax + 5];\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ // Linux copies the minimum of kTcpCaNameMax or the length of the passed in\n+ // buffer and sets optlen to the number of bytes actually copied\n+ // irrespective of the actual length of the congestion control name.\n+ EXPECT_EQ(kTcpCaNameMax, optlen);\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));\n+ }\n+}\n+\n+TEST_P(TCPSocketPairTest, SetCongestionControlFailsForUnsupported) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ char old_cc[kTcpCaNameMax];\n+ socklen_t optlen;\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &old_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+\n+ const char kSetCC[] = 
\"invalid_ca_cc\";\n+ ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &kSetCC, strlen(kSetCC)),\n+ SyscallFailsWithErrno(ENOENT));\n+\n+ char got_cc[kTcpCaNameMax];\n+ ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n+ &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(0, memcmp(got_cc, old_cc, sizeof(old_cc)));\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -751,6 +751,133 @@ TEST_P(SimpleTcpSocketTest, NonBlockingConnectRefused) {\nEXPECT_THAT(close(s.release()), SyscallSucceeds());\n}\n+// Test that setting a supported congestion control algorithm succeeds for an\n+// unconnected TCP socket\n+TEST_P(SimpleTcpSocketTest, SetCongestionControlSucceedsForSupported) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ {\n+ const char kSetCC[kTcpCaNameMax] = \"reno\";\n+ ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,\n+ strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax];\n+ memset(got_cc, '1', sizeof(got_cc));\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ // We ignore optlen here as the linux kernel sets optlen to the lower of the\n+ // size of the buffer passed in or kTcpCaNameMax and not the length of the\n+ // congestion control algorithm's actual name.\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kTcpCaNameMax)));\n+ }\n+ {\n+ const char kSetCC[kTcpCaNameMax] = \"cubic\";\n+ ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,\n+ strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax];\n+ memset(got_cc, '1', sizeof(got_cc));\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ // We ignore optlen here as the linux kernel sets optlen to the lower of the\n+ // size of the buffer passed in or kTcpCaNameMax and not the length of the\n+ // congestion control algorithm's actual name.\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kTcpCaNameMax)));\n+ }\n+}\n+\n+// This test verifies that a getsockopt(...TCP_CONGESTION) behaviour is\n+// consistent between linux and gvisor when the passed in buffer is smaller than\n+// kTcpCaNameMax.\n+TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionShortReadBuffer) {\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ {\n+ // Verify that getsockopt/setsockopt work with buffers smaller than\n+ // kTcpCaNameMax.\n+ const char kSetCC[] = \"cubic\";\n+ ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,\n+ strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[sizeof(kSetCC)];\n+ socklen_t optlen = sizeof(got_cc);\n+ ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(sizeof(got_cc), optlen);\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc)));\n+ }\n+}\n+\n+// This test verifies that a getsockopt(...TCP_CONGESTION) behaviour is\n+// consistent between linux and gvisor when the passed in buffer is larger than\n+// kTcpCaNameMax.\n+TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionLargeReadBuffer) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ {\n+ // Verify that getsockopt works with buffers larger than\n+ // kTcpCaNameMax.\n+ const char kSetCC[] = \"cubic\";\n+ ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC,\n+ strlen(kSetCC)),\n+ SyscallSucceedsWithValue(0));\n+\n+ char got_cc[kTcpCaNameMax + 5];\n+ socklen_t optlen = sizeof(got_cc);\n+ 
ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ // Linux copies the minimum of kTcpCaNameMax or the length of the passed in\n+ // buffer and sets optlen to the number of bytes actually copied\n+ // irrespective of the actual length of the congestion control name.\n+ EXPECT_EQ(kTcpCaNameMax, optlen);\n+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC)));\n+ }\n+}\n+\n+// Test that setting an unsupported congestion control algorithm fails for an\n+// unconnected TCP socket.\n+TEST_P(SimpleTcpSocketTest, SetCongestionControlFailsForUnsupported) {\n+ // This is Linux's net/tcp.h TCP_CA_NAME_MAX.\n+ const int kTcpCaNameMax = 16;\n+\n+ FileDescriptor s =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));\n+ char old_cc[kTcpCaNameMax];\n+ socklen_t optlen = sizeof(old_cc);\n+ ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &old_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+\n+ const char kSetCC[] = \"invalid_ca_kSetCC\";\n+ ASSERT_THAT(\n+ setsockopt(s.get(), SOL_TCP, TCP_CONGESTION, &kSetCC, strlen(kSetCC)),\n+ SyscallFailsWithErrno(ENOENT));\n+\n+ char got_cc[kTcpCaNameMax];\n+ ASSERT_THAT(\n+ getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen),\n+ SyscallSucceedsWithValue(0));\n+ // We ignore optlen here as the linux kernel sets optlen to the lower of the\n+ // size of the buffer passed in or kTcpCaNameMax and not the length of the\n+ // congestion control algorithm's actual name.\n+ EXPECT_EQ(0, memcmp(got_cc, old_cc, sizeof(kTcpCaNameMax)));\n+}\n+\nINSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest,\n::testing::Values(AF_INET, AF_INET6));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add support for TCP_CONGESTION socket option.
This CL also cleans up the error returned for setting congestion
control which was incorrectly returning EINVAL instead of ENOENT.
PiperOrigin-RevId: 252889093 |
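
The TCP_CONGESTION option added above can be exercised from ordinary userspace code with a setsockopt/getsockopt pair. A minimal illustration using golang.org/x/sys/unix (not part of the change itself) follows; note that the kernel pads the returned name out to TCP_CA_NAME_MAX bytes, and that an unsupported name fails with ENOENT, which is what the change aligns netstack with.

package main

import (
    "log"
    "strings"

    "golang.org/x/sys/unix"
)

// Illustrative sketch only; not part of the gVisor change above.
func main() {
    fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(fd)

    // Setting "blahblah" here would return ENOENT, as the new tests expect.
    if err := unix.SetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION, "cubic"); err != nil {
        log.Fatalf("set TCP_CONGESTION: %v", err)
    }

    cc, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
    if err != nil {
        log.Fatalf("get TCP_CONGESTION: %v", err)
    }
    // The kernel pads the name to TCP_CA_NAME_MAX (16) bytes, so trim NULs.
    log.Printf("congestion control: %q", strings.TrimRight(cc, "\x00"))
}
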
259,858 | 12.06.2019 10:17:58 | 25,200 | c1636b150ab0041782634b3d5909ec818675e14d | Add /pr support for links.
This also updates the canonical package name to include a /gvisor
suffix. This is more flexible to allow for future packages or git hook
interception. | [
{
"change_type": "MODIFY",
"old_path": "cmd/gvisor-website/main.go",
"new_path": "cmd/gvisor-website/main.go",
"diff": "@@ -30,28 +30,34 @@ import (\n)\nvar redirects = map[string]string{\n- \"/change\": \"https://gvisor.googlesource.com/gvisor/\",\n- \"/cl\": \"https://gvisor-review.googlesource.com/\",\n+ \"/change\": \"https://github.com/google/gvisor\",\n\"/issue\": \"https://github.com/google/gvisor/issues\",\n\"/issue/new\": \"https://github.com/google/gvisor/issues/new\",\n+ \"/pr\": \"https://github.com/google/gvisor/pulls\",\n// Redirects to compatibility docs.\n\"/c\": \"/docs/user_guide/compatibility\",\n\"/c/linux/amd64\": \"/docs/user_guide/compatibility/amd64\",\n+\n+ // Deprecated, but links continue to work.\n+ \"/cl\": \"https://gvisor-review.googlesource.com\",\n}\nvar prefixHelpers = map[string]string{\n- \"cl\": \"https://gvisor-review.googlesource.com/c/gvisor/+/%s\",\n- \"change\": \"https://gvisor.googlesource.com/gvisor/+/%s\",\n+ \"change\": \"https://github.com/google/gvisor/commit/%s\",\n\"issue\": \"https://github.com/google/gvisor/issues/%s\",\n+ \"pr\": \"https://github.com/google/gvisor/pull/%s\",\n// Redirects to compatibility docs.\n\"c/linux/amd64\": \"/docs/user_guide/compatibility/amd64/#%s\",\n+\n+ // Deprecated, but links continue to work.\n+ \"cl\": \"https://gvisor-review.googlesource.com/c/gvisor/+/%s\",\n}\nvar (\nvalidId = regexp.MustCompile(`^[A-Za-z0-9-]*/?$`)\n- goGetHeader = `<meta name=\"go-import\" content=\"gvisor.dev git https://github.com/google/gvisor\">`\n+ goGetHeader = `<meta name=\"go-import\" content=\"gvisor.dev/gvisor git https://github.com/google/gvisor\">`\ngoGetHTML5 = `<!doctype html><html><head><meta charset=utf-8>` + goGetHeader + `<title>Go-get</title></head><body></html>`\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add /pr support for links.
This also updates the canonical package name to include a /gvisor
suffix. This is more flexible to allow for future packages or git hook
interception. |
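
The redirect tables above are consumed by the website's HTTP handler, which is not shown in the diff. As a hedged sketch of the general prefix-redirect pattern, the handler and mux wiring below are invented for illustration; only the shape of the map mirrors the record.

package main

import (
    "fmt"
    "net/http"
    "regexp"
    "strings"
)

// Illustrative subset of the prefix-to-URL-template table above.
var prefixHelpers = map[string]string{
    "issue": "https://github.com/google/gvisor/issues/%s",
    "pr":    "https://github.com/google/gvisor/pull/%s",
}

var validID = regexp.MustCompile(`^[A-Za-z0-9-]*/?$`)

func redirect(w http.ResponseWriter, r *http.Request) {
    // Expect paths of the form /<prefix>/<id>, e.g. /pr/123.
    parts := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)
    if len(parts) != 2 || !validID.MatchString(parts[1]) {
        http.NotFound(w, r)
        return
    }
    format, ok := prefixHelpers[parts[0]]
    if !ok {
        http.NotFound(w, r)
        return
    }
    http.Redirect(w, r, fmt.Sprintf(format, parts[1]), http.StatusFound)
}

func main() {
    http.HandleFunc("/", redirect)
    http.ListenAndServe(":8080", nil)
}
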
259,858 | 12.06.2019 15:58:32 | 25,200 | e352f464780b22945308469de4e92a4f068b4805 | Minor BUILD file cleanup. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/muxed/BUILD",
"new_path": "pkg/tcpip/link/muxed/BUILD",
"diff": "load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n-package(\n- licenses = [\"notice\"], # Apache 2.0\n-)\n+package(licenses = [\"notice\"])\ngo_library(\nname = \"muxed\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/raw/BUILD",
"new_path": "pkg/tcpip/transport/raw/BUILD",
"diff": "-package(\n- licenses = [\"notice\"], # Apache 2.0\n-)\n+package(licenses = [\"notice\"])\nload(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Minor BUILD file cleanup.
PiperOrigin-RevId: 252918338 |
259,962 | 13.06.2019 13:40:37 | 25,200 | 9f77b36fa100761eb3eabbb87f5111419202a9d5 | Set optlen correctly when calling getsockopt. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc",
"diff": "@@ -679,7 +679,7 @@ TEST_P(TCPSocketPairTest, SetCongestionControlFailsForUnsupported) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar old_cc[kTcpCaNameMax];\n- socklen_t optlen;\n+ socklen_t optlen = sizeof(old_cc);\nASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n&old_cc, &optlen),\nSyscallSucceedsWithValue(0));\n@@ -690,6 +690,7 @@ TEST_P(TCPSocketPairTest, SetCongestionControlFailsForUnsupported) {\nSyscallFailsWithErrno(ENOENT));\nchar got_cc[kTcpCaNameMax];\n+ optlen = sizeof(got_cc);\nASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION,\n&got_cc, &optlen),\nSyscallSucceedsWithValue(0));\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set optlen correctly when calling getsockopt.
PiperOrigin-RevId: 253096085 |
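
The bug fixed above is that getsockopt(2) takes optlen as a value-result argument, so it must be initialised to the buffer size before the call. The point is easiest to see with the raw system call; the sketch below (linux/amd64, golang.org/x/sys/unix) is illustrative only, since idiomatic Go would use typed wrappers that manage the length internally.

package main

import (
    "log"
    "unsafe"

    "golang.org/x/sys/unix"
)

// Illustrative sketch only; not part of the gVisor change above.
func main() {
    fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(fd)

    buf := make([]byte, 16)    // TCP_CA_NAME_MAX
    optlen := uint32(len(buf)) // value-result: must hold the buffer size going in
    _, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT,
        uintptr(fd), unix.IPPROTO_TCP, unix.TCP_CONGESTION,
        uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&optlen)), 0)
    if errno != 0 {
        log.Fatalf("getsockopt: %v", errno)
    }
    log.Printf("name=%q optlen=%d", buf[:optlen], optlen)
}

Leaving optlen uninitialised (as the test did before this fix) hands the kernel a garbage length, which is exactly what the one-line change above corrects.
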
259,970 | 13.06.2019 14:26:26 | 25,200 | b915a25597a961311ceb57f89d18eaee9c9461d8 | Fix use of "2 ^ 30".
2 ^ 30 is 28, not | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -259,7 +259,7 @@ func New(args Args) (*Loader, error) {\n// Adjust the total memory returned by the Sentry so that applications that\n// use /proc/meminfo can make allocations based on this limit.\nusage.MinimumTotalMemoryBytes = args.TotalMem\n- log.Infof(\"Setting total memory to %.2f GB\", float64(args.TotalMem)/(2^30))\n+ log.Infof(\"Setting total memory to %.2f GB\", float64(args.TotalMem)/(1 << 30))\n}\n// Initiate the Kernel object, which is required by the Context passed\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix use of "2 ^ 30".
2 ^ 30 is 28, not 1073741824. |
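
The fix above turns on a Go operator detail: ^ is bitwise XOR, not exponentiation, so 2 ^ 30 evaluates to 28, while 1 << 30 is the intended 1073741824. A two-line check:

package main

import "fmt"

func main() {
    fmt.Println(2 ^ 30)  // 28: ^ is bitwise XOR in Go, not exponentiation
    fmt.Println(1 << 30) // 1073741824: shifting gives the intended power of two
}

This is why the diff divides by (1 << 30) when printing gigabytes.
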
259,884 | 13.06.2019 15:44:17 | 25,200 | 4fdd560b76dfe4e3df83a8cba5a070ce7142b433 | Set the HOME environment variable (fixes
runsc will now set the HOME environment variable as required by POSIX. The
user's home directory is retrieved from the /etc/passwd file located on the
container's file system during boot. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -18,6 +18,7 @@ go_library(\n\"network.go\",\n\"pprof.go\",\n\"strace.go\",\n+ \"user.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/runsc/boot\",\nvisibility = [\n@@ -69,6 +70,7 @@ go_library(\n\"//pkg/sentry/time\",\n\"//pkg/sentry/unimpl:unimplemented_syscall_go_proto\",\n\"//pkg/sentry/usage\",\n+ \"//pkg/sentry/usermem\",\n\"//pkg/sentry/watchdog\",\n\"//pkg/syserror\",\n\"//pkg/tcpip\",\n@@ -97,6 +99,7 @@ go_test(\n\"compat_test.go\",\n\"fs_test.go\",\n\"loader_test.go\",\n+ \"user_test.go\",\n],\nembed = [\":boot\"],\ndeps = [\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -20,6 +20,7 @@ import (\nmrand \"math/rand\"\n\"os\"\n\"runtime\"\n+ \"strings\"\n\"sync\"\n\"sync/atomic\"\n\"syscall\"\n@@ -534,6 +535,24 @@ func (l *Loader) run() error {\nreturn err\n}\n+ // Read /etc/passwd for the user's HOME directory and set the HOME\n+ // environment variable as required by POSIX if it is not overridden by\n+ // the user.\n+ hasHomeEnvv := false\n+ for _, envv := range l.rootProcArgs.Envv {\n+ if strings.HasPrefix(envv, \"HOME=\") {\n+ hasHomeEnvv = true\n+ }\n+ }\n+ if !hasHomeEnvv {\n+ homeDir, err := getExecUserHome(rootCtx, rootMns, uint32(l.rootProcArgs.Credentials.RealKUID))\n+ if err != nil {\n+ return fmt.Errorf(\"error reading exec user: %v\", err)\n+ }\n+\n+ l.rootProcArgs.Envv = append(l.rootProcArgs.Envv, \"HOME=\"+homeDir)\n+ }\n+\n// Create the root container init task. It will begin running\n// when the kernel is started.\nif _, _, err := l.k.CreateProcess(l.rootProcArgs); err != nil {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/boot/user_test.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package boot\n+\n+import (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+ \"strings\"\n+ \"syscall\"\n+ \"testing\"\n+\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+)\n+\n+func setupTempDir() (string, error) {\n+ tmpDir, err := ioutil.TempDir(os.TempDir(), \"exec-user-test\")\n+ if err != nil {\n+ return \"\", err\n+ }\n+ return tmpDir, nil\n+}\n+\n+func setupPasswd(contents string, perms os.FileMode) func() (string, error) {\n+ return func() (string, error) {\n+ tmpDir, err := setupTempDir()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ if err := os.Mkdir(filepath.Join(tmpDir, \"etc\"), 0777); err != nil {\n+ return \"\", err\n+ }\n+\n+ f, err := os.Create(filepath.Join(tmpDir, \"etc\", \"passwd\"))\n+ if err != nil {\n+ return \"\", err\n+ }\n+ defer f.Close()\n+\n+ _, err = f.WriteString(contents)\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ err = f.Chmod(perms)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ return tmpDir, nil\n+ }\n+}\n+\n+// TestGetExecUserHome tests the getExecUserHome function.\n+func TestGetExecUserHome(t *testing.T) {\n+ tests := map[string]struct {\n+ uid uint32\n+ createRoot func() (string, error)\n+ expected string\n+ }{\n+ \"success\": {\n+ uid: 1000,\n+ createRoot: setupPasswd(\"adin::1000:1111::/home/adin:/bin/sh\", 0666),\n+ expected: \"/home/adin\",\n+ },\n+ \"no_passwd\": {\n+ uid: 1000,\n+ createRoot: setupTempDir,\n+ expected: \"/\",\n+ },\n+ \"no_perms\": {\n+ uid: 1000,\n+ createRoot: setupPasswd(\"adin::1000:1111::/home/adin:/bin/sh\", 0000),\n+ expected: \"/\",\n+ },\n+ \"directory\": {\n+ uid: 1000,\n+ createRoot: func() (string, error) {\n+ tmpDir, err := setupTempDir()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ if err := os.Mkdir(filepath.Join(tmpDir, \"etc\"), 0777); err != nil {\n+ return \"\", err\n+ }\n+\n+ if err := syscall.Mkdir(filepath.Join(tmpDir, \"etc\", \"passwd\"), 0666); err != nil {\n+ return \"\", err\n+ }\n+\n+ return tmpDir, nil\n+ },\n+ expected: \"/\",\n+ },\n+ // Currently we don't allow named pipes.\n+ \"named_pipe\": {\n+ uid: 1000,\n+ createRoot: func() (string, error) {\n+ tmpDir, err := setupTempDir()\n+ if err != nil {\n+ return \"\", err\n+ }\n+\n+ if err := os.Mkdir(filepath.Join(tmpDir, \"etc\"), 0777); err != nil {\n+ return \"\", err\n+ }\n+\n+ if err := syscall.Mkfifo(filepath.Join(tmpDir, \"etc\", \"passwd\"), 0666); err != nil {\n+ return \"\", err\n+ }\n+\n+ return tmpDir, nil\n+ },\n+ expected: \"/\",\n+ },\n+ }\n+\n+ for name, tc := range tests {\n+ t.Run(name, func(t *testing.T) {\n+ tmpDir, err := tc.createRoot()\n+ if err != nil {\n+ t.Fatalf(\"failed to create root dir: %v\", err)\n+ }\n+\n+ sandEnd, cleanup, err := startGofer(tmpDir)\n+ if err != nil {\n+ t.Fatalf(\"failed to create gofer: %v\", err)\n+ }\n+ defer 
cleanup()\n+\n+ ctx := contexttest.Context(t)\n+ conf := &Config{\n+ RootDir: \"unused_root_dir\",\n+ Network: NetworkNone,\n+ DisableSeccomp: true,\n+ }\n+\n+ spec := &specs.Spec{\n+ Root: &specs.Root{\n+ Path: tmpDir,\n+ Readonly: true,\n+ },\n+ // Add /proc mount as tmpfs to avoid needing a kernel.\n+ Mounts: []specs.Mount{\n+ {\n+ Destination: \"/proc\",\n+ Type: \"tmpfs\",\n+ },\n+ },\n+ }\n+\n+ var mns *fs.MountNamespace\n+ setMountNS := func(m *fs.MountNamespace) {\n+ mns = m\n+ ctx.(*contexttest.TestContext).RegisterValue(fs.CtxRoot, mns.Root())\n+ }\n+ mntr := newContainerMounter(spec, \"\", []int{sandEnd}, nil, &podMountHints{})\n+ if err := mntr.setupRootContainer(ctx, ctx, conf, setMountNS); err != nil {\n+ t.Fatalf(\"failed to create mount namespace: %v\", err)\n+ }\n+\n+ got, err := getExecUserHome(ctx, mns, tc.uid)\n+ if err != nil {\n+ t.Fatalf(\"failed to get user home: %v\", err)\n+ }\n+\n+ if got != tc.expected {\n+ t.Fatalf(\"expected %v, got: %v\", tc.expected, got)\n+ }\n+ })\n+ }\n+}\n+\n+// TestFindHomeInPasswd tests the findHomeInPasswd function's passwd file parsing.\n+func TestFindHomeInPasswd(t *testing.T) {\n+ tests := map[string]struct {\n+ uid uint32\n+ passwd string\n+ expected string\n+ def string\n+ }{\n+ \"empty\": {\n+ uid: 1000,\n+ passwd: \"\",\n+ expected: \"/\",\n+ def: \"/\",\n+ },\n+ \"whitespace\": {\n+ uid: 1000,\n+ passwd: \" \",\n+ expected: \"/\",\n+ def: \"/\",\n+ },\n+ \"full\": {\n+ uid: 1000,\n+ passwd: \"adin::1000:1111::/home/adin:/bin/sh\",\n+ expected: \"/home/adin\",\n+ def: \"/\",\n+ },\n+ // For better or worse, this is how runc works.\n+ \"partial\": {\n+ uid: 1000,\n+ passwd: \"adin::1000:1111:\",\n+ expected: \"\",\n+ def: \"/\",\n+ },\n+ \"multiple\": {\n+ uid: 1001,\n+ passwd: \"adin::1000:1111::/home/adin:/bin/sh\\nian::1001:1111::/home/ian:/bin/sh\",\n+ expected: \"/home/ian\",\n+ def: \"/\",\n+ },\n+ \"duplicate\": {\n+ uid: 1000,\n+ passwd: \"adin::1000:1111::/home/adin:/bin/sh\\nian::1000:1111::/home/ian:/bin/sh\",\n+ expected: \"/home/adin\",\n+ def: \"/\",\n+ },\n+ \"empty_lines\": {\n+ uid: 1001,\n+ passwd: \"adin::1000:1111::/home/adin:/bin/sh\\n\\n\\nian::1001:1111::/home/ian:/bin/sh\",\n+ expected: \"/home/ian\",\n+ def: \"/\",\n+ },\n+ }\n+\n+ for name, tc := range tests {\n+ t.Run(name, func(t *testing.T) {\n+ got, err := findHomeInPasswd(tc.uid, strings.NewReader(tc.passwd), tc.def)\n+ if err != nil {\n+ t.Fatalf(\"error parsing passwd: %v\", err)\n+ }\n+ if tc.expected != got {\n+ t.Fatalf(\"expected %v, got: %v\", tc.expected, got)\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set the HOME environment variable (fixes #293)
runsc will now set the HOME environment variable as required by POSIX. The
user's home directory is retrieved from the /etc/passwd file located on the
container's file system during boot.
PiperOrigin-RevId: 253120627 |
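
The new user.go that performs the /etc/passwd lookup is not included in the diff above (only its test is), so the following is a hedged approximation of the parsing the user_test.go table exercises; findHome is an illustrative name, not the real function. Passwd lines have the layout name:passwd:uid:gid:gecos:home:shell.

package main

import (
    "bufio"
    "fmt"
    "io"
    "strconv"
    "strings"
)

// Illustrative sketch only; not the gVisor implementation.
//
// findHome scans passwd-formatted lines and returns the home directory of the
// first entry whose uid field matches, falling back to def when nothing matches.
func findHome(uid uint32, r io.Reader, def string) (string, error) {
    s := bufio.NewScanner(r)
    for s.Scan() {
        line := strings.TrimSpace(s.Text())
        if line == "" {
            continue
        }
        fields := strings.Split(line, ":")
        if len(fields) < 3 {
            continue
        }
        entryUID, err := strconv.ParseUint(fields[2], 10, 32)
        if err != nil || uint32(entryUID) != uid {
            continue
        }
        if len(fields) > 5 {
            return fields[5], nil
        }
        return "", nil // truncated entry: the home field is simply empty
    }
    return def, s.Err()
}

func main() {
    passwd := "root:x:0:0:root:/root:/bin/bash\nadin::1000:1111::/home/adin:/bin/sh\n"
    home, err := findHome(1000, strings.NewReader(passwd), "/")
    fmt.Println(home, err) // /home/adin <nil>
}
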
259,885 | 13.06.2019 15:52:49 | 25,200 | 0c8603084d9399fde250c68fe30a084749d937ff | Add p9 and unet benchmarks. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/transport_test.go",
"new_path": "pkg/p9/transport_test.go",
"diff": "@@ -179,6 +179,51 @@ func TestSendClosed(t *testing.T) {\n}\n}\n+func BenchmarkSendRecv(b *testing.B) {\n+ server, client, err := unet.SocketPair(false)\n+ if err != nil {\n+ b.Fatalf(\"socketpair got err %v expected nil\", err)\n+ }\n+ defer server.Close()\n+ defer client.Close()\n+\n+ // Exchange Rflush messages since these contain no data and therefore incur\n+ // no additional marshaling overhead.\n+ go func() {\n+ for i := 0; i < b.N; i++ {\n+ tag, m, err := recv(server, maximumLength, msgRegistry.get)\n+ if err != nil {\n+ b.Fatalf(\"recv got err %v expected nil\", err)\n+ }\n+ if tag != Tag(1) {\n+ b.Fatalf(\"got tag %v expected 1\", tag)\n+ }\n+ if _, ok := m.(*Rflush); !ok {\n+ b.Fatalf(\"got message %T expected *Rflush\", m)\n+ }\n+ if err := send(server, Tag(2), &Rflush{}); err != nil {\n+ b.Fatalf(\"send got err %v expected nil\", err)\n+ }\n+ }\n+ }()\n+ b.ResetTimer()\n+ for i := 0; i < b.N; i++ {\n+ if err := send(client, Tag(1), &Rflush{}); err != nil {\n+ b.Fatalf(\"send got err %v expected nil\", err)\n+ }\n+ tag, m, err := recv(client, maximumLength, msgRegistry.get)\n+ if err != nil {\n+ b.Fatalf(\"recv got err %v expected nil\", err)\n+ }\n+ if tag != Tag(2) {\n+ b.Fatalf(\"got tag %v expected 2\", tag)\n+ }\n+ if _, ok := m.(*Rflush); !ok {\n+ b.Fatalf(\"got message %v expected *Rflush\", m)\n+ }\n+ }\n+}\n+\nfunc init() {\nmsgRegistry.register(MsgTypeBadDecode, func() message { return &badDecode{} })\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/unet/unet_test.go",
"new_path": "pkg/unet/unet_test.go",
"diff": "@@ -691,3 +691,45 @@ func TestControlMessage(t *testing.T) {\n}\n}\n}\n+\n+func benchmarkSendRecv(b *testing.B, packet bool) {\n+ server, client, err := SocketPair(packet)\n+ if err != nil {\n+ b.Fatalf(\"SocketPair: got %v, wanted nil\", err)\n+ }\n+ defer server.Close()\n+ defer client.Close()\n+ go func() {\n+ buf := make([]byte, 1)\n+ for i := 0; i < b.N; i++ {\n+ n, err := server.Read(buf)\n+ if n != 1 || err != nil {\n+ b.Fatalf(\"server.Read: got (%d, %v), wanted (1, nil)\", n, err)\n+ }\n+ n, err = server.Write(buf)\n+ if n != 1 || err != nil {\n+ b.Fatalf(\"server.Write: got (%d, %v), wanted (1, nil)\", n, err)\n+ }\n+ }\n+ }()\n+ buf := make([]byte, 1)\n+ b.ResetTimer()\n+ for i := 0; i < b.N; i++ {\n+ n, err := client.Write(buf)\n+ if n != 1 || err != nil {\n+ b.Fatalf(\"client.Write: got (%d, %v), wanted (1, nil)\", n, err)\n+ }\n+ n, err = client.Read(buf)\n+ if n != 1 || err != nil {\n+ b.Fatalf(\"client.Read: got (%d, %v), wanted (1, nil)\", n, err)\n+ }\n+ }\n+}\n+\n+func BenchmarkSendRecvStream(b *testing.B) {\n+ benchmarkSendRecv(b, false)\n+}\n+\n+func BenchmarkSendRecvPacket(b *testing.B) {\n+ benchmarkSendRecv(b, true)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add p9 and unet benchmarks.
PiperOrigin-RevId: 253122166 |
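
The added benchmarks all follow the standard testing.B pattern. A stripped-down skeleton of that pattern is shown below (assuming a file such as bench_test.go in its own package; it is not part of the change).

package bench

import "testing"

// BenchmarkPattern shows the shape the new benchmarks follow: do setup, reset
// the timer so setup cost is excluded, then run exactly b.N iterations of the
// operation being measured.
func BenchmarkPattern(b *testing.B) {
    buf := make([]byte, 1) // setup
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        buf[0]++ // the measured operation goes here
    }
}

With the plain Go toolchain the new benchmarks would be run with something like: go test -bench=SendRecv -run='^$' ./pkg/p9/ ./pkg/unet/ (the exact invocation depends on the build setup; gVisor itself normally builds with Bazel).
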
259,985 | 13.06.2019 17:23:35 | 25,200 | 05ff1ffaadaa0ac370365eb14febc761506735ce | Implement getsockopt() SO_DOMAIN, SO_PROTOCOL and SO_TYPE.
SO_TYPE was already implemented for everything but netlink sockets. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -668,12 +668,6 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\nfunc getSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int, skType linux.SockType, name, outLen int) (interface{}, *syserr.Error) {\n// TODO(b/124056281): Stop rejecting short optLen values in getsockopt.\nswitch name {\n- case linux.SO_TYPE:\n- if outLen < sizeOfInt32 {\n- return nil, syserr.ErrInvalidArgument\n- }\n- return int32(skType), nil\n-\ncase linux.SO_ERROR:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/provider.go",
"new_path": "pkg/sentry/socket/epsocket/provider.go",
"diff": "@@ -111,7 +111,7 @@ func (p *provider) Socket(t *kernel.Task, stype linux.SockType, protocol int) (*\nreturn nil, syserr.TranslateNetstackError(e)\n}\n- return New(t, p.family, stype, protocol, wq, ep)\n+ return New(t, p.family, stype, int(transProto), wq, ep)\n}\n// Pair just returns nil sockets (not supported).\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/socket.go",
"new_path": "pkg/sentry/socket/hostinet/socket.go",
"diff": "@@ -288,7 +288,7 @@ func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLe\n}\ncase syscall.SOL_SOCKET:\nswitch name {\n- case syscall.SO_ERROR, syscall.SO_KEEPALIVE, syscall.SO_SNDBUF, syscall.SO_RCVBUF, syscall.SO_REUSEADDR, syscall.SO_TYPE:\n+ case syscall.SO_ERROR, syscall.SO_KEEPALIVE, syscall.SO_SNDBUF, syscall.SO_RCVBUF, syscall.SO_REUSEADDR:\noptlen = sizeofInt32\ncase syscall.SO_LINGER:\noptlen = syscall.SizeofLinger\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -86,6 +86,7 @@ go_library(\n\"//pkg/sentry/syscalls\",\n\"//pkg/sentry/usage\",\n\"//pkg/sentry/usermem\",\n+ \"//pkg/syserr\",\n\"//pkg/syserror\",\n\"//pkg/waiter\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"new_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"diff": "@@ -29,6 +29,7 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/socket/control\"\n\"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport\"\n\"gvisor.dev/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.dev/gvisor/pkg/syserr\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n)\n@@ -61,6 +62,8 @@ const controlLenOffset = 40\n// to the Flags field.\nconst flagsOffset = 48\n+const sizeOfInt32 = 4\n+\n// messageHeader64Len is the length of a MessageHeader64 struct.\nvar messageHeader64Len = uint64(binary.Size(MessageHeader64{}))\n@@ -466,7 +469,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy\n}\n// Call syscall implementation then copy both value and value len out.\n- v, e := s.GetSockOpt(t, int(level), int(name), int(optLen))\n+ v, e := getSockOpt(t, s, int(level), int(name), int(optLen))\nif e != nil {\nreturn 0, nil, e.ToError()\n}\n@@ -487,6 +490,33 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy\nreturn 0, nil, nil\n}\n+// getSockOpt tries to handle common socket options, or dispatches to a specific\n+// socket implementation.\n+func getSockOpt(t *kernel.Task, s socket.Socket, level, name, len int) (interface{}, *syserr.Error) {\n+ if level == linux.SOL_SOCKET {\n+ switch name {\n+ case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL:\n+ if len < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ }\n+\n+ switch name {\n+ case linux.SO_TYPE:\n+ _, skType, _ := s.Type()\n+ return int32(skType), nil\n+ case linux.SO_DOMAIN:\n+ family, _, _ := s.Type()\n+ return int32(family), nil\n+ case linux.SO_PROTOCOL:\n+ _, _, protocol := s.Type()\n+ return int32(protocol), nil\n+ }\n+ }\n+\n+ return s.GetSockOpt(t, level, name, len)\n+}\n+\n// SetSockOpt implements the linux syscall setsockopt(2).\n//\n// Note that unlike Linux, enabling SO_PASSCRED does not autobind the socket.\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -1909,6 +1909,7 @@ cc_library(\n\":unix_domain_socket_test_util\",\n\"//test/util:test_util\",\n\"@com_google_absl//absl/strings\",\n+ \"@com_google_absl//absl/strings:str_format\",\n\"@com_google_googletest//:gtest\",\n],\nalwayslink = 1,\n@@ -2427,6 +2428,7 @@ cc_binary(\n\"//test/util:file_descriptor\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n+ \"@com_google_absl//absl/strings:str_format\",\n\"@com_google_googletest//:gtest\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/ip_socket_test_util.cc",
"new_path": "test/syscalls/linux/ip_socket_test_util.cc",
"diff": "@@ -45,47 +45,53 @@ SocketPairKind IPv6TCPAcceptBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected IPv6 TCP socket\");\nreturn SocketPairKind{\n- description, TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM,\n- 0, /* dual_stack = */ false)};\n+ description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,\n+ TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM, 0,\n+ /* dual_stack = */ false)};\n}\nSocketPairKind IPv4TCPAcceptBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected IPv4 TCP socket\");\nreturn SocketPairKind{\n- description, TCPAcceptBindSocketPairCreator(AF_INET, type | SOCK_STREAM,\n- 0, /* dual_stack = */ false)};\n+ description, AF_INET, type | SOCK_STREAM, IPPROTO_TCP,\n+ TCPAcceptBindSocketPairCreator(AF_INET, type | SOCK_STREAM, 0,\n+ /* dual_stack = */ false)};\n}\nSocketPairKind DualStackTCPAcceptBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected dual stack TCP socket\");\nreturn SocketPairKind{\n- description, TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM,\n- 0, /* dual_stack = */ true)};\n+ description, AF_INET6, type | SOCK_STREAM, IPPROTO_TCP,\n+ TCPAcceptBindSocketPairCreator(AF_INET6, type | SOCK_STREAM, 0,\n+ /* dual_stack = */ true)};\n}\nSocketPairKind IPv6UDPBidirectionalBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected IPv6 UDP socket\");\n- return SocketPairKind{description, UDPBidirectionalBindSocketPairCreator(\n- AF_INET6, type | SOCK_DGRAM, 0,\n+ return SocketPairKind{\n+ description, AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP,\n+ UDPBidirectionalBindSocketPairCreator(AF_INET6, type | SOCK_DGRAM, 0,\n/* dual_stack = */ false)};\n}\nSocketPairKind IPv4UDPBidirectionalBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected IPv4 UDP socket\");\n- return SocketPairKind{description, UDPBidirectionalBindSocketPairCreator(\n- AF_INET, type | SOCK_DGRAM, 0,\n+ return SocketPairKind{\n+ description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,\n+ UDPBidirectionalBindSocketPairCreator(AF_INET, type | SOCK_DGRAM, 0,\n/* dual_stack = */ false)};\n}\nSocketPairKind DualStackUDPBidirectionalBindSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"connected dual stack UDP socket\");\n- return SocketPairKind{description, UDPBidirectionalBindSocketPairCreator(\n- AF_INET6, type | SOCK_DGRAM, 0,\n+ return SocketPairKind{\n+ description, AF_INET6, type | SOCK_DGRAM, IPPROTO_UDP,\n+ UDPBidirectionalBindSocketPairCreator(AF_INET6, type | SOCK_DGRAM, 0,\n/* dual_stack = */ true)};\n}\n@@ -93,22 +99,25 @@ SocketPairKind IPv4UDPUnboundSocketPair(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"IPv4 UDP socket\");\nreturn SocketPairKind{\n- description, UDPUnboundSocketPairCreator(AF_INET, type | SOCK_DGRAM, 0,\n+ description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,\n+ UDPUnboundSocketPairCreator(AF_INET, type | SOCK_DGRAM, 0,\n/* dual_stack = */ false)};\n}\nSocketKind IPv4UDPUnboundSocket(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"IPv4 UDP socket\");\n- return SocketKind{description, UnboundSocketCreator(\n- AF_INET, type | SOCK_DGRAM, IPPROTO_UDP)};\n+ return SocketKind{\n+ description, AF_INET, type | SOCK_DGRAM, IPPROTO_UDP,\n+ UnboundSocketCreator(AF_INET, type | 
SOCK_DGRAM, IPPROTO_UDP)};\n}\nSocketKind IPv4TCPUnboundSocket(int type) {\nstd::string description =\nabsl::StrCat(DescribeSocketType(type), \"IPv4 TCP socket\");\n- return SocketKind{description, UnboundSocketCreator(\n- AF_INET, type | SOCK_STREAM, IPPROTO_TCP)};\n+ return SocketKind{\n+ description, AF_INET, type | SOCK_STREAM, IPPROTO_TCP,\n+ UnboundSocketCreator(AF_INET, type | SOCK_STREAM, IPPROTO_TCP)};\n}\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_generic.cc",
"new_path": "test/syscalls/linux/socket_generic.cc",
"diff": "#include \"gtest/gtest.h\"\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/syscalls/linux/unix_domain_socket_test_util.h\"\n@@ -687,5 +688,54 @@ TEST_P(AllSocketPairTest, RecvTimeoutWaitAll) {\nEXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n}\n+TEST_P(AllSocketPairTest, GetSockoptType) {\n+ int type = GetParam().type;\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {\n+ int opt;\n+ socklen_t optlen = sizeof(opt);\n+ EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_TYPE, &opt, &optlen),\n+ SyscallSucceeds());\n+\n+ // Type may have SOCK_NONBLOCK and SOCK_CLOEXEC ORed into it. Remove these\n+ // before comparison.\n+ type &= ~(SOCK_NONBLOCK | SOCK_CLOEXEC);\n+ EXPECT_EQ(opt, type) << absl::StrFormat(\n+ \"getsockopt(%d, SOL_SOCKET, SO_TYPE, &opt, &optlen) => opt=%d was \"\n+ \"unexpected\",\n+ fd, opt);\n+ }\n+}\n+\n+TEST_P(AllSocketPairTest, GetSockoptDomain) {\n+ const int domain = GetParam().domain;\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {\n+ int opt;\n+ socklen_t optlen = sizeof(opt);\n+ EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &opt, &optlen),\n+ SyscallSucceeds());\n+ EXPECT_EQ(opt, domain) << absl::StrFormat(\n+ \"getsockopt(%d, SOL_SOCKET, SO_DOMAIN, &opt, &optlen) => opt=%d was \"\n+ \"unexpected\",\n+ fd, opt);\n+ }\n+}\n+\n+TEST_P(AllSocketPairTest, GetSockoptProtocol) {\n+ const int protocol = GetParam().protocol;\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+ for (const int fd : {sockets->first_fd(), sockets->second_fd()}) {\n+ int opt;\n+ socklen_t optlen = sizeof(opt);\n+ EXPECT_THAT(getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &opt, &optlen),\n+ SyscallSucceeds());\n+ EXPECT_EQ(opt, protocol) << absl::StrFormat(\n+ \"getsockopt(%d, SOL_SOCKET, SO_PROTOCOL, &opt, &optlen) => opt=%d was \"\n+ \"unexpected\",\n+ fd, opt);\n+ }\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_netlink_route.cc",
"new_path": "test/syscalls/linux/socket_netlink_route.cc",
"diff": "#include <vector>\n#include \"gtest/gtest.h\"\n+#include \"absl/strings/str_format.h\"\n#include \"test/syscalls/linux/socket_netlink_util.h\"\n#include \"test/syscalls/linux/socket_test_util.h\"\n#include \"test/util/cleanup.h\"\n@@ -144,24 +145,56 @@ TEST(NetlinkRouteTest, GetPeerName) {\nEXPECT_EQ(addr.nl_pid, 0);\n}\n-using IntSockOptTest = ::testing::TestWithParam<int>;\n+// Parameters for GetSockOpt test. They are:\n+// 0: Socket option to query.\n+// 1: A predicate to run on the returned sockopt value. Should return true if\n+// the value is considered ok.\n+// 2: A description of what the sockopt value is expected to be. Should complete\n+// the sentence \"<value> was unexpected, expected <description>\"\n+using SockOptTest =\n+ ::testing::TestWithParam<std::tuple<int, std::function<bool(int)>, std::string>>;\n+\n+TEST_P(SockOptTest, GetSockOpt) {\n+ int sockopt = std::get<0>(GetParam());\n+ auto verifier = std::get<1>(GetParam());\n+ std::string verifier_description = std::get<2>(GetParam());\n-TEST_P(IntSockOptTest, GetSockOpt) {\nFileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE));\nint res;\nsocklen_t len = sizeof(res);\n- EXPECT_THAT(getsockopt(fd.get(), SOL_SOCKET, GetParam(), &res, &len),\n+ EXPECT_THAT(getsockopt(fd.get(), SOL_SOCKET, sockopt, &res, &len),\nSyscallSucceeds());\nEXPECT_EQ(len, sizeof(res));\n- EXPECT_GT(res, 0);\n+ EXPECT_TRUE(verifier(res)) << absl::StrFormat(\n+ \"getsockopt(%d, SOL_SOCKET, %d, &res, &len) => res=%d was unexpected, \"\n+ \"expected %s\",\n+ fd.get(), sockopt, res, verifier_description);\n+}\n+\n+std::function<bool(int)> IsPositive() {\n+ return [](int val) { return val > 0; };\n+}\n+\n+std::function<bool(int)> IsEqual(int target) {\n+ return [target](int val) { return val == target; };\n}\n-INSTANTIATE_TEST_SUITE_P(NetlinkRouteTest, IntSockOptTest,\n- ::testing::Values(SO_SNDBUF, SO_RCVBUF));\n+INSTANTIATE_TEST_SUITE_P(\n+ NetlinkRouteTest, SockOptTest,\n+ ::testing::Values(\n+ std::make_tuple(SO_SNDBUF, IsPositive(), \"positive send buffer size\"),\n+ std::make_tuple(SO_RCVBUF, IsPositive(),\n+ \"positive receive buffer size\"),\n+ std::make_tuple(SO_TYPE, IsEqual(SOCK_RAW),\n+ absl::StrFormat(\"SOCK_RAW (%d)\", SOCK_RAW)),\n+ std::make_tuple(SO_DOMAIN, IsEqual(AF_NETLINK),\n+ absl::StrFormat(\"AF_NETLINK (%d)\", AF_NETLINK)),\n+ std::make_tuple(SO_PROTOCOL, IsEqual(NETLINK_ROUTE),\n+ absl::StrFormat(\"NETLINK_ROUTE (%d)\", NETLINK_ROUTE))));\n// Validates the reponses to RTM_GETLINK + NLM_F_DUMP.\nvoid CheckGetLinkResponse(const struct nlmsghdr* hdr, int seq, int port) {\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_test_util.cc",
"new_path": "test/syscalls/linux/socket_test_util.cc",
"diff": "@@ -457,7 +457,8 @@ Creator<SocketPair> UDPUnboundSocketPairCreator(int domain, int type,\nSocketPairKind Reversed(SocketPairKind const& base) {\nauto const& creator = base.creator;\nreturn SocketPairKind{\n- absl::StrCat(\"reversed \", base.description),\n+ absl::StrCat(\"reversed \", base.description), base.domain, base.type,\n+ base.protocol,\n[creator]() -> PosixErrorOr<std::unique_ptr<ReversedSocketPair>> {\nASSIGN_OR_RETURN_ERRNO(auto creator_value, creator());\nreturn absl::make_unique<ReversedSocketPair>(std::move(creator_value));\n@@ -542,8 +543,8 @@ struct sockaddr_storage AddrFDSocketPair::to_storage(const sockaddr_in6& addr) {\nSocketKind SimpleSocket(int fam, int type, int proto) {\nreturn SocketKind{\n- absl::StrCat(\"Family \", fam, \", type \", type, \", proto \", proto),\n- SyscallSocketCreator(fam, type, proto)};\n+ absl::StrCat(\"Family \", fam, \", type \", type, \", proto \", proto), fam,\n+ type, proto, SyscallSocketCreator(fam, type, proto)};\n}\nssize_t SendLargeSendMsg(const std::unique_ptr<SocketPair>& sockets,\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/socket_test_util.h",
"new_path": "test/syscalls/linux/socket_test_util.h",
"diff": "@@ -287,6 +287,9 @@ Creator<FileDescriptor> UnboundSocketCreator(int domain, int type,\n// a function that creates such a socket pair.\nstruct SocketPairKind {\nstd::string description;\n+ int domain;\n+ int type;\n+ int protocol;\nCreator<SocketPair> creator;\n// Create creates a socket pair of this kind.\n@@ -297,6 +300,9 @@ struct SocketPairKind {\n// a function that creates such a socket.\nstruct SocketKind {\nstd::string description;\n+ int domain;\n+ int type;\n+ int protocol;\nCreator<FileDescriptor> creator;\n// Create creates a socket pair of this kind.\n@@ -353,6 +359,7 @@ Middleware SetSockOpt(int level, int optname, T* value) {\nreturn SocketPairKind{\nabsl::StrCat(\"setsockopt(\", level, \", \", optname, \", \", *value, \") \",\nbase.description),\n+ base.domain, base.type, base.protocol,\n[creator, level, optname,\nvalue]() -> PosixErrorOr<std::unique_ptr<SocketPair>> {\nASSIGN_OR_RETURN_ERRNO(auto creator_value, creator());\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/unix_domain_socket_test_util.cc",
"new_path": "test/syscalls/linux/unix_domain_socket_test_util.cc",
"diff": "@@ -47,7 +47,7 @@ std::string DescribeUnixDomainSocketType(int type) {\n}\nSocketPairKind UnixDomainSocketPair(int type) {\n- return SocketPairKind{DescribeUnixDomainSocketType(type),\n+ return SocketPairKind{DescribeUnixDomainSocketType(type), AF_UNIX, type, 0,\nSyscallSocketPairCreator(AF_UNIX, type, 0)};\n}\n@@ -56,11 +56,12 @@ SocketPairKind FilesystemBoundUnixDomainSocketPair(int type) {\n\" created with filesystem binding\");\nif ((type & SOCK_DGRAM) == SOCK_DGRAM) {\nreturn SocketPairKind{\n- description,\n+ description, AF_UNIX, type, 0,\nFilesystemBidirectionalBindSocketPairCreator(AF_UNIX, type, 0)};\n}\nreturn SocketPairKind{\n- description, FilesystemAcceptBindSocketPairCreator(AF_UNIX, type, 0)};\n+ description, AF_UNIX, type, 0,\n+ FilesystemAcceptBindSocketPairCreator(AF_UNIX, type, 0)};\n}\nSocketPairKind AbstractBoundUnixDomainSocketPair(int type) {\n@@ -68,17 +69,17 @@ SocketPairKind AbstractBoundUnixDomainSocketPair(int type) {\n\" created with abstract namespace binding\");\nif ((type & SOCK_DGRAM) == SOCK_DGRAM) {\nreturn SocketPairKind{\n- description,\n+ description, AF_UNIX, type, 0,\nAbstractBidirectionalBindSocketPairCreator(AF_UNIX, type, 0)};\n}\n- return SocketPairKind{description,\n+ return SocketPairKind{description, AF_UNIX, type, 0,\nAbstractAcceptBindSocketPairCreator(AF_UNIX, type, 0)};\n}\nSocketPairKind SocketpairGoferUnixDomainSocketPair(int type) {\nstd::string description = absl::StrCat(DescribeUnixDomainSocketType(type),\n\" created with the socketpair gofer\");\n- return SocketPairKind{description,\n+ return SocketPairKind{description, AF_UNIX, type, 0,\nSocketpairGoferSocketPairCreator(AF_UNIX, type, 0)};\n}\n@@ -87,13 +88,15 @@ SocketPairKind SocketpairGoferFileSocketPair(int type) {\nabsl::StrCat(((type & O_NONBLOCK) != 0) ? \"non-blocking \" : \"\",\n((type & O_CLOEXEC) != 0) ? \"close-on-exec \" : \"\",\n\"file socket created with the socketpair gofer\");\n- return SocketPairKind{description,\n+ // The socketpair gofer always creates SOCK_STREAM sockets on open(2).\n+ return SocketPairKind{description, AF_UNIX, SOCK_STREAM, 0,\nSocketpairGoferFileSocketPairCreator(type)};\n}\nSocketPairKind FilesystemUnboundUnixDomainSocketPair(int type) {\nreturn SocketPairKind{absl::StrCat(DescribeUnixDomainSocketType(type),\n\" unbound with a filesystem address\"),\n+ AF_UNIX, type, 0,\nFilesystemUnboundSocketPairCreator(AF_UNIX, type, 0)};\n}\n@@ -101,7 +104,7 @@ SocketPairKind AbstractUnboundUnixDomainSocketPair(int type) {\nreturn SocketPairKind{\nabsl::StrCat(DescribeUnixDomainSocketType(type),\n\" unbound with an abstract namespace address\"),\n- AbstractUnboundSocketPairCreator(AF_UNIX, type, 0)};\n+ AF_UNIX, type, 0, AbstractUnboundSocketPairCreator(AF_UNIX, type, 0)};\n}\nvoid SendSingleFD(int sock, int fd, char buf[], int buf_size) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement getsockopt() SO_DOMAIN, SO_PROTOCOL and SO_TYPE.
SO_TYPE was already implemented for everything but netlink sockets.
PiperOrigin-RevId: 253138157 |
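On the application side, the three options handled by the new common path are ordinary SOL_SOCKET integer queries. The snippet below is a hypothetical host-side check written with golang.org/x/sys/unix; the AF_INET/TCP socket and the expected values noted in the comments are assumptions chosen to mirror the C++ tests, not code from the change.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	opts := []struct {
		name string
		opt  int
	}{
		{"SO_TYPE", unix.SO_TYPE},         // expected: SOCK_STREAM
		{"SO_DOMAIN", unix.SO_DOMAIN},     // expected: AF_INET
		{"SO_PROTOCOL", unix.SO_PROTOCOL}, // expected: IPPROTO_TCP
	}
	for _, o := range opts {
		v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, o.opt)
		if err != nil {
			log.Fatalf("getsockopt(%s): %v", o.name, err)
		}
		fmt.Printf("%s = %d\n", o.name, v)
	}
}
```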
259,962 | 14.06.2019 07:30:30 | 25,200 | a8608c501b23f89ac3df50cde7428cb3c4b56145 | Enable Receive Buffer Auto-Tuning for runsc.
Updates | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -847,9 +847,17 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\n// privileges.\nRaw: true,\n})}\n+\n+ // Enable SACK Recovery.\nif err := s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(true)); err != nil {\nreturn nil, fmt.Errorf(\"failed to enable SACK: %v\", err)\n}\n+\n+ // Enable Receive Buffer Auto-Tuning.\n+ if err := s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(true)); err != nil {\n+ return nil, fmt.Errorf(\"SetTransportProtocolOption failed: %v\", err)\n+ }\n+\nreturn &s, nil\ndefault:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable Receive Buffer Auto-Tuning for runsc.
Updates #230
PiperOrigin-RevId: 253225078 |
259,853 | 17.06.2019 17:07:44 | 25,200 | 66cc0e9f928218ca642a41fa67bb163197aa1f37 | gvisor/bazel: use python2 to build runsc-debian
$ bazel build runsc:runsc-debian
File ".../bazel_tools/tools/build_defs/pkg/make_deb.py", line 311,
in GetFlagValue:
flagvalue = flagvalue.decode('utf-8')
AttributeError: 'str' object has no attribute 'decode'
make_deb.py is incompatible with Python3. | [
{
"change_type": "MODIFY",
"old_path": "Dockerfile",
"new_path": "Dockerfile",
"diff": "FROM ubuntu:bionic\n-RUN apt-get update && apt-get install -y curl gnupg2 git\n+RUN apt-get update && apt-get install -y curl gnupg2 git python3\nRUN echo \"deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8\" | tee /etc/apt/sources.list.d/bazel.list && \\\ncurl https://bazel.build/bazel-release.pub.gpg | apt-key add -\nRUN apt-get update && apt-get install -y bazel && apt-get clean\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/BUILD",
"new_path": "runsc/BUILD",
"diff": "@@ -96,6 +96,11 @@ pkg_deb(\nmaintainer = \"The gVisor Authors <[email protected]>\",\npackage = \"runsc\",\npostinst = \"debian/postinst.sh\",\n+ tags = [\n+ # TODO(b/135475885): pkg_deb requires python2:\n+ # https://github.com/bazelbuild/bazel/issues/8443\n+ \"manual\",\n+ ],\nversion_file = \":version.txt\",\nvisibility = [\n\"//visibility:public\",\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/run_tests.sh",
"new_path": "tools/run_tests.sh",
"diff": "@@ -92,6 +92,14 @@ build_everything() {\n\"${BUILD_PACKAGES[@]}\"\n}\n+build_runsc_debian() {\n+ cd ${WORKSPACE_DIR}\n+\n+ # TODO(b/135475885): pkg_deb is incompatible with Python3.\n+ # https://github.com/bazelbuild/bazel/issues/8443\n+ bazel build --host_force_python=py2 runsc:runsc-debian\n+}\n+\n# Run simple tests runs the tests that require no special setup or\n# configuration.\nrun_simple_tests() {\n@@ -277,6 +285,8 @@ main() {\nrun_syscall_tests\nrun_runsc_do_tests\n+ build_runsc_debian\n+\n# Build other flavors too.\nbuild_everything dbg\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/bazel: use python2 to build runsc-debian
$ bazel build runsc:runsc-debian
File ".../bazel_tools/tools/build_defs/pkg/make_deb.py", line 311,
in GetFlagValue:
flagvalue = flagvalue.decode('utf-8')
AttributeError: 'str' object has no attribute 'decode'
make_deb.py is incompatible with Python3.
https://github.com/bazelbuild/bazel/issues/8443
PiperOrigin-RevId: 253691923 |
259,853 | 18.06.2019 01:40:12 | 25,200 | 3d1e44a677ecacbf81888211a93cef0a71c1c6c0 | gvisor/kokoro: don't modify test names in the BUILD file | [
{
"change_type": "ADD",
"old_path": "runsc/test/BUILD",
"new_path": "runsc/test/BUILD",
"diff": ""
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/build_defs.bzl",
"diff": "+\"\"\"Defines a rule for runsc test targets.\"\"\"\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", _go_test = \"go_test\")\n+\n+# runtime_test is a macro that will create targets to run the given test target\n+# with different runtime options.\n+def runtime_test(**kwargs):\n+ \"\"\"Runs the given test target with different runtime options.\"\"\"\n+ name = kwargs[\"name\"]\n+ _go_test(**kwargs)\n+ kwargs[\"name\"] = name + \"_hostnet\"\n+ kwargs[\"args\"] = [\"--runtime-type=hostnet\"]\n+ _go_test(**kwargs)\n+ kwargs[\"name\"] = name + \"_kvm\"\n+ kwargs[\"args\"] = [\"--runtime-type=kvm\"]\n+ _go_test(**kwargs)\n+ kwargs[\"name\"] = name + \"_overlay\"\n+ kwargs[\"args\"] = [\"--runtime-type=overlay\"]\n+ _go_test(**kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "-load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+load(\"//runsc/test:build_defs.bzl\", \"runtime_test\")\npackage(licenses = [\"notice\"])\n-go_test(\n+runtime_test(\nname = \"image_test\",\nsize = \"large\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/BUILD",
"new_path": "runsc/test/integration/BUILD",
"diff": "-load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+load(\"//runsc/test:build_defs.bzl\", \"runtime_test\")\npackage(licenses = [\"notice\"])\n-go_test(\n+runtime_test(\nname = \"integration_test\",\nsize = \"large\",\nsrcs = [\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "package testutil\nimport (\n+ \"flag\"\n\"fmt\"\n\"io/ioutil\"\n\"log\"\n@@ -30,10 +31,15 @@ import (\n\"github.com/kr/pty\"\n)\n+var runtimeType = flag.String(\"runtime-type\", \"\", \"specify which runtime to use: kvm, hostnet, overlay\")\n+\nfunc getRuntime() string {\nr, ok := os.LookupEnv(\"RUNSC_RUNTIME\")\nif !ok {\n- return \"runsc-test\"\n+ r = \"runsc-test\"\n+ }\n+ if *runtimeType != \"\" {\n+ r += \"-\" + *runtimeType\n}\nreturn r\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/run_tests.sh",
"new_path": "tools/run_tests.sh",
"diff": "@@ -181,20 +181,24 @@ run_docker_tests() {\n# These names are used to exclude tests not supported in certain\n# configuration, e.g. save/restore not supported with hostnet.\n- declare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\n- for v in \"${variations[@]}\"; do\n- # Change test names otherwise each run of tests will overwrite logs and\n- # results of the previous run.\n- sed -i \"s/name = \\\"integration_test.*\\\"/name = \\\"integration_test${v}\\\"/\" runsc/test/integration/BUILD\n- sed -i \"s/name = \\\"image_test.*\\\"/name = \\\"image_test${v}\\\"/\" runsc/test/image/BUILD\n# Run runsc tests with docker that are tagged manual.\n+ #\n+ # The --nocache_test_results option is used here to eliminate cached results\n+ # from the previous run for the runc runtime.\nbazel test \\\n\"${BAZEL_BUILD_FLAGS[@]}\" \\\n- --test_env=RUNSC_RUNTIME=\"${RUNTIME}${v}\" \\\n+ --test_env=RUNSC_RUNTIME=\"${RUNTIME}\" \\\n--test_output=all \\\n- //runsc/test/image:image_test${v} \\\n- //runsc/test/integration:integration_test${v}\n- done\n+ --nocache_test_results \\\n+ --test_output=streamed \\\n+ //runsc/test/integration:integration_test \\\n+ //runsc/test/integration:integration_test_hostnet \\\n+ //runsc/test/integration:integration_test_overlay \\\n+ //runsc/test/integration:integration_test_kvm \\\n+ //runsc/test/image:image_test \\\n+ //runsc/test/image:image_test_overlay \\\n+ //runsc/test/image:image_test_hostnet \\\n+ //runsc/test/image:image_test_kvm\n}\n# Run the tests that require root.\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/kokoro: don't modify test names in the BUILD file
PiperOrigin-RevId: 253746380 |
259,992 | 18.06.2019 11:02:29 | 25,200 | ec15fb116248b549a1d6c5ca835aaa52681a9bd7 | Fix PipeTest_Streaming timeout
Test was calling Size() inside read and write loops. Size()
makes 2 syscalls to return the pipe size, making the test
do a lot more work than it should. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/pipe.cc",
"new_path": "test/syscalls/linux/pipe.cc",
"diff": "@@ -50,32 +50,28 @@ struct PipeCreator {\n};\nclass PipeTest : public ::testing::TestWithParam<PipeCreator> {\n- protected:\n- FileDescriptor rfd;\n- FileDescriptor wfd;\n-\npublic:\nstatic void SetUpTestSuite() {\n// Tests intentionally generate SIGPIPE.\nTEST_PCHECK(signal(SIGPIPE, SIG_IGN) != SIG_ERR);\n}\n- // Initializes rfd and wfd as a blocking pipe.\n+ // Initializes rfd_ and wfd_ as a blocking pipe.\n//\n// The return value indicates success: the test should be skipped otherwise.\nbool CreateBlocking() { return create(true); }\n- // Initializes rfd and wfd as a non-blocking pipe.\n+ // Initializes rfd_ and wfd_ as a non-blocking pipe.\n//\n// The return value is per CreateBlocking.\nbool CreateNonBlocking() { return create(false); }\n// Returns true iff the pipe represents a named pipe.\n- bool IsNamedPipe() { return namedpipe_; }\n+ bool IsNamedPipe() const { return named_pipe_; }\n- int Size() {\n- int s1 = fcntl(rfd.get(), F_GETPIPE_SZ);\n- int s2 = fcntl(wfd.get(), F_GETPIPE_SZ);\n+ int Size() const {\n+ int s1 = fcntl(rfd_.get(), F_GETPIPE_SZ);\n+ int s2 = fcntl(wfd_.get(), F_GETPIPE_SZ);\nEXPECT_GT(s1, 0);\nEXPECT_GT(s2, 0);\nEXPECT_EQ(s1, s2);\n@@ -87,20 +83,18 @@ class PipeTest : public ::testing::TestWithParam<PipeCreator> {\n}\nprivate:\n- bool namedpipe_ = false;\n-\nbool create(bool wants_blocking) {\n// Generate the pipe.\nint fds[2] = {-1, -1};\nbool is_blocking = false;\n- GetParam().create_(fds, &is_blocking, &namedpipe_);\n+ GetParam().create_(fds, &is_blocking, &named_pipe_);\nif (fds[0] < 0 || fds[1] < 0) {\nreturn false;\n}\n// Save descriptors.\n- rfd.reset(fds[0]);\n- wfd.reset(fds[1]);\n+ rfd_.reset(fds[0]);\n+ wfd_.reset(fds[1]);\n// Adjust blocking, if needed.\nif (!is_blocking && wants_blocking) {\n@@ -115,6 +109,13 @@ class PipeTest : public ::testing::TestWithParam<PipeCreator> {\nreturn true;\n}\n+\n+ protected:\n+ FileDescriptor rfd_;\n+ FileDescriptor wfd_;\n+\n+ private:\n+ bool named_pipe_ = false;\n};\nTEST_P(PipeTest, Inode) {\n@@ -122,9 +123,9 @@ TEST_P(PipeTest, Inode) {\n// Ensure that the inode number is the same for each end.\nstruct stat rst;\n- ASSERT_THAT(fstat(rfd.get(), &rst), SyscallSucceeds());\n+ ASSERT_THAT(fstat(rfd_.get(), &rst), SyscallSucceeds());\nstruct stat wst;\n- ASSERT_THAT(fstat(wfd.get(), &wst), SyscallSucceeds());\n+ ASSERT_THAT(fstat(wfd_.get(), &wst), SyscallSucceeds());\nEXPECT_EQ(rst.st_ino, wst.st_ino);\n}\n@@ -133,9 +134,10 @@ TEST_P(PipeTest, Permissions) {\n// Attempt bad operations.\nint buf = kTestValue;\n- ASSERT_THAT(write(rfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(write(rfd_.get(), &buf, sizeof(buf)),\n+ SyscallFailsWithErrno(EBADF));\n+ EXPECT_THAT(read(wfd_.get(), &buf, sizeof(buf)),\nSyscallFailsWithErrno(EBADF));\n- EXPECT_THAT(read(wfd.get(), &buf, sizeof(buf)), SyscallFailsWithErrno(EBADF));\n}\nTEST_P(PipeTest, Flags) {\n@@ -144,13 +146,13 @@ TEST_P(PipeTest, Flags) {\nif (IsNamedPipe()) {\n// May be stubbed to zero; define locally.\nconstexpr int kLargefile = 0100000;\n- EXPECT_THAT(fcntl(rfd.get(), F_GETFL),\n+ EXPECT_THAT(fcntl(rfd_.get(), F_GETFL),\nSyscallSucceedsWithValue(kLargefile | O_RDONLY));\n- EXPECT_THAT(fcntl(wfd.get(), F_GETFL),\n+ EXPECT_THAT(fcntl(wfd_.get(), F_GETFL),\nSyscallSucceedsWithValue(kLargefile | O_WRONLY));\n} else {\n- EXPECT_THAT(fcntl(rfd.get(), F_GETFL), SyscallSucceedsWithValue(O_RDONLY));\n- EXPECT_THAT(fcntl(wfd.get(), F_GETFL), SyscallSucceedsWithValue(O_WRONLY));\n+ EXPECT_THAT(fcntl(rfd_.get(), F_GETFL), SyscallSucceedsWithValue(O_RDONLY));\n+ 
EXPECT_THAT(fcntl(wfd_.get(), F_GETFL), SyscallSucceedsWithValue(O_WRONLY));\n}\n}\n@@ -159,9 +161,9 @@ TEST_P(PipeTest, Write) {\nint wbuf = kTestValue;\nint rbuf = ~kTestValue;\n- ASSERT_THAT(write(wfd.get(), &wbuf, sizeof(wbuf)),\n+ ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),\nSyscallSucceedsWithValue(sizeof(wbuf)));\n- ASSERT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(sizeof(rbuf)));\nEXPECT_EQ(wbuf, rbuf);\n}\n@@ -171,15 +173,15 @@ TEST_P(PipeTest, NonBlocking) {\nint wbuf = kTestValue;\nint rbuf = ~kTestValue;\n- EXPECT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallFailsWithErrno(EWOULDBLOCK));\n- ASSERT_THAT(write(wfd.get(), &wbuf, sizeof(wbuf)),\n+ ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),\nSyscallSucceedsWithValue(sizeof(wbuf)));\n- ASSERT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(sizeof(rbuf)));\nEXPECT_EQ(wbuf, rbuf);\n- EXPECT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallFailsWithErrno(EWOULDBLOCK));\n}\n@@ -202,26 +204,26 @@ TEST_P(PipeTest, Seek) {\nfor (int i = 0; i < 4; i++) {\n// Attempt absolute seeks.\n- EXPECT_THAT(lseek(rfd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(rfd.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), 4, SEEK_SET), SyscallFailsWithErrno(ESPIPE));\n// Attempt relative seeks.\n- EXPECT_THAT(lseek(rfd.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(rfd.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), 4, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n// Attempt end-of-file seeks.\n- EXPECT_THAT(lseek(rfd.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(rfd.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(lseek(wfd.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(rfd_.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), 0, SEEK_CUR), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(lseek(wfd_.get(), -4, SEEK_END), SyscallFailsWithErrno(ESPIPE));\n// Add some more data to the pipe.\nint buf = kTestValue;\n- ASSERT_THAT(write(wfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(sizeof(buf)));\n}\n}\n@@ -230,14 +232,14 @@ TEST_P(PipeTest, OffsetCalls) 
{\nSKIP_IF(!CreateBlocking());\nint buf;\n- EXPECT_THAT(pread(wfd.get(), &buf, sizeof(buf), 0),\n+ EXPECT_THAT(pread(wfd_.get(), &buf, sizeof(buf), 0),\nSyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(pwrite(rfd.get(), &buf, sizeof(buf), 0),\n+ EXPECT_THAT(pwrite(rfd_.get(), &buf, sizeof(buf), 0),\nSyscallFailsWithErrno(ESPIPE));\nstruct iovec iov;\n- EXPECT_THAT(preadv(wfd.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));\n- EXPECT_THAT(pwritev(rfd.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(preadv(wfd_.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));\n+ EXPECT_THAT(pwritev(rfd_.get(), &iov, 1, 0), SyscallFailsWithErrno(ESPIPE));\n}\nTEST_P(PipeTest, WriterSideCloses) {\n@@ -245,13 +247,13 @@ TEST_P(PipeTest, WriterSideCloses) {\nScopedThread t([this]() {\nint buf = ~kTestValue;\n- ASSERT_THAT(read(rfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(sizeof(buf)));\nEXPECT_EQ(buf, kTestValue);\n// This will return when the close() completes.\n- ASSERT_THAT(read(rfd.get(), &buf, sizeof(buf)), SyscallSucceeds());\n+ ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)), SyscallSucceeds());\n// This will return straight away.\n- ASSERT_THAT(read(rfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(0));\n});\n@@ -260,14 +262,14 @@ TEST_P(PipeTest, WriterSideCloses) {\n// Write to unblock.\nint buf = kTestValue;\n- ASSERT_THAT(write(wfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(sizeof(buf)));\n// Sleep a bit so the thread can block again.\nabsl::SleepFor(syncDelay);\n// Allow the thread to complete.\n- ASSERT_THAT(close(wfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());\nt.Join();\n}\n@@ -275,36 +277,36 @@ TEST_P(PipeTest, WriterSideClosesReadDataFirst) {\nSKIP_IF(!CreateBlocking());\nint wbuf = kTestValue;\n- ASSERT_THAT(write(wfd.get(), &wbuf, sizeof(wbuf)),\n+ ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),\nSyscallSucceedsWithValue(sizeof(wbuf)));\n- ASSERT_THAT(close(wfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());\nint rbuf;\n- ASSERT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(sizeof(rbuf)));\nEXPECT_EQ(wbuf, rbuf);\n- EXPECT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ EXPECT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(0));\n}\nTEST_P(PipeTest, ReaderSideCloses) {\nSKIP_IF(!CreateBlocking());\n- ASSERT_THAT(close(rfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());\nint buf = kTestValue;\n- EXPECT_THAT(write(wfd.get(), &buf, sizeof(buf)),\n+ EXPECT_THAT(write(wfd_.get(), &buf, sizeof(buf)),\nSyscallFailsWithErrno(EPIPE));\n}\nTEST_P(PipeTest, CloseTwice) {\nSKIP_IF(!CreateBlocking());\n- int _rfd = rfd.release();\n- int _wfd = wfd.release();\n- ASSERT_THAT(close(_rfd), SyscallSucceeds());\n- ASSERT_THAT(close(_wfd), SyscallSucceeds());\n- EXPECT_THAT(close(_rfd), SyscallFailsWithErrno(EBADF));\n- EXPECT_THAT(close(_wfd), SyscallFailsWithErrno(EBADF));\n+ int reader = rfd_.release();\n+ int writer = wfd_.release();\n+ ASSERT_THAT(close(reader), SyscallSucceeds());\n+ ASSERT_THAT(close(writer), SyscallSucceeds());\n+ EXPECT_THAT(close(reader), SyscallFailsWithErrno(EBADF));\n+ EXPECT_THAT(close(writer), SyscallFailsWithErrno(EBADF));\n}\n// Blocking write 
returns EPIPE when read end is closed if nothing has been\n@@ -316,18 +318,18 @@ TEST_P(PipeTest, BlockWriteClosed) {\nScopedThread t([this, ¬ify]() {\nstd::vector<char> buf(Size());\n// Exactly fill the pipe buffer.\n- ASSERT_THAT(WriteFd(wfd.get(), buf.data(), buf.size()),\n+ ASSERT_THAT(WriteFd(wfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\nnotify.Notify();\n// Attempt to write one more byte. Blocks.\n// N.B. Don't use WriteFd, we don't want a retry.\n- EXPECT_THAT(write(wfd.get(), buf.data(), 1), SyscallFailsWithErrno(EPIPE));\n+ EXPECT_THAT(write(wfd_.get(), buf.data(), 1), SyscallFailsWithErrno(EPIPE));\n});\nnotify.WaitForNotification();\n- ASSERT_THAT(close(rfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());\nt.Join();\n}\n@@ -340,9 +342,9 @@ TEST_P(PipeTest, BlockPartialWriteClosed) {\nstd::vector<char> buf(2 * Size());\n// Write more than fits in the buffer. Blocks then returns partial write\n// when the other end is closed. The next call returns EPIPE.\n- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),\n+ ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(Size()));\n- EXPECT_THAT(write(wfd.get(), buf.data(), buf.size()),\n+ EXPECT_THAT(write(wfd_.get(), buf.data(), buf.size()),\nSyscallFailsWithErrno(EPIPE));\n});\n@@ -350,7 +352,7 @@ TEST_P(PipeTest, BlockPartialWriteClosed) {\nabsl::SleepFor(syncDelay);\n// Unblock the above.\n- ASSERT_THAT(close(rfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());\nt.Join();\n}\n@@ -361,7 +363,7 @@ TEST_P(PipeTest, ReadFromClosedFd_NoRandomSave) {\nScopedThread t([this, ¬ify]() {\nnotify.Notify();\nint buf;\n- ASSERT_THAT(read(rfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(read(rfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(sizeof(buf)));\nASSERT_EQ(kTestValue, buf);\n});\n@@ -375,9 +377,9 @@ TEST_P(PipeTest, ReadFromClosedFd_NoRandomSave) {\n// is ongoing read() above. 
We will not be able to restart the read()\n// successfully in restore run since the read fd is closed.\nconst DisableSave ds;\n- ASSERT_THAT(close(rfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(rfd_.release()), SyscallSucceeds());\nint buf = kTestValue;\n- ASSERT_THAT(write(wfd.get(), &buf, sizeof(buf)),\n+ ASSERT_THAT(write(wfd_.get(), &buf, sizeof(buf)),\nSyscallSucceedsWithValue(sizeof(buf)));\nt.Join();\n}\n@@ -387,18 +389,18 @@ TEST_P(PipeTest, FionRead) {\nSKIP_IF(!CreateBlocking());\nint n;\n- ASSERT_THAT(ioctl(rfd.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(ioctl(rfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\nEXPECT_EQ(n, 0);\n- ASSERT_THAT(ioctl(wfd.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(ioctl(wfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\nEXPECT_EQ(n, 0);\nstd::vector<char> buf(Size());\n- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),\n+ ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n- EXPECT_THAT(ioctl(rfd.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\n+ EXPECT_THAT(ioctl(rfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\nEXPECT_EQ(n, buf.size());\n- EXPECT_THAT(ioctl(wfd.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\n+ EXPECT_THAT(ioctl(wfd_.get(), FIONREAD, &n), SyscallSucceedsWithValue(0));\nEXPECT_EQ(n, buf.size());\n}\n@@ -409,11 +411,11 @@ TEST_P(PipeTest, OpenViaProcSelfFD) {\nSKIP_IF(IsNamedPipe());\n// Close the write end of the pipe.\n- ASSERT_THAT(close(wfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());\n// Open other side via /proc/self/fd. It should not block.\nFileDescriptor proc_self_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(absl::StrCat(\"/proc/self/fd/\", rfd.get()), O_RDONLY));\n+ Open(absl::StrCat(\"/proc/self/fd/\", rfd_.get()), O_RDONLY));\n}\n// Test that opening and reading from an anonymous pipe (with existing writes)\n@@ -424,13 +426,13 @@ TEST_P(PipeTest, OpenViaProcSelfFDWithWrites) {\n// Write to the pipe and then close the write fd.\nint wbuf = kTestValue;\n- ASSERT_THAT(write(wfd.get(), &wbuf, sizeof(wbuf)),\n+ ASSERT_THAT(write(wfd_.get(), &wbuf, sizeof(wbuf)),\nSyscallSucceedsWithValue(sizeof(wbuf)));\n- ASSERT_THAT(close(wfd.release()), SyscallSucceeds());\n+ ASSERT_THAT(close(wfd_.release()), SyscallSucceeds());\n// Open read side via /proc/self/fd, and read from it.\nFileDescriptor proc_self_fd = ASSERT_NO_ERRNO_AND_VALUE(\n- Open(absl::StrCat(\"/proc/self/fd/\", rfd.get()), O_RDONLY));\n+ Open(absl::StrCat(\"/proc/self/fd/\", rfd_.get()), O_RDONLY));\nint rbuf;\nASSERT_THAT(read(proc_self_fd.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(sizeof(rbuf)));\n@@ -443,13 +445,13 @@ TEST_P(PipeTest, ProcFDReleasesFile) {\n// Stat the pipe FD, which shouldn't alter the refcount.\nstruct stat wst;\n- ASSERT_THAT(lstat(absl::StrCat(\"/proc/self/fd/\", wfd.get()).c_str(), &wst),\n+ ASSERT_THAT(lstat(absl::StrCat(\"/proc/self/fd/\", wfd_.get()).c_str(), &wst),\nSyscallSucceeds());\n// Close the write end and ensure that read indicates EOF.\n- wfd.reset();\n+ wfd_.reset();\nchar buf;\n- ASSERT_THAT(read(rfd.get(), &buf, 1), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(read(rfd_.get(), &buf, 1), SyscallSucceedsWithValue(0));\n}\n// Same for /proc/<PID>/fdinfo.\n@@ -459,30 +461,30 @@ TEST_P(PipeTest, ProcFDInfoReleasesFile) {\n// Stat the pipe FD, which shouldn't alter the refcount.\nstruct stat wst;\nASSERT_THAT(\n- 
lstat(absl::StrCat(\"/proc/self/fdinfo/\", wfd.get()).c_str(), &wst),\n+ lstat(absl::StrCat(\"/proc/self/fdinfo/\", wfd_.get()).c_str(), &wst),\nSyscallSucceeds());\n// Close the write end and ensure that read indicates EOF.\n- wfd.reset();\n+ wfd_.reset();\nchar buf;\n- ASSERT_THAT(read(rfd.get(), &buf, 1), SyscallSucceedsWithValue(0));\n+ ASSERT_THAT(read(rfd_.get(), &buf, 1), SyscallSucceedsWithValue(0));\n}\nTEST_P(PipeTest, SizeChange) {\nSKIP_IF(!CreateBlocking());\n// Set the minimum possible size.\n- ASSERT_THAT(fcntl(rfd.get(), F_SETPIPE_SZ, 0), SyscallSucceeds());\n+ ASSERT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, 0), SyscallSucceeds());\nint min = Size();\nEXPECT_GT(min, 0); // Should be rounded up.\n// Set from the read end.\n- ASSERT_THAT(fcntl(rfd.get(), F_SETPIPE_SZ, min + 1), SyscallSucceeds());\n+ ASSERT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, min + 1), SyscallSucceeds());\nint med = Size();\nEXPECT_GT(med, min); // Should have grown, may be rounded.\n// Set from the write end.\n- ASSERT_THAT(fcntl(wfd.get(), F_SETPIPE_SZ, med + 1), SyscallSucceeds());\n+ ASSERT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, med + 1), SyscallSucceeds());\nint max = Size();\nEXPECT_GT(max, med); // Ditto.\n}\n@@ -491,9 +493,9 @@ TEST_P(PipeTest, SizeChangeMax) {\nSKIP_IF(!CreateBlocking());\n// Assert there's some maximum.\n- EXPECT_THAT(fcntl(rfd.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),\n+ EXPECT_THAT(fcntl(rfd_.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),\nSyscallFailsWithErrno(EINVAL));\n- EXPECT_THAT(fcntl(wfd.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),\n+ EXPECT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, 0x7fffffffffffffff),\nSyscallFailsWithErrno(EINVAL));\n}\n@@ -505,14 +507,14 @@ TEST_P(PipeTest, SizeChangeFull) {\n// adjust the size and the call below will return success. It was found via\n// experimentation that this granularity avoids the rounding for Linux.\nconstexpr int kDelta = 64 * 1024;\n- ASSERT_THAT(fcntl(wfd.get(), F_SETPIPE_SZ, Size() + kDelta),\n+ ASSERT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, Size() + kDelta),\nSyscallSucceeds());\n// Fill the buffer and try to change down.\nstd::vector<char> buf(Size());\n- ASSERT_THAT(write(wfd.get(), buf.data(), buf.size()),\n+ ASSERT_THAT(write(wfd_.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(buf.size()));\n- EXPECT_THAT(fcntl(wfd.get(), F_SETPIPE_SZ, Size() - kDelta),\n+ EXPECT_THAT(fcntl(wfd_.get(), F_SETPIPE_SZ, Size() - kDelta),\nSyscallFailsWithErrno(EBUSY));\n}\n@@ -522,23 +524,32 @@ TEST_P(PipeTest, Streaming) {\n// We make too many calls to go through full save cycles.\nDisableSave ds;\n+ // Size() requires 2 syscalls, call it once and remember the value.\n+ const int pipe_size = Size();\n+\nabsl::Notification notify;\n- ScopedThread t([this, ¬ify]() {\n+ ScopedThread t([this, ¬ify, pipe_size]() {\n// Don't start until it's full.\nnotify.WaitForNotification();\n- for (int i = 0; i < 2 * Size(); i++) {\n+ for (int i = 0; i < pipe_size; i++) {\nint rbuf;\n- ASSERT_THAT(read(rfd.get(), &rbuf, sizeof(rbuf)),\n+ ASSERT_THAT(read(rfd_.get(), &rbuf, sizeof(rbuf)),\nSyscallSucceedsWithValue(sizeof(rbuf)));\nEXPECT_EQ(rbuf, i);\n}\n});\n- for (int i = 0; i < 2 * Size(); i++) {\n- int wbuf = i;\n- ASSERT_THAT(write(wfd.get(), &wbuf, sizeof(wbuf)),\n- SyscallSucceedsWithValue(sizeof(wbuf)));\n- // Did that write just fill up the buffer? Wake up the reader. Once only.\n- if ((i * sizeof(wbuf)) < Size() && ((i + 1) * sizeof(wbuf)) >= Size()) {\n+\n+ // Write 4 bytes * pipe_size. It will fill up the pipe once, notify the reader\n+ // to start. 
Then we write pipe size worth 3 more times to ensure the reader\n+ // can follow along.\n+ ssize_t total = 0;\n+ for (int i = 0; i < pipe_size; i++) {\n+ ssize_t written = write(wfd_.get(), &i, sizeof(i));\n+ ASSERT_THAT(written, SyscallSucceedsWithValue(sizeof(i)));\n+ total += written;\n+\n+ // Is the next write about to fill up the buffer? Wake up the reader once.\n+ if (total < pipe_size && (total + written) >= pipe_size) {\nnotify.Notify();\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix PipeTest_Streaming timeout
Test was calling Size() inside read and write loops. Size()
makes 2 syscalls to return the pipe size, making the test
do a lot more work than it should.
PiperOrigin-RevId: 253824690 |
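The fix is a straightforward hoist: the pipe size is computed once before the loops instead of issuing two fcntl syscalls on every iteration. The sketch below shows the same idea in Go; os.Pipe, the 4-byte writes, and the fill-to-capacity loop are illustrative assumptions, while F_GETPIPE_SZ is the same fcntl the test relies on.

```go
package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatalf("pipe: %v", err)
	}
	defer r.Close()
	defer w.Close()

	// Query the pipe capacity once, outside any hot loop.
	pipeSize, err := unix.FcntlInt(w.Fd(), unix.F_GETPIPE_SZ, 0)
	if err != nil {
		log.Fatalf("fcntl(F_GETPIPE_SZ): %v", err)
	}

	// Fill the pipe up to (but not past) its capacity so no write blocks.
	buf := make([]byte, 4)
	written := 0
	for written+len(buf) <= pipeSize {
		n, err := w.Write(buf)
		if err != nil {
			log.Fatalf("write: %v", err)
		}
		written += n
	}
	log.Printf("wrote %d of %d pipe-buffer bytes", written, pipeSize)
}
```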
259,847 | 18.06.2019 14:17:31 | 25,200 | 2e1379867a77bfa94cc740b6d1407d3702810c73 | Replace usage of deprecated strtoul/strtoull | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -42,6 +42,7 @@ cc_binary(\nname = \"exec_state_workload\",\ntestonly = 1,\nsrcs = [\"exec_state_workload.cc\"],\n+ deps = [\"@com_google_absl//absl/strings\"],\n)\nsh_binary(\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/exec_state_workload.cc",
"new_path": "test/syscalls/linux/exec_state_workload.cc",
"diff": "#include <sys/auxv.h>\n#include <sys/prctl.h>\n#include <sys/time.h>\n+\n#include <iostream>\n#include <ostream>\n#include <string>\n+#include \"absl/strings/numbers.h\"\n+\n// Pretty-print a sigset_t.\nstd::ostream& operator<<(std::ostream& out, const sigset_t& s) {\nout << \"{ \";\n@@ -138,15 +141,14 @@ int main(int argc, char** argv) {\nreturn 1;\n}\n- char* end;\n- uint32_t signo = strtoul(argv[2], &end, 10);\n- if (end == argv[2]) {\n+ uint32_t signo;\n+ if (!absl::SimpleAtoi(argv[2], &signo)) {\nstd::cerr << \"invalid signo: \" << argv[2] << std::endl;\nreturn 1;\n}\n- uintptr_t handler = strtoull(argv[3], &end, 16);\n- if (end == argv[3]) {\n+ uintptr_t handler;\n+ if (!absl::numbers_internal::safe_strtoi_base(argv[3], &handler, 16)) {\nstd::cerr << \"invalid handler: \" << std::hex << argv[3] << std::endl;\nreturn 1;\n}\n@@ -160,9 +162,8 @@ int main(int argc, char** argv) {\nreturn 1;\n}\n- char* end;\n- uint32_t signo = strtoul(argv[2], &end, 10);\n- if (end == argv[2]) {\n+ uint32_t signo;\n+ if (!absl::SimpleAtoi(argv[2], &signo)) {\nstd::cerr << \"invalid signo: \" << argv[2] << std::endl;\nreturn 1;\n}\n@@ -176,9 +177,8 @@ int main(int argc, char** argv) {\nreturn 1;\n}\n- char* end;\n- uint32_t timer = strtoul(argv[2], &end, 10);\n- if (end == argv[2]) {\n+ uint32_t timer;\n+ if (!absl::SimpleAtoi(argv[2], &timer)) {\nstd::cerr << \"invalid signo: \" << argv[2] << std::endl;\nreturn 1;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Replace usage of deprecated strtoul/strtoull
PiperOrigin-RevId: 253864770 |
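The replacement swaps strtoul/strtoull, whose end-pointer error signalling is easy to misuse, for absl parsers that report failure explicitly. As a loose analogue in Go, strconv offers the same parse-or-fail contract through an error return; the literal strings below are made-up inputs for illustration only.

```go
package main

import (
	"fmt"
	"log"
	"strconv"
)

func main() {
	// Decimal parse with an explicit failure path (cf. absl::SimpleAtoi).
	signo, err := strconv.ParseUint("14", 10, 32)
	if err != nil {
		log.Fatalf("invalid signo: %v", err)
	}

	// Base-16 parse (cf. safe_strtoi_base(..., 16)).
	handler, err := strconv.ParseUint("7f3a12345000", 16, 64)
	if err != nil {
		log.Fatalf("invalid handler: %v", err)
	}

	fmt.Printf("signo=%d handler=%#x\n", signo, handler)
}
```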
259,992 | 18.06.2019 15:34:58 | 25,200 | 0e07c94d545aa971bb2a05b738f856181a3ff463 | Kill sandbox process when 'runsc do' exits | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/do.go",
"new_path": "runsc/cmd/do.go",
"diff": "@@ -42,6 +42,7 @@ type Do struct {\nroot string\ncwd string\nip string\n+ quiet bool\n}\n// Name implements subcommands.Command.Name.\n@@ -71,6 +72,7 @@ func (c *Do) SetFlags(f *flag.FlagSet) {\nf.StringVar(&c.root, \"root\", \"/\", `path to the root directory, defaults to \"/\"`)\nf.StringVar(&c.cwd, \"cwd\", \".\", \"path to the current directory, defaults to the current directory\")\nf.StringVar(&c.ip, \"ip\", \"192.168.10.2\", \"IPv4 address for the sandbox\")\n+ f.BoolVar(&c.quiet, \"quiet\", false, \"suppress runsc messages to stdout. Application output is still sent to stdout and stderr\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -134,7 +136,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\n} else if conf.Rootless {\nif conf.Network == boot.NetworkSandbox {\n- fmt.Println(\"*** Rootless requires changing network type to host ***\")\n+ c.notifyUser(\"*** Warning: using host network due to --rootless ***\")\nconf.Network = boot.NetworkHost\n}\n@@ -168,8 +170,9 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nID: cid,\nSpec: spec,\nBundleDir: tmpDir,\n+ Attached: true,\n}\n- ws, err := container.Run(conf, runArgs, false)\n+ ws, err := container.Run(conf, runArgs)\nif err != nil {\nreturn Errorf(\"running container: %v\", err)\n}\n@@ -178,6 +181,13 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nreturn subcommands.ExitSuccess\n}\n+func (c *Do) notifyUser(format string, v ...interface{}) {\n+ if !c.quiet {\n+ fmt.Printf(format+\"\\n\", v...)\n+ }\n+ log.Warningf(format, v...)\n+}\n+\nfunc resolvePath(path string) (string, error) {\nvar err error\npath, err = filepath.Abs(path)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/restore.go",
"new_path": "runsc/cmd/restore.go",
"diff": "@@ -107,8 +107,9 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...interface{\nConsoleSocket: r.consoleSocket,\nPIDFile: r.pidFile,\nUserLog: r.userLog,\n+ Attached: !r.detach,\n}\n- ws, err := container.Run(conf, runArgs, r.detach)\n+ ws, err := container.Run(conf, runArgs)\nif err != nil {\nreturn Errorf(\"running container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/run.go",
"new_path": "runsc/cmd/run.go",
"diff": "@@ -88,8 +88,9 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s\nConsoleSocket: r.consoleSocket,\nPIDFile: r.pidFile,\nUserLog: r.userLog,\n+ Attached: !r.detach,\n}\n- ws, err := container.Run(conf, runArgs, r.detach)\n+ ws, err := container.Run(conf, runArgs)\nif err != nil {\nreturn Errorf(\"running container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -262,7 +262,15 @@ type Args struct {\nPIDFile string\n// UserLog is the filename to send user-visible logs to. It may be empty.\n+ //\n+ // It only applies for the init container.\nUserLog string\n+\n+ // Attached indicates that the sandbox lifecycle is attached with the caller.\n+ // If the caller exits, the sandbox should exit too.\n+ //\n+ // It only applies for the init container.\n+ Attached bool\n}\n// Create creates the container in a new Sandbox process, unless the metadata\n@@ -349,6 +357,7 @@ func New(conf *boot.Config, args Args) (*Container, error) {\nIOFiles: ioFiles,\nMountsFile: specFile,\nCgroup: cg,\n+ Attached: args.Attached,\n}\nsand, err := sandbox.New(conf, sandArgs)\nif err != nil {\n@@ -499,7 +508,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str\n}\n// Run is a helper that calls Create + Start + Wait.\n-func Run(conf *boot.Config, args Args, detach bool) (syscall.WaitStatus, error) {\n+func Run(conf *boot.Config, args Args) (syscall.WaitStatus, error) {\nlog.Debugf(\"Run container %q in root dir: %s\", args.ID, conf.RootDir)\nc, err := New(conf, args)\nif err != nil {\n@@ -522,12 +531,12 @@ func Run(conf *boot.Config, args Args, detach bool) (syscall.WaitStatus, error)\nreturn 0, fmt.Errorf(\"starting container: %v\", err)\n}\n}\n- if detach {\n+ if args.Attached {\n+ return c.Wait()\n+ }\ncu.Release()\nreturn 0, nil\n}\n- return c.Wait()\n-}\n// Execute runs the specified command in the container. It returns the PID of\n// the newly created process.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -215,8 +215,9 @@ func run(spec *specs.Spec, conf *boot.Config) error {\nID: testutil.UniqueContainerID(),\nSpec: spec,\nBundleDir: bundleDir,\n+ Attached: true,\n}\n- ws, err := Run(conf, args, false)\n+ ws, err := Run(conf, args)\nif err != nil {\nreturn fmt.Errorf(\"running container: %v\", err)\n}\n@@ -430,8 +431,9 @@ func TestExePath(t *testing.T) {\nID: testutil.UniqueContainerID(),\nSpec: spec,\nBundleDir: bundleDir,\n+ Attached: true,\n}\n- ws, err := Run(conf, args, false)\n+ ws, err := Run(conf, args)\nos.RemoveAll(rootDir)\nos.RemoveAll(bundleDir)\n@@ -468,8 +470,9 @@ func TestAppExitStatus(t *testing.T) {\nID: testutil.UniqueContainerID(),\nSpec: succSpec,\nBundleDir: bundleDir,\n+ Attached: true,\n}\n- ws, err := Run(conf, args, false)\n+ ws, err := Run(conf, args)\nif err != nil {\nt.Fatalf(\"error running container: %v\", err)\n}\n@@ -492,8 +495,9 @@ func TestAppExitStatus(t *testing.T) {\nID: testutil.UniqueContainerID(),\nSpec: errSpec,\nBundleDir: bundleDir2,\n+ Attached: true,\n}\n- ws, err = Run(conf, args2, false)\n+ ws, err = Run(conf, args2)\nif err != nil {\nt.Fatalf(\"error running container: %v\", err)\n}\n@@ -1624,8 +1628,9 @@ func TestUserLog(t *testing.T) {\nSpec: spec,\nBundleDir: bundleDir,\nUserLog: userLog,\n+ Attached: true,\n}\n- ws, err := Run(conf, args, false)\n+ ws, err := Run(conf, args)\nif err != nil {\nt.Fatalf(\"error running container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -103,6 +103,10 @@ type Args struct {\n// Gcgroup is the cgroup that the sandbox is part of.\nCgroup *cgroup.Cgroup\n+\n+ // Attached indicates that the sandbox lifecycle is attached with the caller.\n+ // If the caller exits, the sandbox should exit too.\n+ Attached bool\n}\n// New creates the sandbox process. The caller must call Destroy() on the\n@@ -650,6 +654,11 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncF\nlog.Debugf(\"Donating FD %d: %q\", i+3, f.Name())\n}\n+ if args.Attached {\n+ // Kill sandbox if parent process exits in attached mode.\n+ cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL\n+ }\n+\nlog.Debugf(\"Starting sandbox: %s %v\", binPath, cmd.Args)\nlog.Debugf(\"SysProcAttr: %+v\", cmd.SysProcAttr)\nif err := specutils.StartInNS(cmd, nss); err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Kill sandbox process when 'runsc do' exits
PiperOrigin-RevId: 253882115 |
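The change above ties the sandbox's lifetime to its caller by setting the parent-death signal on the spawned process. Below is a minimal, standalone Go sketch of that mechanism (Linux-only; the "sleep" child and all names here are illustrative stand-ins, not gVisor code):

package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	// "sleep 60" is a hypothetical stand-in for the sandbox process.
	cmd := exec.Command("sleep", "60")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// Ask the kernel to deliver SIGKILL to the child if the parent
		// goes away, which is what attached mode relies on.
		Pdeathsig: syscall.SIGKILL,
	}
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting child: %v", err)
	}
	log.Printf("child pid %d will be killed when this process exits", cmd.Process.Pid)
	// When this program exits, the kernel sends SIGKILL to the child.
}

One caveat: the kernel ties the death signal to the thread that created the child, so it also fires if just that creating thread terminates.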
259,985 | 18.06.2019 16:20:42 | 25,200 | 546b2948cb4304a99b0d719d5a99dcb7edaace18 | Use return values from syscalls in eventfd tests. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/eventfd.cc",
"new_path": "test/syscalls/linux/eventfd.cc",
"diff": "@@ -53,9 +53,9 @@ TEST(EventfdTest, Nonblock) {\nvoid* read_three_times(void* arg) {\nint efd = *reinterpret_cast<int*>(arg);\nuint64_t l;\n- read(efd, &l, sizeof(l));\n- read(efd, &l, sizeof(l));\n- read(efd, &l, sizeof(l));\n+ EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));\n+ EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));\n+ EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l)));\nreturn nullptr;\n}\n@@ -160,7 +160,8 @@ TEST(EventfdTest, NotifyNonZero_NoRandomSave) {\nScopedThread t([&efd] {\nsleep(5);\nuint64_t val = 1;\n- write(efd.get(), &val, sizeof(val));\n+ EXPECT_THAT(write(efd.get(), &val, sizeof(val)),\n+ SyscallSucceedsWithValue(sizeof(val)));\n});\n// epoll_wait should return once the thread writes.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use return values from syscalls in eventfd tests.
PiperOrigin-RevId: 253890611 |
259,992 | 18.06.2019 19:14:52 | 25,200 | ca245a428b367e9eb29ae51386beccb6a2d3022b | Attempt to fix TestPipeWritesAccumulate
Test fails because it's reading 4KB instead of the
expected 64KB. Changed the test to read pipe buffer
size instead of hardcode and added some logging in
case the reason for failure was not pipe buffer size. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/fdpipe/pipe_test.go",
"new_path": "pkg/sentry/fs/fdpipe/pipe_test.go",
"diff": "@@ -358,9 +358,9 @@ func TestPipeReadAheadBuffer(t *testing.T) {\n}\n}\n-// This is very important for pipes in general because they can return EWOULDBLOCK and for\n-// those that block they must continue until they have read all of the data (and report it\n-// as such.\n+// This is very important for pipes in general because they can return\n+// EWOULDBLOCK and for those that block they must continue until they have read\n+// all of the data (and report it as such).\nfunc TestPipeReadsAccumulate(t *testing.T) {\nfds := make([]int, 2)\nif err := syscall.Pipe(fds); err != nil {\n@@ -393,8 +393,8 @@ func TestPipeReadsAccumulate(t *testing.T) {\nt.Fatalf(\"write to pipe got (%d, %v), want (%d, nil)\", n, err, len(data))\n}\n- // Construct a segment vec that is a bit more than we have written so we trigger\n- // an EWOULDBLOCK.\n+ // Construct a segment vec that is a bit more than we have written so we\n+ // trigger an EWOULDBLOCK.\nwantBytes := len(data) + 1\nreadBuffer := make([]byte, wantBytes)\niov := usermem.BytesIOSequence(readBuffer)\n@@ -446,8 +446,8 @@ func TestPipeWritesAccumulate(t *testing.T) {\nwfile.Close()\nt.Fatalf(\"newPipeOperations got error %v, want nil\", err)\n}\n- // Don't forget to remove the fd from the fd notifier. Otherwise other tests will\n- // likely be borked, because it's global :(\n+ // Don't forget to remove the fd from the fd notifier. Otherwise other tests\n+ // will likely be borked, because it's global :(\ndefer p.Release()\ninode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{\n@@ -455,32 +455,48 @@ func TestPipeWritesAccumulate(t *testing.T) {\n})\nfile := fs.NewFile(ctx, fs.NewDirent(ctx, inode, \"pipe\"), fs.FileFlags{Read: true}, p)\n- // Construct a segment vec that is larger than the pipe size to trigger an EWOULDBLOCK.\n- wantBytes := 65536 * 2\n+ pipeSize, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(wfile.FD()), syscall.F_GETPIPE_SZ, 0)\n+ if errno != 0 {\n+ t.Fatalf(\"fcntl(F_GETPIPE_SZ) failed: %v\", errno)\n+ }\n+ t.Logf(\"Pipe buffer size: %d\", pipeSize)\n+\n+ // Construct a segment vec that is larger than the pipe size to trigger an\n+ // EWOULDBLOCK.\n+ wantBytes := int(pipeSize) * 2\nwriteBuffer := make([]byte, wantBytes)\nfor i := 0; i < wantBytes; i++ {\nwriteBuffer[i] = 'a'\n}\niov := usermem.BytesIOSequence(writeBuffer)\nn, err := p.Write(ctx, file, iov, 0)\n- total := n\n- iov = iov.DropFirst64(n)\nif err != syserror.ErrWouldBlock {\nt.Fatalf(\"Writev got error %v, want %v\", err, syserror.ErrWouldBlock)\n}\n+ if n != int64(pipeSize) {\n+ t.Fatalf(\"Writev partial write, got: %v, want %v\", n, pipeSize)\n+ }\n+ total := n\n+ iov = iov.DropFirst64(n)\n// Read the entire pipe buf size to make space for the second half.\n- throwAway := make([]byte, 65536)\n- if n, err := syscall.Read(fds[0], throwAway); n != len(throwAway) || err != nil {\n- t.Fatalf(\"write to pipe got (%d, %v), want (%d, nil)\", n, err, len(throwAway))\n+ readBuffer := make([]byte, n)\n+ if n, err := syscall.Read(fds[0], readBuffer); n != len(readBuffer) || err != nil {\n+ t.Fatalf(\"write to pipe got (%d, %v), want (%d, nil)\", n, err, len(readBuffer))\n+ }\n+ if !bytes.Equal(readBuffer, writeBuffer[:len(readBuffer)]) {\n+ t.Fatalf(\"wrong data read from pipe, got: %v, want: %v\", readBuffer, writeBuffer)\n}\n// This time we should not block.\nn, err = p.Write(ctx, file, iov, 0)\n- total += n\nif err != nil {\nt.Fatalf(\"Writev got error %v, want nil\", err)\n}\n+ if n != int64(pipeSize) {\n+ t.Fatalf(\"Writev partial write, got: 
%v, want %v\", n, pipeSize)\n+ }\n+ total += n\n// Assert that the result we got back is cumulative.\nif total != int64(wantBytes) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Attempt to fix TestPipeWritesAccumulate
Test fails because it's reading 4KB instead of the
expected 64KB. Changed the test to read the pipe buffer
size instead of hardcoding it, and added some logging in
case the reason for failure was not pipe buffer size.
PiperOrigin-RevId: 253916040 |
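The test above now asks the kernel for the pipe capacity with fcntl(F_GETPIPE_SZ) instead of assuming 64KB. A small sketch of the same query from Go, assuming the golang.org/x/sys/unix package is available (Linux-only, not gVisor code):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var fds [2]int
	if err := unix.Pipe(fds[:]); err != nil {
		log.Fatalf("pipe: %v", err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// fcntl(fd, F_GETPIPE_SZ) reports the pipe buffer capacity in bytes;
	// 65536 is a common default on Linux but it is not guaranteed.
	size, err := unix.FcntlInt(uintptr(fds[1]), unix.F_GETPIPE_SZ, 0)
	if err != nil {
		log.Fatalf("fcntl(F_GETPIPE_SZ): %v", err)
	}
	fmt.Printf("pipe buffer size: %d bytes\n", size)
}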
260,008 | 19.06.2019 08:06:55 | 25,200 | 0d1dc50b70baf6b4a3752d5c761f608feea9f30e | Mark tcp_socket test flaky. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2939,6 +2939,8 @@ cc_binary(\ntestonly = 1,\nsrcs = [\"tcp_socket.cc\"],\nlinkstatic = 1,\n+ # FIXME(b/135470853)\n+ tags = [\"flaky\"],\ndeps = [\n\":socket_test_util\",\n\"//test/util:file_descriptor\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Mark tcp_socket test flaky.
PiperOrigin-RevId: 253997465 |
259,881 | 19.06.2019 13:39:57 | 25,200 | 9d2efaac5af3618a637abe2dba23f63387dd086e | Add renamed children pathNodes to target parent
Otherwise future renames may miss Renamed calls. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9test/client_test.go",
"new_path": "pkg/p9/p9test/client_test.go",
"diff": "@@ -269,14 +269,14 @@ type fileGenerator func(*Harness, string, p9.File) (*Mock, *Mock, p9.File)\nfunc walkHelper(h *Harness, name string, dir p9.File) (parentBackend *Mock, walkedBackend *Mock, walked p9.File) {\n_, parent, err := dir.Walk(nil)\nif err != nil {\n- h.t.Fatalf(\"got walk err %v, want nil\", err)\n+ h.t.Fatalf(\"Walk(nil) got err %v, want nil\", err)\n}\ndefer parent.Close()\nparentBackend = h.Pop(parent)\n_, walked, err = parent.Walk([]string{name})\nif err != nil {\n- h.t.Fatalf(\"got walk err %v, want nil\", err)\n+ h.t.Fatalf(\"Walk(%s) got err %v, want nil\", name, err)\n}\nwalkedBackend = h.Pop(walked)\n@@ -854,6 +854,62 @@ func TestRenameAtInvalid(t *testing.T) {\n}\n}\n+// TestRenameSecondOrder tests that indirect rename targets continue to receive\n+// Renamed calls after a rename of its renamed parent. i.e.,\n+//\n+// 1. Create /one/file\n+// 2. Create /directory\n+// 3. Rename /one -> /directory/one\n+// 4. Rename /directory -> /three/foo\n+// 5. file from (1) should still receive Renamed.\n+//\n+// This is a regression test for b/135219260.\n+func TestRenameSecondOrder(t *testing.T) {\n+ h, c := NewHarness(t)\n+ defer h.Finish()\n+\n+ rootBackend, root := newRoot(h, c)\n+ defer root.Close()\n+\n+ // Walk to /one.\n+ _, oneBackend, oneFile := walkHelper(h, \"one\", root)\n+ defer oneFile.Close()\n+\n+ // Walk to and generate /one/file.\n+ //\n+ // walkHelper re-walks to oneFile, so we need the second backend,\n+ // which will also receive Renamed calls.\n+ oneSecondBackend, fileBackend, fileFile := walkHelper(h, \"file\", oneFile)\n+ defer fileFile.Close()\n+\n+ // Walk to and generate /directory.\n+ _, directoryBackend, directoryFile := walkHelper(h, \"directory\", root)\n+ defer directoryFile.Close()\n+\n+ // Rename /one to /directory/one.\n+ rootBackend.EXPECT().RenameAt(\"one\", directoryBackend, \"one\").Return(nil)\n+ expectRenamed(oneBackend, []string{}, directoryBackend, \"one\")\n+ expectRenamed(oneSecondBackend, []string{}, directoryBackend, \"one\")\n+ expectRenamed(fileBackend, []string{}, oneBackend, \"file\")\n+ if err := renameAt(h, root, directoryFile, \"one\", \"one\", false); err != nil {\n+ h.t.Fatalf(\"got rename err %v, want nil\", err)\n+ }\n+\n+ // Walk to /three.\n+ _, threeBackend, threeFile := walkHelper(h, \"three\", root)\n+ defer threeFile.Close()\n+\n+ // Rename /directory to /three/foo.\n+ rootBackend.EXPECT().RenameAt(\"directory\", threeBackend, \"foo\").Return(nil)\n+ expectRenamed(directoryBackend, []string{}, threeBackend, \"foo\")\n+ expectRenamed(oneBackend, []string{}, directoryBackend, \"one\")\n+ expectRenamed(oneSecondBackend, []string{}, directoryBackend, \"one\")\n+ expectRenamed(fileBackend, []string{}, oneBackend, \"file\")\n+ if err := renameAt(h, root, threeFile, \"directory\", \"foo\", false); err != nil {\n+ h.t.Fatalf(\"got rename err %v, want nil\", err)\n+ }\n+}\n+\nfunc TestReadlink(t *testing.T) {\nfor name := range newTypeMap(nil) {\nt.Run(name, func(t *testing.T) {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -264,7 +264,7 @@ func (f *fidRef) renameChildTo(oldName string, target *fidRef, newName string) {\n})\n// Replace the previous (now deleted) path node.\n- f.pathNode.children.Store(newName, origPathNode)\n+ target.pathNode.children.Store(newName, origPathNode)\n// Call Renamed on everything above.\nnotifyNameChange(origPathNode)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add renamed children pathNodes to target parent
Otherwise future renames may miss Renamed calls.
PiperOrigin-RevId: 254060946 |
259,881 | 19.06.2019 14:47:01 | 25,200 | 773423a997eb19c48942ccb8a390be72bbd9d5e1 | Abort loop on failure
As-is, on failure these will loop forever, resulting in a test timeout
instead of failure. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -268,7 +268,7 @@ TEST_P(TcpSocketTest, BlockingLargeWrite_NoRandomSave) {\nchar readbuf[2500] = {};\nint n = -1;\nwhile (n != 0) {\n- EXPECT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\n+ ASSERT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\nSyscallSucceeds());\nread_bytes += n;\n}\n@@ -345,7 +345,7 @@ TEST_P(TcpSocketTest, BlockingLargeSend_NoRandomSave) {\nchar readbuf[2500] = {};\nint n = -1;\nwhile (n != 0) {\n- EXPECT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\n+ ASSERT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\nSyscallSucceeds());\nread_bytes += n;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Abort loop on failure
As-is, on failure these will loop forever, resulting in a test timeout
instead of failure.
PiperOrigin-RevId: 254074989 |
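Switching EXPECT_THAT to ASSERT_THAT makes the read loop bail out at the first failed read rather than spinning until the framework kills the test. The same idea expressed in Go, shown only as an analogy (t.Fatalf aborts like ASSERT, t.Errorf records and continues like EXPECT):

package example

import (
	"bytes"
	"io"
	"testing"
)

func TestReadLoopAbortsOnError(t *testing.T) {
	var r io.Reader = bytes.NewReader(make([]byte, 5000)) // stand-in for the socket
	buf := make([]byte, 2500)
	total := 0
	for {
		n, err := r.Read(buf)
		total += n
		if err == io.EOF {
			break
		}
		if err != nil {
			// Fatalf (like ASSERT_THAT) leaves the loop immediately;
			// Errorf (like EXPECT_THAT) would keep looping on a
			// persistent error and eventually hit the test timeout.
			t.Fatalf("read: %v", err)
		}
	}
	if total != 5000 {
		t.Fatalf("read %d bytes, want 5000", total)
	}
}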
259,881 | 19.06.2019 17:17:12 | 25,200 | c2d87d5d7cd1b7c9e601bfabdcbf53aebfb89de4 | Mark tcp_socket test flaky (for real)
The tag on the binary has no effect. It must be on the test. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -609,6 +609,8 @@ syscall_test(test = \"//test/syscalls/linux:sysret_test\")\nsyscall_test(\nsize = \"medium\",\nshard_count = 10,\n+ # FIXME(b/135470853)\n+ tags = [\"flaky\"],\ntest = \"//test/syscalls/linux:tcp_socket_test\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -2939,8 +2939,6 @@ cc_binary(\ntestonly = 1,\nsrcs = [\"tcp_socket.cc\"],\nlinkstatic = 1,\n- # FIXME(b/135470853)\n- tags = [\"flaky\"],\ndeps = [\n\":socket_test_util\",\n\"//test/util:file_descriptor\",\n"
}
] | Go | Apache License 2.0 | google/gvisor | Mark tcp_socket test flaky (for real)
The tag on the binary has no effect. It must be on the test.
PiperOrigin-RevId: 254103480 |
259,854 | 19.06.2019 18:39:55 | 25,200 | 7e49515696f628a41ed63199570d25dfbe9d8848 | Deflake SendFileTest_Shutdown.
The sendfile syscall's backing doSplice contained a race with regard to
blocking. If the first attempt failed with syserror.ErrWouldBlock and then
the blocking file became ready before registering a waiter, we would just
return ErrWouldBlock (even if we were supposed to block).
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_splice.go",
"new_path": "pkg/sentry/syscalls/linux/sys_splice.go",
"diff": "@@ -48,12 +48,12 @@ func doSplice(t *kernel.Task, outFile, inFile *fs.File, opts fs.SpliceOpts, nonB\nif ch == nil {\nch = make(chan struct{}, 1)\n}\n- if !inW && inFile.Readiness(EventMaskRead) == 0 && !inFile.Flags().NonBlocking {\n+ if !inW && !inFile.Flags().NonBlocking {\nw, _ := waiter.NewChannelEntry(ch)\ninFile.EventRegister(&w, EventMaskRead)\ndefer inFile.EventUnregister(&w)\ninW = true // Registered.\n- } else if !outW && outFile.Readiness(EventMaskWrite) == 0 && !outFile.Flags().NonBlocking {\n+ } else if !outW && !outFile.Flags().NonBlocking {\nw, _ := waiter.NewChannelEntry(ch)\noutFile.EventRegister(&w, EventMaskWrite)\ndefer outFile.EventUnregister(&w)\n@@ -65,6 +65,11 @@ func doSplice(t *kernel.Task, outFile, inFile *fs.File, opts fs.SpliceOpts, nonB\nbreak\n}\n+ if (!inW || inFile.Readiness(EventMaskRead) != 0) && (!outW || outFile.Readiness(EventMaskWrite) != 0) {\n+ // Something became ready, try again without blocking.\n+ continue\n+ }\n+\n// Block until there's data.\nif err = t.Block(ch); err != nil {\nbreak\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake SendFileTest_Shutdown.
The sendfile syscall's backing doSplice contained a race with regard to
blocking. If the first attempt failed with syserror.ErrWouldBlock and then
the blocking file became ready before registering a waiter, we would just
return ErrWouldBlock (even if we were supposed to block).
PiperOrigin-RevId: 254114432 |
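The fix follows the usual register-then-recheck pattern: after registering the waiter, readiness is checked once more before blocking so a wakeup that raced with registration is not lost. A generic, hedged Go sketch of that pattern (the ready/register callbacks are invented for illustration and are not the sentry's real waiter API):

package example

// waitReady sketches the register-then-recheck shape that avoids the
// lost-wakeup race: if the resource became ready between the failed attempt
// and the registration, blocking on the channel would wait forever, or, as
// in the bug above, the caller might give up with a spurious "would block".
func waitReady(ready func() bool, register func(chan struct{}) func()) {
	ch := make(chan struct{}, 1)
	cancel := register(ch) // start receiving readiness notifications
	defer cancel()

	// Re-check after registering: a readiness change delivered before the
	// registration would otherwise be missed.
	if ready() {
		return
	}
	<-ch // safe to block now; any later readiness event lands on ch
}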
259,881 | 20.06.2019 11:30:08 | 25,200 | b46ec3704b60bebdd63a597c62f3f471ee0d9be9 | Drop extra character | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -117,7 +117,7 @@ func New(conf *boot.Config, args *Args) (*Sandbox, error) {\n// occurs. Any errors occurring during cleanup itself are ignored.\nc := specutils.MakeCleanup(func() {\nerr := s.destroy()\n- log.Warningf(\"error Ndestroying sandbox: %v\", err)\n+ log.Warningf(\"error destroying sandbox: %v\", err)\n})\ndefer c.Clean()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop extra character
PiperOrigin-RevId: 254237530 |
260,008 | 20.06.2019 12:54:40 | 25,200 | 0b2135072d3a6b418f87f166b58dcf877f7c2fba | Implement madvise(MADV_DONTFORK) | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/lifecycle.go",
"new_path": "pkg/sentry/mm/lifecycle.go",
"diff": "@@ -86,10 +86,22 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {\n}\n// Copy vmas.\n+ dontforks := false\ndstvgap := mm2.vmas.FirstGap()\nfor srcvseg := mm.vmas.FirstSegment(); srcvseg.Ok(); srcvseg = srcvseg.NextSegment() {\nvma := srcvseg.Value() // makes a copy of the vma\nvmaAR := srcvseg.Range()\n+\n+ if vma.dontfork {\n+ length := uint64(vmaAR.Length())\n+ mm2.usageAS -= length\n+ if vma.isPrivateDataLocked() {\n+ mm2.dataAS -= length\n+ }\n+ dontforks = true\n+ continue\n+ }\n+\n// Inform the Mappable, if any, of the new mapping.\nif vma.mappable != nil {\nif err := vma.mappable.AddMapping(ctx, mm2, vmaAR, vma.off, vma.canWriteMappableLocked()); err != nil {\n@@ -118,6 +130,10 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {\ndefer mm2.activeMu.Unlock()\nmm.activeMu.Lock()\ndefer mm.activeMu.Unlock()\n+ if dontforks {\n+ defer mm.pmas.MergeRange(mm.applicationAddrRange())\n+ }\n+ srcvseg := mm.vmas.FirstSegment()\ndstpgap := mm2.pmas.FirstGap()\nvar unmapAR usermem.AddrRange\nfor srcpseg := mm.pmas.FirstSegment(); srcpseg.Ok(); srcpseg = srcpseg.NextSegment() {\n@@ -125,6 +141,27 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {\nif !pma.private {\ncontinue\n}\n+\n+ if dontforks {\n+ // Find the 'vma' that contains the starting address\n+ // associated with the 'pma' (there must be one).\n+ srcvseg = srcvseg.seekNextLowerBound(srcpseg.Start())\n+ if checkInvariants {\n+ if !srcvseg.Ok() {\n+ panic(fmt.Sprintf(\"no vma covers pma range %v\", srcpseg.Range()))\n+ }\n+ if srcpseg.Start() < srcvseg.Start() {\n+ panic(fmt.Sprintf(\"vma %v ran ahead of pma %v\", srcvseg.Range(), srcpseg.Range()))\n+ }\n+ }\n+\n+ srcpseg = mm.pmas.Isolate(srcpseg, srcvseg.Range())\n+ if srcvseg.ValuePtr().dontfork {\n+ continue\n+ }\n+ pma = srcpseg.ValuePtr()\n+ }\n+\nif !pma.needCOW {\npma.needCOW = true\nif pma.effectivePerms.Write {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/mm.go",
"new_path": "pkg/sentry/mm/mm.go",
"diff": "@@ -274,6 +274,9 @@ type vma struct {\n// metag, none of which we currently support.\ngrowsDown bool `state:\"manual\"`\n+ // dontfork is the MADV_DONTFORK setting for this vma configured by madvise().\n+ dontfork bool\n+\nmlockMode memmap.MLockMode\n// numaPolicy is the NUMA policy for this vma set by mbind().\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/syscalls.go",
"new_path": "pkg/sentry/mm/syscalls.go",
"diff": "@@ -1026,6 +1026,32 @@ func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy\n}\n}\n+// SetDontFork implements the semantics of madvise MADV_DONTFORK.\n+func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork bool) error {\n+ ar, ok := addr.ToRange(length)\n+ if !ok {\n+ return syserror.EINVAL\n+ }\n+\n+ mm.mappingMu.Lock()\n+ defer mm.mappingMu.Unlock()\n+ defer func() {\n+ mm.vmas.MergeRange(ar)\n+ mm.vmas.MergeAdjacent(ar)\n+ }()\n+\n+ for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {\n+ vseg = mm.vmas.Isolate(vseg, ar)\n+ vma := vseg.ValuePtr()\n+ vma.dontfork = dontfork\n+ }\n+\n+ if mm.vmas.SpanRange(ar) != ar.Length() {\n+ return syserror.ENOMEM\n+ }\n+ return nil\n+}\n+\n// Decommit implements the semantics of Linux's madvise(MADV_DONTNEED).\nfunc (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {\nar, ok := addr.ToRange(length)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/vma.go",
"new_path": "pkg/sentry/mm/vma.go",
"diff": "@@ -439,6 +439,7 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa\nvma1.mlockMode != vma2.mlockMode ||\nvma1.numaPolicy != vma2.numaPolicy ||\nvma1.numaNodemask != vma2.numaNodemask ||\n+ vma1.dontfork != vma2.dontfork ||\nvma1.id != vma2.id ||\nvma1.hint != vma2.hint {\nreturn vma{}, false\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"new_path": "pkg/sentry/syscalls/linux/sys_mmap.go",
"diff": "@@ -180,6 +180,10 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\nswitch adv {\ncase linux.MADV_DONTNEED:\nreturn 0, nil, t.MemoryManager().Decommit(addr, length)\n+ case linux.MADV_DOFORK:\n+ return 0, nil, t.MemoryManager().SetDontFork(addr, length, false)\n+ case linux.MADV_DONTFORK:\n+ return 0, nil, t.MemoryManager().SetDontFork(addr, length, true)\ncase linux.MADV_HUGEPAGE, linux.MADV_NOHUGEPAGE:\nfallthrough\ncase linux.MADV_MERGEABLE, linux.MADV_UNMERGEABLE:\n@@ -191,7 +195,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\ncase linux.MADV_NORMAL, linux.MADV_RANDOM, linux.MADV_SEQUENTIAL, linux.MADV_WILLNEED:\n// Do nothing, we totally ignore the suggestions above.\nreturn 0, nil, nil\n- case linux.MADV_REMOVE, linux.MADV_DOFORK, linux.MADV_DONTFORK:\n+ case linux.MADV_REMOVE:\n// These \"suggestions\" have application-visible side effects, so we\n// have to indicate that we don't support them.\nreturn 0, nil, syserror.ENOSYS\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -985,6 +985,7 @@ cc_binary(\n\"//test/util:file_descriptor\",\n\"//test/util:logging\",\n\"//test/util:memory_util\",\n+ \"//test/util:multiprocess_util\",\n\"//test/util:posix_error\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/madvise.cc",
"new_path": "test/syscalls/linux/madvise.cc",
"diff": "#include \"test/util/file_descriptor.h\"\n#include \"test/util/logging.h\"\n#include \"test/util/memory_util.h\"\n+#include \"test/util/multiprocess_util.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n@@ -136,6 +137,114 @@ TEST(MadviseDontneedTest, IgnoresPermissions) {\nEXPECT_THAT(madvise(m.ptr(), m.len(), MADV_DONTNEED), SyscallSucceeds());\n}\n+TEST(MadviseDontforkTest, AddressLength) {\n+ auto m =\n+ ASSERT_NO_ERRNO_AND_VALUE(MmapAnon(kPageSize, PROT_NONE, MAP_PRIVATE));\n+ char *addr = static_cast<char *>(m.ptr());\n+\n+ // Address must be page aligned.\n+ EXPECT_THAT(madvise(addr + 1, kPageSize, MADV_DONTFORK),\n+ SyscallFailsWithErrno(EINVAL));\n+\n+ // Zero length madvise always succeeds.\n+ EXPECT_THAT(madvise(addr, 0, MADV_DONTFORK), SyscallSucceeds());\n+\n+ // Length must not roll over after rounding up.\n+ size_t badlen = std::numeric_limits<std::size_t>::max() - (kPageSize / 2);\n+ EXPECT_THAT(madvise(0, badlen, MADV_DONTFORK), SyscallFailsWithErrno(EINVAL));\n+\n+ // Length need not be page aligned - it is implicitly rounded up.\n+ EXPECT_THAT(madvise(addr, 1, MADV_DONTFORK), SyscallSucceeds());\n+ EXPECT_THAT(madvise(addr, kPageSize, MADV_DONTFORK), SyscallSucceeds());\n+}\n+\n+TEST(MadviseDontforkTest, DontforkShared) {\n+ // Mmap two shared file-backed pages and MADV_DONTFORK the second page.\n+ TempPath f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ /* parent = */ GetAbsoluteTestTmpdir(),\n+ /* content = */ std::string(kPageSize * 2, 2), TempPath::kDefaultFileMode));\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(f.path(), O_RDWR));\n+\n+ Mapping m = ASSERT_NO_ERRNO_AND_VALUE(Mmap(\n+ nullptr, kPageSize * 2, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));\n+\n+ const Mapping ms1 = Mapping(reinterpret_cast<void *>(m.addr()), kPageSize);\n+ const Mapping ms2 =\n+ Mapping(reinterpret_cast<void *>(m.addr() + kPageSize), kPageSize);\n+ m.release();\n+\n+ ASSERT_THAT(madvise(ms2.ptr(), kPageSize, MADV_DONTFORK), SyscallSucceeds());\n+\n+ const auto rest = [&] {\n+ // First page is mapped in child and modifications are visible to parent\n+ // via the shared mapping.\n+ TEST_CHECK(IsMapped(ms1.addr()));\n+ ExpectAllMappingBytes(ms1, 2);\n+ memset(ms1.ptr(), 1, kPageSize);\n+ ExpectAllMappingBytes(ms1, 1);\n+\n+ // Second page must not be mapped in child.\n+ TEST_CHECK(!IsMapped(ms2.addr()));\n+ };\n+\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+\n+ ExpectAllMappingBytes(ms1, 1); // page contents modified by child.\n+ ExpectAllMappingBytes(ms2, 2); // page contents unchanged.\n+}\n+\n+TEST(MadviseDontforkTest, DontforkAnonPrivate) {\n+ // Mmap three anonymous pages and MADV_DONTFORK the middle page.\n+ Mapping m = ASSERT_NO_ERRNO_AND_VALUE(\n+ MmapAnon(kPageSize * 3, PROT_READ | PROT_WRITE, MAP_PRIVATE));\n+ const Mapping mp1 = Mapping(reinterpret_cast<void *>(m.addr()), kPageSize);\n+ const Mapping mp2 =\n+ Mapping(reinterpret_cast<void *>(m.addr() + kPageSize), kPageSize);\n+ const Mapping mp3 =\n+ Mapping(reinterpret_cast<void *>(m.addr() + 2 * kPageSize), kPageSize);\n+ m.release();\n+\n+ ASSERT_THAT(madvise(mp2.ptr(), kPageSize, MADV_DONTFORK), SyscallSucceeds());\n+\n+ // Verify that all pages are zeroed and memset the first, second and third\n+ // pages to 1, 2, and 3 respectively.\n+ ExpectAllMappingBytes(mp1, 0);\n+ memset(mp1.ptr(), 1, kPageSize);\n+\n+ ExpectAllMappingBytes(mp2, 0);\n+ memset(mp2.ptr(), 2, kPageSize);\n+\n+ 
ExpectAllMappingBytes(mp3, 0);\n+ memset(mp3.ptr(), 3, kPageSize);\n+\n+ const auto rest = [&] {\n+ // Verify first page is mapped, verify its contents and then modify the\n+ // page. The mapping is private so the modifications are not visible to\n+ // the parent.\n+ TEST_CHECK(IsMapped(mp1.addr()));\n+ ExpectAllMappingBytes(mp1, 1);\n+ memset(mp1.ptr(), 11, kPageSize);\n+ ExpectAllMappingBytes(mp1, 11);\n+\n+ // Verify second page is not mapped.\n+ TEST_CHECK(!IsMapped(mp2.addr()));\n+\n+ // Verify third page is mapped, verify its contents and then modify the\n+ // page. The mapping is private so the modifications are not visible to\n+ // the parent.\n+ TEST_CHECK(IsMapped(mp3.addr()));\n+ ExpectAllMappingBytes(mp3, 3);\n+ memset(mp3.ptr(), 13, kPageSize);\n+ ExpectAllMappingBytes(mp3, 13);\n+ };\n+ EXPECT_THAT(InForkedProcess(rest), IsPosixErrorOkAndHolds(0));\n+\n+ // The fork and COW by child should not affect the parent mappings.\n+ ExpectAllMappingBytes(mp1, 1);\n+ ExpectAllMappingBytes(mp2, 2);\n+ ExpectAllMappingBytes(mp3, 3);\n+}\n+\n} // namespace\n} // namespace testing\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/mremap.cc",
"new_path": "test/syscalls/linux/mremap.cc",
"diff": "@@ -46,17 +46,6 @@ PosixErrorOr<void*> Mremap(void* old_address, size_t old_size, size_t new_size,\nreturn rv;\n}\n-// Returns true if the page containing addr is mapped.\n-bool IsMapped(uintptr_t addr) {\n- int const rv = msync(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),\n- kPageSize, MS_ASYNC);\n- if (rv == 0) {\n- return true;\n- }\n- TEST_PCHECK_MSG(errno == ENOMEM, \"msync failed with unexpected errno\");\n- return false;\n-}\n-\n// Fixture for mremap tests parameterized by mmap flags.\nusing MremapParamTest = ::testing::TestWithParam<int>;\n"
},
{
"change_type": "MODIFY",
"old_path": "test/util/memory_util.h",
"new_path": "test/util/memory_util.h",
"diff": "@@ -118,6 +118,17 @@ inline PosixErrorOr<Mapping> MmapAnon(size_t length, int prot, int flags) {\nreturn Mmap(nullptr, length, prot, flags | MAP_ANONYMOUS, -1, 0);\n}\n+// Returns true if the page containing addr is mapped.\n+inline bool IsMapped(uintptr_t addr) {\n+ int const rv = msync(reinterpret_cast<void*>(addr & ~(kPageSize - 1)),\n+ kPageSize, MS_ASYNC);\n+ if (rv == 0) {\n+ return true;\n+ }\n+ TEST_PCHECK_MSG(errno == ENOMEM, \"msync failed with unexpected errno\");\n+ return false;\n+}\n+\n} // namespace testing\n} // namespace gvisor\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement madvise(MADV_DONTFORK)
PiperOrigin-RevId: 254253777 |
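With MADV_DONTFORK honored, a private mapping marked this way simply does not exist in a child created by fork. A short usage sketch from Go, assuming golang.org/x/sys/unix on Linux (not gVisor-specific code):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Map one anonymous page.
	page, err := unix.Mmap(-1, 0, 4096,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		log.Fatalf("mmap: %v", err)
	}
	defer unix.Munmap(page)

	// Ask the kernel not to carry this mapping into children created by
	// fork(); the child will see the page as unmapped.
	if err := unix.Madvise(page, unix.MADV_DONTFORK); err != nil {
		log.Fatalf("madvise(MADV_DONTFORK): %v", err)
	}
	log.Printf("page at %p will not be inherited across fork", &page[0])
}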
259,881 | 20.06.2019 13:33:29 | 25,200 | 292f70cbf7b4d2da9f2fc5d1049ba49e6846e805 | Add package docs to seqfile and ramfs
These are the only packages missing docs: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/seqfile/seqfile.go",
"new_path": "pkg/sentry/fs/proc/seqfile/seqfile.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// Package seqfile provides dynamic ordered files.\npackage seqfile\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/ramfs/dir.go",
"new_path": "pkg/sentry/fs/ramfs/dir.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// Package ramfs provides the fundamentals for a simple in-memory filesystem.\npackage ramfs\nimport (\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add package docs to seqfile and ramfs
These are the only packages missing docs:
https://godoc.org/gvisor.dev/gvisor
PiperOrigin-RevId: 254261022 |
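The two one-line additions follow the standard Go convention that the comment immediately above the package clause becomes the package's godoc summary. A minimal, made-up example of the form:

// Package ringbuf provides a fixed-size, single-producer ring buffer.
//
// The comment directly above the package clause is what godoc renders as
// the package overview; its first sentence should begin "Package <name>".
package ringbuf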
259,885 | 20.06.2019 13:33:43 | 25,200 | 7db8685100bc5f69ec42e5498781301ee835667c | Preallocate auth.NewAnonymousCredentials() in contexttest.TestContext.
Otherwise every call to, say, fs.ContextCanAccessFile() in a benchmark
using contexttest allocates new auth.Credentials, a new
auth.UserNamespace, ... | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/context/contexttest/contexttest.go",
"new_path": "pkg/sentry/context/contexttest/contexttest.go",
"diff": "@@ -59,6 +59,7 @@ func Context(tb testing.TB) context.Context {\nl: limits.NewLimitSet(),\nmf: mf,\nplatform: p,\n+ creds: auth.NewAnonymousCredentials(),\notherValues: make(map[interface{}]interface{}),\n}\n}\n@@ -70,6 +71,7 @@ type TestContext struct {\nl *limits.LimitSet\nmf *pgalloc.MemoryFile\nplatform platform.Platform\n+ creds *auth.Credentials\notherValues map[interface{}]interface{}\n}\n@@ -108,6 +110,8 @@ func (t *TestContext) RegisterValue(key, value interface{}) {\n// Value implements context.Context.\nfunc (t *TestContext) Value(key interface{}) interface{} {\nswitch key {\n+ case auth.CtxCredentials:\n+ return t.creds\ncase limits.CtxLimits:\nreturn t.l\ncase pgalloc.CtxMemoryFile:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Preallocate auth.NewAnonymousCredentials() in contexttest.TestContext.
Otherwise every call to, say, fs.ContextCanAccessFile() in a benchmark
using contexttest allocates new auth.Credentials, a new
auth.UserNamespace, ...
PiperOrigin-RevId: 254261051 |
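The change hoists a per-call allocation into the test context's constructor so benchmarks that query the context repeatedly no longer allocate credentials on every lookup. A generic, hedged sketch of the same move (all names below are invented; this is not the contexttest API):

package example

// creds stands in for an expensive-to-construct value such as credentials.
type creds struct{ data [256]byte }

type testContext struct {
	// Built once by the constructor and handed out on every lookup, so a
	// benchmark that calls Value in a loop does not allocate.
	creds *creds
}

// newTestContext preallocates the shared pieces up front, mirroring the
// preallocation of anonymous credentials in the change above.
func newTestContext() *testContext {
	return &testContext{creds: &creds{}}
}

// Value returns the cached object instead of building a fresh one per call.
func (t *testContext) Value(key interface{}) interface{} {
	if key == "creds" {
		return t.creds
	}
	return nil
}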
260,008 | 20.06.2019 15:57:11 | 25,200 | 3c7448ab6f178cfc171545d15ae039f318e38225 | Deflake TestSIGALRMToMainThread.
Bump up the threshold on number of SIGALRMs received by worker
threads from 50 to 200. Even with the new threshold we still
expect that the majority of SIGALRMs are received by the
thread group leader. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/itimer.cc",
"new_path": "test/syscalls/linux/itimer.cc",
"diff": "@@ -196,13 +196,18 @@ int TestSIGALRMToMainThread() {\n// ITIMER_REAL-generated SIGALRMs prefer to deliver to the thread group leader\n// (but don't guarantee it), so we expect to see most samples on the main\n// thread.\n+ //\n+ // The number of SIGALRMs delivered to a worker should not exceed 20%\n+ // of the number of total signals expected (this is somewhat arbitrary).\n+ const int worker_threshold = result.expected_total / 5;\n+\n//\n// Linux only guarantees timers will never expire before the requested time.\n// Thus, we only check the upper bound and also it at least have one sample.\nTEST_CHECK(result.main_thread_samples <= result.expected_total);\nTEST_CHECK(result.main_thread_samples > 0);\nfor (int num : result.worker_samples) {\n- TEST_CHECK_MSG(num <= 50, \"worker received too many samples\");\n+ TEST_CHECK_MSG(num <= worker_threshold, \"worker received too many samples\");\n}\nreturn 0;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake TestSIGALRMToMainThread.
Bump up the threshold on number of SIGALRMs received by worker
threads from 50 to 200. Even with the new threshold we still
expect that the majority of SIGALRMs are received by the
thread group leader.
PiperOrigin-RevId: 254289787 |
259,854 | 20.06.2019 20:39:22 | 25,200 | dc36c34a766500507e4ac90547b58b88625bbc0d | Close FD on TcpSocketTest loop failure.
This helps prevent the blocking call from getting stuck and causing a test
timeout. | [
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/tcp_socket.cc",
"new_path": "test/syscalls/linux/tcp_socket.cc",
"diff": "@@ -265,10 +265,16 @@ TEST_P(TcpSocketTest, BlockingLargeWrite_NoRandomSave) {\nScopedThread t([this, &read_bytes]() {\n// Avoid interrupting the blocking write in main thread.\nconst DisableSave ds;\n+\n+ // Take ownership of the FD so that we close it on failure. This will\n+ // unblock the blocking write below.\n+ FileDescriptor fd(t_);\n+ t_ = -1;\n+\nchar readbuf[2500] = {};\nint n = -1;\nwhile (n != 0) {\n- ASSERT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\n+ ASSERT_THAT(n = RetryEINTR(read)(fd.get(), &readbuf, sizeof(readbuf)),\nSyscallSucceeds());\nread_bytes += n;\n}\n@@ -342,10 +348,16 @@ TEST_P(TcpSocketTest, BlockingLargeSend_NoRandomSave) {\nScopedThread t([this, &read_bytes]() {\n// Avoid interrupting the blocking write in main thread.\nconst DisableSave ds;\n+\n+ // Take ownership of the FD so that we close it on failure. This will\n+ // unblock the blocking write below.\n+ FileDescriptor fd(t_);\n+ t_ = -1;\n+\nchar readbuf[2500] = {};\nint n = -1;\nwhile (n != 0) {\n- ASSERT_THAT(n = RetryEINTR(read)(t_, &readbuf, sizeof(readbuf)),\n+ ASSERT_THAT(n = RetryEINTR(read)(fd.get(), &readbuf, sizeof(readbuf)),\nSyscallSucceeds());\nread_bytes += n;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Close FD on TcpSocketTest loop failure.
This helps prevent the blocking call from getting stuck and causing a test
timeout.
PiperOrigin-RevId: 254325926 |
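Handing ownership of the descriptor to the helper thread means it gets closed on any early exit, which in turn unblocks the peer stuck in a blocking write. A hedged Go illustration of why closing one end releases the other (net.Pipe stands in for the connected socket pair):

package example

import (
	"net"
	"testing"
	"time"
)

// TestCloseUnblocksPeer shows why the reader takes ownership of its end of
// the connection: closing it on an early exit releases a peer blocked in
// Write, instead of letting the whole test hang until timeout.
func TestCloseUnblocksPeer(t *testing.T) {
	client, server := net.Pipe()

	go func() {
		// The reader gives up early and closes its end, standing in for
		// the FileDescriptor destructor in the C++ test.
		time.Sleep(10 * time.Millisecond)
		server.Close()
	}()

	// Without the Close above, this Write would block forever because
	// nobody ever reads from the other end of the pipe.
	_, err := client.Write(make([]byte, 1<<20))
	if err == nil {
		t.Fatalf("expected write to fail after peer closed")
	}
}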
259,853 | 21.06.2019 11:54:28 | 25,200 | f94653b3dea629f365ce5742b99bbcaa7673ded2 | kernel: call t.mu.Unlock() explicitly in WithMuLocked
defer here doesn't improve readability, but we know it is slower than
the explicit call. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -703,8 +703,8 @@ func (t *Task) FDMap() *FDMap {\n// WithMuLocked executes f with t.mu locked.\nfunc (t *Task) WithMuLocked(f func(*Task)) {\nt.mu.Lock()\n- defer t.mu.Unlock()\nf(t)\n+ t.mu.Unlock()\n}\n// MountNamespace returns t's MountNamespace. MountNamespace does not take an\n"
}
] | Go | Apache License 2.0 | google/gvisor | kernel: call t.mu.Unlock() explicitly in WithMuLocked
defer here doesn't improve readability, but we know it is slower than
the explicit call.
PiperOrigin-RevId: 254441473 |
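The reasoning is that a deferred unlock carried measurable overhead on this hot path at the time (newer Go releases have since made defer considerably cheaper), so the commit trades defer's panic-safety for an explicit call. Both shapes side by side, as a neutral sketch rather than a recommendation:

package example

import "sync"

type task struct {
	mu   sync.Mutex
	name string
}

// withMuLockedDefer is the more idiomatic shape; the deferred unlock always
// runs, even if f panics.
func (t *task) withMuLockedDefer(f func(*task)) {
	t.mu.Lock()
	defer t.mu.Unlock()
	f(t)
}

// withMuLockedExplicit matches the commit: a plain Unlock after f, giving up
// the panic-safety of defer for slightly less overhead on a hot path.
func (t *task) withMuLockedExplicit(f func(*task)) {
	t.mu.Lock()
	f(t)
	t.mu.Unlock()
}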
259,992 | 21.06.2019 12:45:50 | 25,200 | 5ba16d51a950d55684c0348a9445784363467c9c | Add list of stuck tasks to panic message | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/watchdog/watchdog.go",
"new_path": "pkg/sentry/watchdog/watchdog.go",
"diff": "@@ -271,23 +271,23 @@ func (w *Watchdog) reportStuckWatchdog() {\nw.onStuckTask(true, &buf)\n}\n-func (w *Watchdog) onStuckTask(newTaskFound bool, buf *bytes.Buffer) {\n+func (w *Watchdog) onStuckTask(newTaskFound bool, msg *bytes.Buffer) {\nswitch w.timeoutAction {\ncase LogWarning:\n// Dump stack only if a new task is detected or if it sometime has passed since\n// the last time a stack dump was generated.\nif !newTaskFound && time.Since(w.lastStackDump) < stackDumpSameTaskPeriod {\n- buf.WriteString(\"\\n...[stack dump skipped]...\")\n- log.Warningf(buf.String())\n+ msg.WriteString(\"\\n...[stack dump skipped]...\")\n+ log.Warningf(msg.String())\n} else {\n- log.TracebackAll(buf.String())\n+ log.TracebackAll(msg.String())\nw.lastStackDump = time.Now()\n}\ncase Panic:\n// Panic will skip over running tasks, which is likely the culprit here. So manually\n// dump all stacks before panic'ing.\n- log.TracebackAll(buf.String())\n+ log.TracebackAll(msg.String())\n// Attempt to flush metrics, timeout and move on in case metrics are stuck as well.\nmetricsEmitted := make(chan struct{}, 1)\n@@ -300,6 +300,6 @@ func (w *Watchdog) onStuckTask(newTaskFound bool, buf *bytes.Buffer) {\ncase <-metricsEmitted:\ncase <-time.After(1 * time.Second):\n}\n- panic(\"Sentry detected stuck task(s). See stack trace and message above for more details\")\n+ panic(fmt.Sprintf(\"Stack for running G's are skipped while panicking.\\n%s\", msg.String()))\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add list of stuck tasks to panic message
PiperOrigin-RevId: 254450309 |
259,907 | 21.06.2019 15:41:42 | 25,200 | 727375321f292878237ac6ace447e43ed393750c | ext4 block group descriptor implementation in disk layout package. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/ext4/disklayout/BUILD",
"diff": "+package(licenses = [\"notice\"])\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n+\n+go_library(\n+ name = \"disklayout\",\n+ srcs = [\n+ \"block_group.go\",\n+ \"block_group_32.go\",\n+ \"block_group_64.go\",\n+ ],\n+ importpath = \"gvisor.dev/gvisor/pkg/sentry/fs/ext4/disklayout\",\n+)\n+\n+go_test(\n+ name = \"disklayout_test\",\n+ size = \"small\",\n+ srcs = [\"block_group_test.go\"],\n+ embed = [\":disklayout\"],\n+ deps = [\"//pkg/binary\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/ext4/disklayout/block_group_32.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package disklayout\n+\n+// BlockGroup32Bit emulates the first half of struct ext4_group_desc in\n+// fs/ext4/ext4.h. It is the block group descriptor struct for 32-bit ext4\n+// filesystems. It implements BlockGroup interface.\n+//\n+// The suffix `Lo` here stands for lower bits because this is also used in the\n+// 64-bit version where these fields represent the lower half of the fields.\n+// The suffix `Raw` has been added to indicate that the field does not have a\n+// counterpart in the 64-bit version and to resolve name collision with the\n+// interface.\n+type BlockGroup32Bit struct {\n+ BlockBitmapLo uint32\n+ InodeBitmapLo uint32\n+ InodeTableLo uint32\n+ FreeBlocksCountLo uint16\n+ FreeInodesCountLo uint16\n+ UsedDirsCountLo uint16\n+ FlagsRaw uint16\n+ ExcludeBitmapLo uint32\n+ BlockBitmapChecksumLo uint16\n+ InodeBitmapChecksumLo uint16\n+ ItableUnusedLo uint16\n+ ChecksumRaw uint16\n+}\n+\n+// InodeTable implements BlockGroup.InodeTable.\n+func (bg *BlockGroup32Bit) InodeTable() uint64 { return uint64(bg.InodeTableLo) }\n+\n+// BlockBitmap implements BlockGroup.BlockBitmap.\n+func (bg *BlockGroup32Bit) BlockBitmap() uint64 { return uint64(bg.BlockBitmapLo) }\n+\n+// InodeBitmap implements BlockGroup.InodeBitmap.\n+func (bg *BlockGroup32Bit) InodeBitmap() uint64 { return uint64(bg.InodeBitmapLo) }\n+\n+// ExclusionBitmap implements BlockGroup.ExclusionBitmap.\n+func (bg *BlockGroup32Bit) ExclusionBitmap() uint64 { return uint64(bg.ExcludeBitmapLo) }\n+\n+// FreeBlocksCount implements BlockGroup.FreeBlocksCount.\n+func (bg *BlockGroup32Bit) FreeBlocksCount() uint32 { return uint32(bg.FreeBlocksCountLo) }\n+\n+// FreeInodesCount implements BlockGroup.FreeInodesCount.\n+func (bg *BlockGroup32Bit) FreeInodesCount() uint32 { return uint32(bg.FreeInodesCountLo) }\n+\n+// DirectoryCount implements BlockGroup.DirectoryCount.\n+func (bg *BlockGroup32Bit) DirectoryCount() uint32 { return uint32(bg.UsedDirsCountLo) }\n+\n+// UnusedInodeCount implements BlockGroup.UnusedInodeCount.\n+func (bg *BlockGroup32Bit) UnusedInodeCount() uint32 { return uint32(bg.ItableUnusedLo) }\n+\n+// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.\n+func (bg *BlockGroup32Bit) BlockBitmapChecksum() uint32 { return uint32(bg.BlockBitmapChecksumLo) }\n+\n+// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.\n+func (bg *BlockGroup32Bit) InodeBitmapChecksum() uint32 { return uint32(bg.InodeBitmapChecksumLo) }\n+\n+// Checksum implements BlockGroup.Checksum.\n+func (bg *BlockGroup32Bit) Checksum() uint16 { return bg.ChecksumRaw }\n+\n+// Flags implements BlockGroup.Flags.\n+func (bg *BlockGroup32Bit) Flags() BGFlags { return BGFlagsFromInt(bg.FlagsRaw) }\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/ext4/disklayout/block_group_64.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package disklayout\n+\n+// BlockGroup64Bit emulates struct ext4_group_desc in fs/ext4/ext4.h.\n+// It is the block group descriptor struct for 64-bit ext4 filesystems.\n+// It implements BlockGroup interface. It is an extension of the 32-bit\n+// version of BlockGroup.\n+//\n+// The suffix `Hi` here stands for upper bits because they represent the upper\n+// half of the fields.\n+type BlockGroup64Bit struct {\n+ // We embed the 32-bit struct here because 64-bit version is just an extension\n+ // of the 32-bit version.\n+ BlockGroup32Bit\n+\n+ // 64-bit specific fields.\n+ BlockBitmapHi uint32\n+ InodeBitmapHi uint32\n+ InodeTableHi uint32\n+ FreeBlocksCountHi uint16\n+ FreeInodesCountHi uint16\n+ UsedDirsCountHi uint16\n+ ItableUnusedHi uint16\n+ ExcludeBitmapHi uint32\n+ BlockBitmapChecksumHi uint16\n+ InodeBitmapChecksumHi uint16\n+ _ uint32 // Padding to 64 bytes.\n+}\n+\n+// Methods to override. Checksum() and Flags() are not overridden.\n+\n+// InodeTable implements BlockGroup.InodeTable.\n+func (bg *BlockGroup64Bit) InodeTable() uint64 {\n+ return (uint64(bg.InodeTableHi) << 32) | uint64(bg.InodeTableLo)\n+}\n+\n+// BlockBitmap implements BlockGroup.BlockBitmap.\n+func (bg *BlockGroup64Bit) BlockBitmap() uint64 {\n+ return (uint64(bg.BlockBitmapHi) << 32) | uint64(bg.BlockBitmapLo)\n+}\n+\n+// InodeBitmap implements BlockGroup.InodeBitmap.\n+func (bg *BlockGroup64Bit) InodeBitmap() uint64 {\n+ return (uint64(bg.InodeBitmapHi) << 32) | uint64(bg.InodeBitmapLo)\n+}\n+\n+// ExclusionBitmap implements BlockGroup.ExclusionBitmap.\n+func (bg *BlockGroup64Bit) ExclusionBitmap() uint64 {\n+ return (uint64(bg.ExcludeBitmapHi) << 32) | uint64(bg.ExcludeBitmapLo)\n+}\n+\n+// FreeBlocksCount implements BlockGroup.FreeBlocksCount.\n+func (bg *BlockGroup64Bit) FreeBlocksCount() uint32 {\n+ return (uint32(bg.FreeBlocksCountHi) << 16) | uint32(bg.FreeBlocksCountLo)\n+}\n+\n+// FreeInodesCount implements BlockGroup.FreeInodesCount.\n+func (bg *BlockGroup64Bit) FreeInodesCount() uint32 {\n+ return (uint32(bg.FreeInodesCountHi) << 16) | uint32(bg.FreeInodesCountLo)\n+}\n+\n+// DirectoryCount implements BlockGroup.DirectoryCount.\n+func (bg *BlockGroup64Bit) DirectoryCount() uint32 {\n+ return (uint32(bg.UsedDirsCountHi) << 16) | uint32(bg.UsedDirsCountLo)\n+}\n+\n+// UnusedInodeCount implements BlockGroup.UnusedInodeCount.\n+func (bg *BlockGroup64Bit) UnusedInodeCount() uint32 {\n+ return (uint32(bg.ItableUnusedHi) << 16) | uint32(bg.ItableUnusedLo)\n+}\n+\n+// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.\n+func (bg *BlockGroup64Bit) BlockBitmapChecksum() uint32 {\n+ return (uint32(bg.BlockBitmapChecksumHi) << 16) | uint32(bg.BlockBitmapChecksumLo)\n+}\n+\n+// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.\n+func (bg *BlockGroup64Bit) InodeBitmapChecksum() uint32 {\n+ return (uint32(bg.InodeBitmapChecksumHi) << 16) | 
uint32(bg.InodeBitmapChecksumLo)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/ext4/disklayout/block_group_test.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package disklayout\n+\n+import (\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/binary\"\n+)\n+\n+// TestBlockGroupSize tests the fact that the block group struct for\n+// 32-bit ext filesystems should be exactly 32 bytes big and for 64-bit fs it\n+// should be 64 bytes.\n+func TestBlockGroupSize(t *testing.T) {\n+ if got, want := int(binary.Size(BlockGroup32Bit{})), 32; got != want {\n+ t.Errorf(\"BlockGroup32Bit should be exactly 32 bytes but is %d bytes\", got)\n+ }\n+\n+ if got, want := int(binary.Size(BlockGroup64Bit{})), 64; got != want {\n+ t.Errorf(\"BlockGroup64Bit should be exactly 64 bytes but is %d bytes\", got)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | ext4 block group descriptor implementation in disk layout package.
PiperOrigin-RevId: 254482180 |
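In the 64-bit descriptor every accessor widens a field by pairing the Lo half from the embedded 32-bit struct with a Hi half, which reduces to a shift-and-or. A stripped-down illustration of just that combining step (simplified helper names, not the real disklayout types):

package main

import "fmt"

// combine64 widens a 32/32 split field: the high half occupies bits 32-63.
func combine64(hi, lo uint32) uint64 {
	return uint64(hi)<<32 | uint64(lo)
}

// combine32 widens a 16/16 split field: the high half occupies bits 16-31.
func combine32(hi, lo uint16) uint32 {
	return uint32(hi)<<16 | uint32(lo)
}

func main() {
	// e.g. a block address whose lower word is 0x1000 and whose upper word
	// is 0x2 resolves to block 0x200001000.
	fmt.Printf("%#x\n", combine64(0x2, 0x1000)) // 0x200001000
	fmt.Printf("%#x\n", combine32(0x1, 0x8000)) // 0x18000
}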
259,847 | 21.06.2019 15:55:15 | 25,200 | ae4ef32b8c77a067229c593af784fbfa3098fd97 | Deflake TestSimpleReceive failures due to timeouts
This test will occasionally fail waiting to read a packet. From repeated runs,
I've seen waitForPackets take up to 1.5s to complete. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "@@ -636,7 +636,7 @@ func TestSimpleReceive(t *testing.T) {\nsyscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})\n// Wait for packet to be received, then check it.\n- c.waitForPackets(1, time.After(time.Second), \"Error waiting for packet\")\n+ c.waitForPackets(1, time.After(5*time.Second), \"Timeout waiting for packet\")\nc.mu.Lock()\nrcvd := []byte(c.packets[0].vv.First())\nc.packets = c.packets[:0]\n"
}
] | Go | Apache License 2.0 | google/gvisor | Deflake TestSimpleReceive failures due to timeouts
This test will occasionally fail waiting to read a packet. From repeated runs,
I've seen waitForPackets take up to 1.5s to complete.
PiperOrigin-RevId: 254484627 |
259,853 | 21.06.2019 17:24:11 | 25,200 | ab6774cebf5c618d0cae579e84bd39666857f78b | gvisor/fs: getdents returns 0 if offset is equal to FileMaxOffset
FileMaxOffset is a special case when lseek(d, 0, SEEK_END) has been called. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -948,9 +948,6 @@ func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent,\nif dirCtx.Serializer == nil {\npanic(\"Dirent.Readdir: serializer must not be nil\")\n}\n- if d.frozen {\n- return d.readdirFrozen(root, offset, dirCtx)\n- }\n// Check that this is actually a directory before emitting anything.\n// Once we have written entries for \".\" and \"..\", future errors from\n@@ -959,6 +956,16 @@ func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent,\nreturn 0, syserror.ENOTDIR\n}\n+ // This is a special case for lseek(fd, 0, SEEK_END).\n+ // See SeekWithDirCursor for more details.\n+ if offset == FileMaxOffset {\n+ return offset, nil\n+ }\n+\n+ if d.frozen {\n+ return d.readdirFrozen(root, offset, dirCtx)\n+ }\n+\n// Collect attrs for \".\" and \"..\".\ndot, dotdot := d.GetDotAttrs(root)\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/fs: getdents returns 0 if offset is equal to FileMaxOffset
FileMaxOffset is a special case when lseek(d, 0, SEEK_END) has been called.
PiperOrigin-RevId: 254498777 |
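The special case means that seeking a directory to SEEK_END and then calling getdents yields no entries. A hedged sketch of poking at that behavior from Go with golang.org/x/sys/unix (Linux-only; /tmp is just an assumed directory, and the effect of SEEK_END on a directory can vary by filesystem outside gVisor):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatalf("open: %v", err)
	}
	defer unix.Close(fd)

	// lseek(fd, 0, SEEK_END) on a directory parks the offset at the end;
	// the fix above makes a subsequent getdents treat that as "done".
	if _, err := unix.Seek(fd, 0, unix.SEEK_END); err != nil {
		log.Fatalf("lseek: %v", err)
	}

	buf := make([]byte, 4096)
	n, err := unix.Getdents(fd, buf)
	if err != nil {
		log.Fatalf("getdents: %v", err)
	}
	log.Printf("getdents returned %d bytes (expected 0 after SEEK_END)", n)
}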
259,992 | 24.06.2019 15:01:58 | 25,200 | b21b1db7003aea8615ab9e6a4f07b74c97a32c24 | Allow to change logging options using 'runsc debug'
New options are:
runsc debug --strace=off|all|function1,function2
runsc debug --log-level=warning|info|debug
runsc debug --log-packets=true|false
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/log/log.go",
"new_path": "pkg/log/log.go",
"diff": "@@ -50,6 +50,19 @@ const (\nDebug\n)\n+func (l Level) String() string {\n+ switch l {\n+ case Warning:\n+ return \"Warning\"\n+ case Info:\n+ return \"Info\"\n+ case Debug:\n+ return \"Debug\"\n+ default:\n+ return fmt.Sprintf(\"Invalid level: %d\", l)\n+ }\n+}\n+\n// Emitter is the final destination for logs.\ntype Emitter interface {\n// Emit emits the given log statement. This allows for control over the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/BUILD",
"new_path": "pkg/sentry/control/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"control\",\nsrcs = [\n\"control.go\",\n+ \"logging.go\",\n\"pprof.go\",\n\"proc.go\",\n\"state.go\",\n@@ -26,8 +27,10 @@ go_library(\n\"//pkg/sentry/kernel/time\",\n\"//pkg/sentry/limits\",\n\"//pkg/sentry/state\",\n+ \"//pkg/sentry/strace\",\n\"//pkg/sentry/usage\",\n\"//pkg/sentry/watchdog\",\n+ \"//pkg/tcpip/link/sniffer\",\n\"//pkg/urpc\",\n],\n)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/control/logging.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package control\n+\n+import (\n+ \"fmt\"\n+ \"sync/atomic\"\n+\n+ \"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/sentry/strace\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/sniffer\"\n+)\n+\n+// LoggingArgs are the arguments to use for changing the logging\n+// level and strace list.\n+type LoggingArgs struct {\n+ // SetLevel is a flag used to indicate that we should update\n+ // the logging level. We should be able to change the strace\n+ // list without affecting the logging level and vice versa.\n+ SetLevel bool\n+\n+ // Level is the log level that will be set if SetLevel is true.\n+ Level log.Level\n+\n+ // SetLogPackets indicates that we should update the log packets flag.\n+ SetLogPackets bool\n+\n+ // LogPackets is the actual value to set for LogPackets.\n+ // SetLogPackets must be enabled to indicate that we're changing\n+ // the value.\n+ LogPackets bool\n+\n+ // SetStrace is a flag used to indicate that strace related\n+ // arguments were passed in.\n+ SetStrace bool\n+\n+ // EnableStrace is a flag from the CLI that specifies whether to\n+ // enable strace at all. If this flag is false then a completely\n+ // pristine copy of the syscall table will be swapped in. This\n+ // approach is used to remain consistent with an empty strace\n+ // whitelist meaning trace all system calls.\n+ EnableStrace bool\n+\n+ // Strace is the whitelist of syscalls to trace to log. If this\n+ // and StraceEventWhitelist are empty trace all system calls.\n+ StraceWhitelist []string\n+\n+ // SetEventStrace is a flag used to indicate that event strace\n+ // related arguments were passed in.\n+ SetEventStrace bool\n+\n+ // StraceEventWhitelist is the whitelist of syscalls to trace\n+ // to event log.\n+ StraceEventWhitelist []string\n+}\n+\n+// Logging provides functions related to logging.\n+type Logging struct{}\n+\n+// Change will change the log level and strace arguments. Although\n+// this functions signature requires an error it never acctually\n+// return san error. 
It's required by the URPC interface.\n+// Additionally, it may look odd that this is the only method\n+// attached to an empty struct but this is also part of how\n+// URPC dispatches.\n+func (l *Logging) Change(args *LoggingArgs, code *int) error {\n+ if args.SetLevel {\n+ // Logging uses an atomic for the level so this is thread safe.\n+ log.SetLevel(args.Level)\n+ }\n+\n+ if args.SetLogPackets {\n+ if args.LogPackets {\n+ atomic.StoreUint32(&sniffer.LogPackets, 1)\n+ } else {\n+ atomic.StoreUint32(&sniffer.LogPackets, 0)\n+ }\n+ log.Infof(\"LogPackets set to: %v\", atomic.LoadUint32(&sniffer.LogPackets))\n+ }\n+\n+ if args.SetStrace {\n+ if err := l.configureStrace(args); err != nil {\n+ return fmt.Errorf(\"error configuring strace: %v\", err)\n+ }\n+ }\n+\n+ if args.SetEventStrace {\n+ if err := l.configureEventStrace(args); err != nil {\n+ return fmt.Errorf(\"error configuring event strace: %v\", err)\n+ }\n+ }\n+\n+ return nil\n+}\n+\n+func (l *Logging) configureStrace(args *LoggingArgs) error {\n+ if args.EnableStrace {\n+ // Install the whitelist specified.\n+ if len(args.StraceWhitelist) > 0 {\n+ if err := strace.Enable(args.StraceWhitelist, strace.SinkTypeLog); err != nil {\n+ return err\n+ }\n+ } else {\n+ // For convenience, if strace is enabled but whitelist\n+ // is empty, enable everything to log.\n+ strace.EnableAll(strace.SinkTypeLog)\n+ }\n+ } else {\n+ // Uninstall all strace functions.\n+ strace.Disable(strace.SinkTypeLog)\n+ }\n+ return nil\n+}\n+\n+func (l *Logging) configureEventStrace(args *LoggingArgs) error {\n+ if len(args.StraceEventWhitelist) > 0 {\n+ if err := strace.Enable(args.StraceEventWhitelist, strace.SinkTypeEvent); err != nil {\n+ return err\n+ }\n+ } else {\n+ strace.Disable(strace.SinkTypeEvent)\n+ }\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -96,8 +96,10 @@ const (\n// SandboxStacks collects sandbox stacks for debugging.\nSandboxStacks = \"debug.Stacks\"\n+)\n// Profiling related commands (see pprof.go for more details).\n+const (\nStartCPUProfile = \"Profile.StartCPUProfile\"\nStopCPUProfile = \"Profile.StopCPUProfile\"\nHeapProfile = \"Profile.HeapProfile\"\n@@ -105,6 +107,11 @@ const (\nStopTrace = \"Profile.StopTrace\"\n)\n+// Logging related commands (see logging.go for more details).\n+const (\n+ ChangeLogging = \"Logging.Change\"\n+)\n+\n// ControlSocketAddr generates an abstract unix socket name for the given ID.\nfunc ControlSocketAddr(id string) string {\nreturn fmt.Sprintf(\"\\x00runsc-sandbox.%s\", id)\n@@ -143,6 +150,7 @@ func newController(fd int, l *Loader) (*controller, error) {\n}\nsrv.Register(&debug{})\n+ srv.Register(&control.Logging{})\nif l.conf.ProfileEnable {\nsrv.Register(&control.Profile{})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/debug.go",
"new_path": "runsc/cmd/debug.go",
"diff": "@@ -17,12 +17,15 @@ package cmd\nimport (\n\"context\"\n\"os\"\n+ \"strconv\"\n+ \"strings\"\n\"syscall\"\n\"time\"\n\"flag\"\n\"github.com/google/subcommands\"\n\"gvisor.dev/gvisor/pkg/log\"\n+ \"gvisor.dev/gvisor/pkg/sentry/control\"\n\"gvisor.dev/gvisor/runsc/boot\"\n\"gvisor.dev/gvisor/runsc/container\"\n)\n@@ -36,6 +39,9 @@ type Debug struct {\nprofileCPU string\nprofileDelay int\ntrace string\n+ strace string\n+ logLevel string\n+ logPackets string\n}\n// Name implements subcommands.Command.\n@@ -62,6 +68,9 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {\nf.IntVar(&d.profileDelay, \"profile-delay\", 5, \"amount of time to wait before stoping CPU profile\")\nf.StringVar(&d.trace, \"trace\", \"\", \"writes an execution trace to the given file.\")\nf.IntVar(&d.signal, \"signal\", -1, \"sends signal to the sandbox\")\n+ f.StringVar(&d.strace, \"strace\", \"\", `A comma separated list of syscalls to trace. \"all\" enables all traces, \"off\" disables all`)\n+ f.StringVar(&d.logLevel, \"log-level\", \"\", \"The log level to set: warning (0), info (1), or debug (2).\")\n+ f.StringVar(&d.logPackets, \"log-packets\", \"\", \"A boolean value to enable or disable packet logging: true or false.\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -78,7 +87,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nvar err error\nc, err = container.Load(conf.RootDir, f.Arg(0))\nif err != nil {\n- Fatalf(\"loading container %q: %v\", f.Arg(0), err)\n+ return Errorf(\"loading container %q: %v\", f.Arg(0), err)\n}\n} else {\nif f.NArg() != 0 {\n@@ -88,12 +97,12 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Go over all sandboxes and find the one that matches PID.\nids, err := container.List(conf.RootDir)\nif err != nil {\n- Fatalf(\"listing containers: %v\", err)\n+ return Errorf(\"listing containers: %v\", err)\n}\nfor _, id := range ids {\ncandidate, err := container.Load(conf.RootDir, id)\nif err != nil {\n- Fatalf(\"loading container %q: %v\", id, err)\n+ return Errorf(\"loading container %q: %v\", id, err)\n}\nif candidate.SandboxPid() == d.pid {\nc = candidate\n@@ -101,38 +110,38 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n}\nif c == nil {\n- Fatalf(\"container with PID %d not found\", d.pid)\n+ return Errorf(\"container with PID %d not found\", d.pid)\n}\n}\nif c.Sandbox == nil || !c.Sandbox.IsRunning() {\n- Fatalf(\"container sandbox is not running\")\n+ return Errorf(\"container sandbox is not running\")\n}\nlog.Infof(\"Found sandbox %q, PID: %d\", c.Sandbox.ID, c.Sandbox.Pid)\nif d.signal > 0 {\nlog.Infof(\"Sending signal %d to process: %d\", d.signal, c.Sandbox.Pid)\nif err := syscall.Kill(c.Sandbox.Pid, syscall.Signal(d.signal)); err != nil {\n- Fatalf(\"failed to send signal %d to processs %d\", d.signal, c.Sandbox.Pid)\n+ return Errorf(\"failed to send signal %d to processs %d\", d.signal, c.Sandbox.Pid)\n}\n}\nif d.stacks {\nlog.Infof(\"Retrieving sandbox stacks\")\nstacks, err := c.Sandbox.Stacks()\nif err != nil {\n- Fatalf(\"retrieving stacks: %v\", err)\n+ return Errorf(\"retrieving stacks: %v\", err)\n}\nlog.Infof(\" *** Stack dump ***\\n%s\", stacks)\n}\nif d.profileHeap != \"\" {\nf, err := os.Create(d.profileHeap)\nif err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\ndefer f.Close()\nif err := c.Sandbox.HeapProfile(f); err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\nlog.Infof(\"Heap profile written to %q\", 
d.profileHeap)\n}\n@@ -142,7 +151,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\ndelay = true\nf, err := os.Create(d.profileCPU)\nif err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\ndefer func() {\nf.Close()\n@@ -152,7 +161,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nlog.Infof(\"CPU profile written to %q\", d.profileCPU)\n}()\nif err := c.Sandbox.StartCPUProfile(f); err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\nlog.Infof(\"CPU profile started for %d sec, writing to %q\", d.profileDelay, d.profileCPU)\n}\n@@ -160,7 +169,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\ndelay = true\nf, err := os.Create(d.trace)\nif err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\ndefer func() {\nf.Close()\n@@ -170,15 +179,71 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nlog.Infof(\"Trace written to %q\", d.trace)\n}()\nif err := c.Sandbox.StartTrace(f); err != nil {\n- Fatalf(err.Error())\n+ return Errorf(err.Error())\n}\nlog.Infof(\"Tracing started for %d sec, writing to %q\", d.profileDelay, d.trace)\n}\n+ if d.strace != \"\" || len(d.logLevel) != 0 || len(d.logPackets) != 0 {\n+ args := control.LoggingArgs{}\n+ switch strings.ToLower(d.strace) {\n+ case \"\":\n+ // strace not set, nothing to do here.\n+\n+ case \"off\":\n+ log.Infof(\"Disabling strace\")\n+ args.SetStrace = true\n+\n+ case \"all\":\n+ log.Infof(\"Enabling all straces\")\n+ args.SetStrace = true\n+ args.EnableStrace = true\n+\n+ default:\n+ log.Infof(\"Enabling strace for syscalls: %s\", d.strace)\n+ args.SetStrace = true\n+ args.EnableStrace = true\n+ args.StraceWhitelist = strings.Split(d.strace, \",\")\n+ }\n+\n+ if len(d.logLevel) != 0 {\n+ args.SetLevel = true\n+ switch strings.ToLower(d.logLevel) {\n+ case \"warning\", \"0\":\n+ args.Level = log.Warning\n+ case \"info\", \"1\":\n+ args.Level = log.Info\n+ case \"debug\", \"2\":\n+ args.Level = log.Debug\n+ default:\n+ return Errorf(\"invalid log level %q\", d.logLevel)\n+ }\n+ log.Infof(\"Setting log level %v\", args.Level)\n+ }\n+\n+ if len(d.logPackets) != 0 {\n+ args.SetLogPackets = true\n+ lp, err := strconv.ParseBool(d.logPackets)\n+ if err != nil {\n+ return Errorf(\"invalid value for log_packets %q\", d.logPackets)\n+ }\n+ args.LogPackets = lp\n+ if args.LogPackets {\n+ log.Infof(\"Enabling packet logging\")\n+ } else {\n+ log.Infof(\"Disabling packet logging\")\n+ }\n+ }\n+\n+ if err := c.Sandbox.ChangeLogging(args); err != nil {\n+ return Errorf(err.Error())\n+ }\n+ log.Infof(\"Logging options changed\")\n+ }\n+\nif delay {\ntime.Sleep(time.Duration(d.profileDelay) * time.Second)\n-\n}\nreturn subcommands.ExitSuccess\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -960,7 +960,7 @@ func (s *Sandbox) StartTrace(f *os.File) error {\nreturn nil\n}\n-// StopTrace stops a previously started trace..\n+// StopTrace stops a previously started trace.\nfunc (s *Sandbox) StopTrace() error {\nlog.Debugf(\"Trace stop %q\", s.ID)\nconn, err := s.sandboxConnect()\n@@ -975,6 +975,21 @@ func (s *Sandbox) StopTrace() error {\nreturn nil\n}\n+// ChangeLogging changes logging options.\n+func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {\n+ log.Debugf(\"Change logging start %q\", s.ID)\n+ conn, err := s.sandboxConnect()\n+ if err != nil {\n+ return err\n+ }\n+ defer conn.Close()\n+\n+ if err := conn.Call(boot.ChangeLogging, &args, nil); err != nil {\n+ return fmt.Errorf(\"changing sandbox %q logging: %v\", s.ID, err)\n+ }\n+ return nil\n+}\n+\n// DestroyContainer destroys the given container. If it is the root container,\n// then the entire sandbox is destroyed.\nfunc (s *Sandbox) DestroyContainer(cid string) error {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow to change logging options using 'runsc debug'
New options are:
runsc debug --strace=off|all|function1,function2
runsc debug --log-level=warning|info|debug
runsc debug --log-packets=true|false
Updates #407
PiperOrigin-RevId: 254843128 |
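A minimal sketch of what the new flags translate to internally: the debug command builds a single control.LoggingArgs value and sends it to the sandbox through the ChangeLogging method added in sandbox.go above. Package paths and field names (SetStrace, StraceWhitelist, SetLevel, SetLogPackets, ...) come from the diff; the helper function, its name, and the example syscall list are illustrative assumptions, not part of the change.

// Hedged sketch: programmatic equivalent of
//   runsc debug --strace=openat,read --log-level=debug --log-packets=true
package debugexample

import (
	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sentry/control"
	"gvisor.dev/gvisor/runsc/sandbox"
)

func enableDebugLogging(s *sandbox.Sandbox) error {
	args := control.LoggingArgs{
		// Enable strace for just two syscalls.
		SetStrace:       true,
		EnableStrace:    true,
		StraceWhitelist: []string{"openat", "read"},

		// Bump the log level to debug.
		SetLevel: true,
		Level:    log.Debug,

		// Turn packet logging on.
		SetLogPackets: true,
		LogPackets:    true,
	}
	return s.ChangeLogging(args)
}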
259,853 | 24.06.2019 15:47:52 | 25,200 | c5486f512224dc0fda6b3509c1eb9f83dff950b4 | platform/ptrace: specify PTRACE_O_TRACEEXIT for stub-processes
The tracee is stopped early during process exit, when registers are still
available, allowing the tracer to see where the exit occurred, whereas the
normal exit notification is done after the process is finished exiting.
Without this option, dumpAndPanic fails to get registers. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -376,7 +376,7 @@ func (t *thread) init() {\nsyscall.PTRACE_SETOPTIONS,\nuintptr(t.tid),\n0,\n- syscall.PTRACE_O_TRACESYSGOOD,\n+ syscall.PTRACE_O_TRACESYSGOOD|syscall.PTRACE_O_TRACEEXIT,\n0, 0)\nif errno != 0 {\npanic(fmt.Sprintf(\"ptrace set options failed: %v\", errno))\n@@ -419,7 +419,7 @@ func (t *thread) syscall(regs *syscall.PtraceRegs) (uintptr, error) {\n// between syscall-enter-stop and syscall-exit-stop; it happens *after*\n// syscall-exit-stop.)\" - ptrace(2), \"Syscall-stops\"\nif sig := t.wait(stopped); sig != (syscallEvent | syscall.SIGTRAP) {\n- panic(fmt.Sprintf(\"wait failed: expected SIGTRAP, got %v [%d]\", sig, sig))\n+ t.dumpAndPanic(fmt.Sprintf(\"wait failed: expected SIGTRAP, got %v [%d]\", sig, sig))\n}\n// Grab registers.\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/ptrace: specify PTRACE_O_TRACEEXIT for stub-processes
The tracee is stopped early during process exit, when registers are still
available, allowing the tracer to see where the exit occurred, whereas the
normal exit notification is done after the process is finished exiting.
Without this option, dumpAndPanic fails to get registers.
PiperOrigin-RevId: 254852917 |
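For readers unfamiliar with exit-stops, the following standalone sketch (Linux/amd64, not gVisor code) shows the behavior the commit relies on: with PTRACE_O_TRACEEXIT set, the tracer gets an extra stop just before the tracee exits, at which point PTRACE_GETREGS still works; after the normal exit notification the registers are gone. The traced binary (/bin/true) is just an example.

package main

import (
	"fmt"
	"os/exec"
	"runtime"
	"syscall"
)

func main() {
	// Ptrace requests must come from the thread that traces the child.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	cmd := exec.Command("/bin/true")
	cmd.SysProcAttr = &syscall.SysProcAttr{Ptrace: true}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	pid := cmd.Process.Pid

	// The child stops with SIGTRAP after execve because of Ptrace: true.
	var status syscall.WaitStatus
	if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil {
		panic(err)
	}

	// Ask for an extra stop just before the tracee exits.
	opts := syscall.PTRACE_O_TRACESYSGOOD | syscall.PTRACE_O_TRACEEXIT
	if err := syscall.PtraceSetOptions(pid, opts); err != nil {
		panic(err)
	}

	for {
		if err := syscall.PtraceCont(pid, 0); err != nil {
			panic(err)
		}
		if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil {
			panic(err)
		}
		if status.Exited() {
			// Normal exit notification: too late to read registers.
			fmt.Println("child exited; registers no longer available")
			return
		}
		if status.Stopped() && status.TrapCause() == syscall.PTRACE_EVENT_EXIT {
			// Exit-stop: the tracee is about to exit but its registers can
			// still be fetched, which is what dumpAndPanic needs.
			var regs syscall.PtraceRegs
			if err := syscall.PtraceGetRegs(pid, &regs); err != nil {
				panic(err)
			}
			fmt.Printf("exit-stop: RIP=%#x RSP=%#x\n", regs.Rip, regs.Rsp)
		}
	}
}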
259,985 | 24.06.2019 15:55:26 | 25,200 | 94a6bfab5d0a2a1950915d7db9cd5e1502de8c13 | Implement /proc/net/tcp. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/BUILD",
"new_path": "pkg/sentry/fs/proc/BUILD",
"diff": "@@ -31,6 +31,7 @@ go_library(\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/binary\",\n\"//pkg/log\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/fs\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"time\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/binary\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/context\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n@@ -55,8 +56,7 @@ func (p *proc) newNetDir(ctx context.Context, k *kernel.Kernel, msrc *fs.MountSo\n\"psched\": newStaticProcInode(ctx, msrc, []byte(fmt.Sprintf(\"%08x %08x %08x %08x\\n\", uint64(time.Microsecond/time.Nanosecond), 64, 1000000, uint64(time.Second/time.Nanosecond)))),\n\"ptype\": newStaticProcInode(ctx, msrc, []byte(\"Type Device Function\")),\n\"route\": newStaticProcInode(ctx, msrc, []byte(\"Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT\")),\n- \"tcp\": newStaticProcInode(ctx, msrc, []byte(\" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\")),\n-\n+ \"tcp\": seqfile.NewSeqFileInode(ctx, &netTCP{k: k}, msrc),\n\"udp\": newStaticProcInode(ctx, msrc, []byte(\" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops\")),\n\"unix\": seqfile.NewSeqFileInode(ctx, &netUnix{k: k}, msrc),\n@@ -210,10 +210,6 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\n}\nvar buf bytes.Buffer\n- // Header\n- fmt.Fprintf(&buf, \"Num RefCount Protocol Flags Type St Inode Path\\n\")\n-\n- // Entries\nfor _, se := range n.k.ListSockets() {\ns := se.Sock.Get()\nif s == nil {\n@@ -222,6 +218,7 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\n}\nsfile := s.(*fs.File)\nif family, _, _ := sfile.FileOperations.(socket.Socket).Type(); family != linux.AF_UNIX {\n+ s.DecRef()\n// Not a unix socket.\ncontinue\n}\n@@ -281,12 +278,160 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\n}\nfmt.Fprintf(&buf, \"\\n\")\n- sfile.DecRef()\n+ s.DecRef()\n}\n- data := []seqfile.SeqData{{\n+ data := []seqfile.SeqData{\n+ {\n+ Buf: []byte(\"Num RefCount Protocol Flags Type St Inode Path\\n\"),\n+ Handle: n,\n+ },\n+ {\nBuf: buf.Bytes(),\n- Handle: (*netUnix)(nil),\n- }}\n+ Handle: n,\n+ },\n+ }\n+ return data, 0\n+}\n+\n+// netTCP implements seqfile.SeqSource for /proc/net/tcp.\n+//\n+// +stateify savable\n+type netTCP struct {\n+ k *kernel.Kernel\n+}\n+\n+// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.\n+func (*netTCP) NeedsUpdate(generation int64) bool {\n+ return true\n+}\n+\n+// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.\n+func (n *netTCP) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {\n+ t := kernel.TaskFromContext(ctx)\n+\n+ if h != nil {\n+ return nil, 0\n+ }\n+\n+ var buf bytes.Buffer\n+ for _, se := range n.k.ListSockets() {\n+ s := se.Sock.Get()\n+ if s == nil {\n+ log.Debugf(\"Couldn't resolve weakref %+v in socket table, racing with destruction?\", se.Sock)\n+ continue\n+ }\n+ sfile := s.(*fs.File)\n+ sops, ok := sfile.FileOperations.(socket.Socket)\n+ if !ok {\n+ panic(fmt.Sprintf(\"Found non-socket file in socket table: %+v\", sfile))\n+ }\n+ if family, stype, _ := sops.Type(); !(family == linux.AF_INET && stype == linux.SOCK_STREAM) {\n+ s.DecRef()\n+ // Not tcp4 sockets.\n+ continue\n+ }\n+\n+ // Linux's documentation for the fields below can be found at\n+ // https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt.\n+ // For Linux's implementation, see net/ipv4/tcp_ipv4.c:get_tcp4_sock().\n+ // Note that the header doesn't contain labels for all the fields.\n+\n+ // Field: sl; 
entry number.\n+ fmt.Fprintf(&buf, \"%4d: \", se.ID)\n+\n+ portBuf := make([]byte, 2)\n+\n+ // Field: local_adddress.\n+ var localAddr linux.SockAddrInet\n+ if local, _, err := sops.GetSockName(t); err == nil {\n+ localAddr = local.(linux.SockAddrInet)\n+ }\n+ binary.LittleEndian.PutUint16(portBuf, localAddr.Port)\n+ fmt.Fprintf(&buf, \"%08X:%04X \",\n+ binary.LittleEndian.Uint32(localAddr.Addr[:]),\n+ portBuf)\n+\n+ // Field: rem_address.\n+ var remoteAddr linux.SockAddrInet\n+ if remote, _, err := sops.GetPeerName(t); err == nil {\n+ remoteAddr = remote.(linux.SockAddrInet)\n+ }\n+ binary.LittleEndian.PutUint16(portBuf, remoteAddr.Port)\n+ fmt.Fprintf(&buf, \"%08X:%04X \",\n+ binary.LittleEndian.Uint32(remoteAddr.Addr[:]),\n+ portBuf)\n+\n+ // Field: state; socket state.\n+ fmt.Fprintf(&buf, \"%02X \", sops.State())\n+\n+ // Field: tx_queue, rx_queue; number of packets in the transmit and\n+ // receive queue. Unimplemented.\n+ fmt.Fprintf(&buf, \"%08X:%08X \", 0, 0)\n+\n+ // Field: tr, tm->when; timer active state and number of jiffies\n+ // until timer expires. Unimplemented.\n+ fmt.Fprintf(&buf, \"%02X:%08X \", 0, 0)\n+\n+ // Field: retrnsmt; number of unrecovered RTO timeouts.\n+ // Unimplemented.\n+ fmt.Fprintf(&buf, \"%08X \", 0)\n+\n+ // Field: uid.\n+ uattr, err := sfile.Dirent.Inode.UnstableAttr(ctx)\n+ if err != nil {\n+ log.Warningf(\"Failed to retrieve unstable attr for socket file: %v\", err)\n+ fmt.Fprintf(&buf, \"%5d \", 0)\n+ } else {\n+ fmt.Fprintf(&buf, \"%5d \", uint32(uattr.Owner.UID.In(t.UserNamespace()).OrOverflow()))\n+ }\n+\n+ // Field: timeout; number of unanswered 0-window probes.\n+ // Unimplemented.\n+ fmt.Fprintf(&buf, \"%8d \", 0)\n+\n+ // Field: inode.\n+ fmt.Fprintf(&buf, \"%8d \", sfile.InodeID())\n+\n+ // Field: refcount. Don't count the ref we obtain while deferencing\n+ // the weakref to this socket.\n+ fmt.Fprintf(&buf, \"%d \", sfile.ReadRefs()-1)\n+\n+ // Field: Socket struct address. Redacted due to the same reason as\n+ // the 'Num' field in /proc/net/unix, see netUnix.ReadSeqFileData.\n+ fmt.Fprintf(&buf, \"%#016p \", (*socket.Socket)(nil))\n+\n+ // Field: retransmit timeout. Unimplemented.\n+ fmt.Fprintf(&buf, \"%d \", 0)\n+\n+ // Field: predicted tick of soft clock (delayed ACK control data).\n+ // Unimplemented.\n+ fmt.Fprintf(&buf, \"%d \", 0)\n+\n+ // Field: (ack.quick<<1)|ack.pingpong, Unimplemented.\n+ fmt.Fprintf(&buf, \"%d \", 0)\n+\n+ // Field: sending congestion window, Unimplemented.\n+ fmt.Fprintf(&buf, \"%d \", 0)\n+\n+ // Field: Slow start size threshold, -1 if threshold >= 0xFFFF.\n+ // Unimplemented, report as large threshold.\n+ fmt.Fprintf(&buf, \"%d\", -1)\n+\n+ fmt.Fprintf(&buf, \"\\n\")\n+\n+ s.DecRef()\n+ }\n+\n+ data := []seqfile.SeqData{\n+ {\n+ Buf: []byte(\" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode \\n\"),\n+ Handle: n,\n+ },\n+ {\n+ Buf: buf.Bytes(),\n+ Handle: n,\n+ },\n+ }\nreturn data, 0\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/BUILD",
"new_path": "test/syscalls/linux/BUILD",
"diff": "@@ -3340,3 +3340,18 @@ cc_binary(\n\"@com_google_googletest//:gtest\",\n],\n)\n+\n+cc_binary(\n+ name = \"proc_net_tcp_test\",\n+ testonly = 1,\n+ srcs = [\"proc_net_tcp.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \":ip_socket_test_util\",\n+ \"//test/util:file_descriptor\",\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ \"@com_google_absl//absl/strings\",\n+ \"@com_google_googletest//:gtest\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "test/syscalls/linux/proc_net_tcp.cc",
"diff": "+// Copyright 2019 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <sys/socket.h>\n+#include <sys/stat.h>\n+#include <sys/types.h>\n+#include <unistd.h>\n+\n+#include \"gtest/gtest.h\"\n+#include \"gtest/gtest.h\"\n+#include \"absl/strings/numbers.h\"\n+#include \"absl/strings/str_join.h\"\n+#include \"absl/strings/str_split.h\"\n+#include \"test/syscalls/linux/ip_socket_test_util.h\"\n+#include \"test/util/file_descriptor.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+namespace {\n+\n+using absl::StrCat;\n+using absl::StrSplit;\n+\n+constexpr char kProcNetTCPHeader[] =\n+ \" sl local_address rem_address st tx_queue rx_queue tr tm->when \"\n+ \"retrnsmt uid timeout inode \"\n+ \" \";\n+\n+// Possible values of the \"st\" field in a /proc/net/tcp entry. Source: Linux\n+// kernel, include/net/tcp_states.h.\n+enum {\n+ TCP_ESTABLISHED = 1,\n+ TCP_SYN_SENT,\n+ TCP_SYN_RECV,\n+ TCP_FIN_WAIT1,\n+ TCP_FIN_WAIT2,\n+ TCP_TIME_WAIT,\n+ TCP_CLOSE,\n+ TCP_CLOSE_WAIT,\n+ TCP_LAST_ACK,\n+ TCP_LISTEN,\n+ TCP_CLOSING,\n+ TCP_NEW_SYN_RECV,\n+\n+ TCP_MAX_STATES\n+};\n+\n+// TCPEntry represents a single entry from /proc/net/tcp.\n+struct TCPEntry {\n+ uint32_t local_addr;\n+ uint16_t local_port;\n+\n+ uint32_t remote_addr;\n+ uint16_t remote_port;\n+\n+ uint64_t state;\n+ uint64_t uid;\n+ uint64_t inode;\n+};\n+\n+uint32_t IP(const struct sockaddr* addr) {\n+ auto* in_addr = reinterpret_cast<const struct sockaddr_in*>(addr);\n+ return in_addr->sin_addr.s_addr;\n+}\n+\n+uint16_t Port(const struct sockaddr* addr) {\n+ auto* in_addr = reinterpret_cast<const struct sockaddr_in*>(addr);\n+ return ntohs(in_addr->sin_port);\n+}\n+\n+// Finds the first entry in 'entries' for which 'predicate' returns true.\n+// Returns true on match, and sets 'match' to point to the matching entry.\n+bool FindBy(std::vector<TCPEntry> entries, TCPEntry* match,\n+ std::function<bool(const TCPEntry&)> predicate) {\n+ for (int i = 0; i < entries.size(); ++i) {\n+ if (predicate(entries[i])) {\n+ *match = entries[i];\n+ return true;\n+ }\n+ }\n+ return false;\n+}\n+\n+bool FindByLocalAddr(std::vector<TCPEntry> entries, TCPEntry* match,\n+ const struct sockaddr* addr) {\n+ uint32_t host = IP(addr);\n+ uint16_t port = Port(addr);\n+ return FindBy(entries, match, [host, port](const TCPEntry& e) {\n+ return (e.local_addr == host && e.local_port == port);\n+ });\n+}\n+\n+bool FindByRemoteAddr(std::vector<TCPEntry> entries, TCPEntry* match,\n+ const struct sockaddr* addr) {\n+ uint32_t host = IP(addr);\n+ uint16_t port = Port(addr);\n+ return FindBy(entries, match, [host, port](const TCPEntry& e) {\n+ return (e.remote_addr == host && e.remote_port == port);\n+ });\n+}\n+\n+// Returns a parsed representation of /proc/net/tcp entries.\n+PosixErrorOr<std::vector<TCPEntry>> ProcNetTCPEntries() {\n+ std::string content;\n+ RETURN_IF_ERRNO(GetContents(\"/proc/net/tcp\", &content));\n+\n+ bool found_header = false;\n+ std::vector<TCPEntry> entries;\n+ 
std::vector<std::string> lines = StrSplit(content, '\\n');\n+ std::cerr << \"<contents of /proc/net/tcp>\" << std::endl;\n+ for (std::string line : lines) {\n+ std::cerr << line << std::endl;\n+\n+ if (!found_header) {\n+ EXPECT_EQ(line, kProcNetTCPHeader);\n+ found_header = true;\n+ continue;\n+ }\n+ if (line.empty()) {\n+ continue;\n+ }\n+\n+ // Parse a single entry from /proc/net/tcp.\n+ //\n+ // Example entries:\n+ //\n+ // clang-format off\n+ //\n+ // sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n+ // 0: 00000000:006F 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 1968 1 0000000000000000 100 0 0 10 0\n+ // 1: 0100007F:7533 00000000:0000 0A 00000000:00000000 00:00000000 00000000 120 0 10684 1 0000000000000000 100 0 0 10 0\n+ // ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^\n+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n+ //\n+ // clang-format on\n+\n+ TCPEntry entry;\n+ std::vector<std::string> fields =\n+ StrSplit(line, absl::ByAnyChar(\": \"), absl::SkipEmpty());\n+\n+ ASSIGN_OR_RETURN_ERRNO(entry.local_addr, AtoiBase(fields[1], 16));\n+ ASSIGN_OR_RETURN_ERRNO(entry.local_port, AtoiBase(fields[2], 16));\n+\n+ ASSIGN_OR_RETURN_ERRNO(entry.remote_addr, AtoiBase(fields[3], 16));\n+ ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16));\n+\n+ ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16));\n+ ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi<uint64_t>(fields[11]));\n+ ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi<uint64_t>(fields[13]));\n+\n+ entries.push_back(entry);\n+ }\n+ std::cerr << \"<end of /proc/net/tcp>\" << std::endl;\n+\n+ return entries;\n+}\n+\n+TEST(ProcNetTCP, Exists) {\n+ const std::string content =\n+ ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/net/tcp\"));\n+ const std::string header_line = StrCat(kProcNetTCPHeader, \"\\n\");\n+ if (IsRunningOnGvisor()) {\n+ // Should be just the header since we don't have any tcp sockets yet.\n+ EXPECT_EQ(content, header_line);\n+ } else {\n+ // On a general linux machine, we could have abitrary sockets on the system,\n+ // so just check the header.\n+ EXPECT_THAT(content, ::testing::StartsWith(header_line));\n+ }\n+}\n+\n+TEST(ProcNetTCP, EntryUID) {\n+ auto sockets =\n+ ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());\n+ std::vector<TCPEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+ TCPEntry e;\n+ EXPECT_TRUE(FindByLocalAddr(entries, &e, sockets->first_addr()));\n+ EXPECT_EQ(e.uid, geteuid());\n+ EXPECT_TRUE(FindByRemoteAddr(entries, &e, sockets->first_addr()));\n+ EXPECT_EQ(e.uid, geteuid());\n+}\n+\n+TEST(ProcNetTCP, BindAcceptConnect) {\n+ auto sockets =\n+ ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());\n+ std::vector<TCPEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+ // We can only make assertions about the total number of entries if we control\n+ // the entire \"machine\".\n+ if (IsRunningOnGvisor()) {\n+ EXPECT_EQ(entries.size(), 2);\n+ }\n+\n+ TCPEntry e;\n+ EXPECT_TRUE(FindByLocalAddr(entries, &e, sockets->first_addr()));\n+ EXPECT_TRUE(FindByRemoteAddr(entries, &e, sockets->first_addr()));\n+}\n+\n+TEST(ProcNetTCP, InodeReasonable) {\n+ auto sockets =\n+ ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPAcceptBindSocketPair(0).Create());\n+ std::vector<TCPEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+\n+ TCPEntry accepted_entry;\n+ ASSERT_TRUE(FindByLocalAddr(entries, &accepted_entry, sockets->first_addr()));\n+ EXPECT_NE(accepted_entry.inode, 0);\n+\n+ 
TCPEntry client_entry;\n+ ASSERT_TRUE(FindByRemoteAddr(entries, &client_entry, sockets->first_addr()));\n+ EXPECT_NE(client_entry.inode, 0);\n+ EXPECT_NE(accepted_entry.inode, client_entry.inode);\n+}\n+\n+TEST(ProcNetTCP, State) {\n+ std::unique_ptr<FileDescriptor> server =\n+ ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPUnboundSocket(0).Create());\n+\n+ auto test_addr = V4Loopback();\n+ ASSERT_THAT(\n+ bind(server->get(), reinterpret_cast<struct sockaddr*>(&test_addr.addr),\n+ test_addr.addr_len),\n+ SyscallSucceeds());\n+\n+ struct sockaddr addr;\n+ socklen_t addrlen = sizeof(struct sockaddr);\n+ ASSERT_THAT(getsockname(server->get(), &addr, &addrlen), SyscallSucceeds());\n+ ASSERT_EQ(addrlen, sizeof(struct sockaddr));\n+\n+ ASSERT_THAT(listen(server->get(), 10), SyscallSucceeds());\n+ std::vector<TCPEntry> entries =\n+ ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+ TCPEntry listen_entry;\n+ ASSERT_TRUE(FindByLocalAddr(entries, &listen_entry, &addr));\n+ EXPECT_EQ(listen_entry.state, TCP_LISTEN);\n+\n+ std::unique_ptr<FileDescriptor> client =\n+ ASSERT_NO_ERRNO_AND_VALUE(IPv4TCPUnboundSocket(0).Create());\n+ ASSERT_THAT(connect(client->get(), &addr, addrlen), SyscallSucceeds());\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+ ASSERT_TRUE(FindByLocalAddr(entries, &listen_entry, &addr));\n+ EXPECT_EQ(listen_entry.state, TCP_LISTEN);\n+ TCPEntry client_entry;\n+ ASSERT_TRUE(FindByRemoteAddr(entries, &client_entry, &addr));\n+ EXPECT_EQ(client_entry.state, TCP_ESTABLISHED);\n+\n+ FileDescriptor accepted =\n+ ASSERT_NO_ERRNO_AND_VALUE(Accept(server->get(), nullptr, nullptr));\n+\n+ const uint32_t accepted_local_host = IP(&addr);\n+ const uint16_t accepted_local_port = Port(&addr);\n+\n+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries());\n+ TCPEntry accepted_entry;\n+ ASSERT_TRUE(FindBy(entries, &accepted_entry,\n+ [client_entry, accepted_local_host,\n+ accepted_local_port](const TCPEntry& e) {\n+ return e.local_addr == accepted_local_host &&\n+ e.local_port == accepted_local_port &&\n+ e.remote_addr == client_entry.local_addr &&\n+ e.remote_port == client_entry.local_port;\n+ }));\n+ EXPECT_EQ(accepted_entry.state, TCP_ESTABLISHED);\n+}\n+\n+} // namespace\n+} // namespace testing\n+} // namespace gvisor\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/proc_net_unix.cc",
"new_path": "test/syscalls/linux/proc_net_unix.cc",
"diff": "@@ -162,7 +162,7 @@ PosixErrorOr<std::vector<UnixEntry>> ProcNetUnixEntries() {\n// Finds the first entry in 'entries' for which 'predicate' returns true.\n// Returns true on match, and sets 'match' to point to the matching entry.\nbool FindBy(std::vector<UnixEntry> entries, UnixEntry* match,\n- std::function<bool(UnixEntry)> predicate) {\n+ std::function<bool(const UnixEntry&)> predicate) {\nfor (int i = 0; i < entries.size(); ++i) {\nif (predicate(entries[i])) {\n*match = entries[i];\n@@ -174,7 +174,8 @@ bool FindBy(std::vector<UnixEntry> entries, UnixEntry* match,\nbool FindByPath(std::vector<UnixEntry> entries, UnixEntry* match,\nconst std::string& path) {\n- return FindBy(entries, match, [path](UnixEntry e) { return e.path == path; });\n+ return FindBy(entries, match,\n+ [path](const UnixEntry& e) { return e.path == path; });\n}\nTEST(ProcNetUnix, Exists) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implement /proc/net/tcp.
PiperOrigin-RevId: 254854346 |
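The address columns written by the new netTCP source are easy to misread: the IPv4 address is printed as little-endian hex followed by the port in hex (see the binary.LittleEndian calls in the diff). A small standalone sketch decoding one such field, using the second example row from the test's comment; the parser only handles the two columns shown and is not part of the change.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"strconv"
	"strings"
)

// parseProcNetTCPAddr decodes a "local_address"/"rem_address" field such as
// "0100007F:7533" (which is 127.0.0.1:30003).
func parseProcNetTCPAddr(field string) (net.IP, uint16, error) {
	parts := strings.Split(field, ":")
	if len(parts) != 2 {
		return nil, 0, fmt.Errorf("malformed address %q", field)
	}
	rawIP, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return nil, 0, err
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		return nil, 0, err
	}
	// The 32-bit address is printed in host (little-endian) order, so write
	// it back out little-endian to recover the network-order bytes.
	ip := make(net.IP, 4)
	binary.LittleEndian.PutUint32(ip, uint32(rawIP))
	return ip, uint16(port), nil
}

func main() {
	ip, port, err := parseProcNetTCPAddr("0100007F:7533")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", ip, port) // prints 127.0.0.1:30003
}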
259,858 | 24.06.2019 17:28:46 | 25,200 | 7f5d0afe525af4728ed5ec75193e9e4560d9558c | Add O_EXITKILL to ptrace options.
This prevents a race before PDEATH_SIG can take effect during
a sentry crash.
Discovered by avagin@, who also provided the solution. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -370,13 +370,16 @@ func (t *thread) destroy() {\n// init initializes trace options.\nfunc (t *thread) init() {\n- // Set our TRACESYSGOOD option to differeniate real SIGTRAP.\n+ // Set our TRACESYSGOOD option to differeniate real SIGTRAP. We also\n+ // set PTRACE_O_EXITKILL to ensure that the unexpected exit of the\n+ // sentry will immediately kill the associated stubs.\n+ const PTRACE_O_EXITKILL = 0x100000\n_, _, errno := syscall.RawSyscall6(\nsyscall.SYS_PTRACE,\nsyscall.PTRACE_SETOPTIONS,\nuintptr(t.tid),\n0,\n- syscall.PTRACE_O_TRACESYSGOOD|syscall.PTRACE_O_TRACEEXIT,\n+ syscall.PTRACE_O_TRACESYSGOOD|syscall.PTRACE_O_TRACEEXIT|PTRACE_O_EXITKILL,\n0, 0)\nif errno != 0 {\npanic(fmt.Sprintf(\"ptrace set options failed: %v\", errno))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add O_EXITKILL to ptrace options.
This prevents a race before PDEATH_SIG can take effect during
a sentry crash.
Discovered by avagin@, who also provided the solution.
PiperOrigin-RevId: 254871534 |
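A standalone sketch (not the sentry's code) of what PTRACE_O_EXITKILL buys: once set, the kernel SIGKILLs the tracee the moment the tracer goes away, so even a tracer killed with SIGKILL, before any PDEATHSIG could be delivered, cannot leave a stub running. The constant is available as unix.PTRACE_O_EXITKILL in golang.org/x/sys/unix, which the diff had to define locally against the older syscall package; the sleep binary path is an example.

package main

import (
	"fmt"
	"os/exec"
	"runtime"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Stand-in for a stub process.
	cmd := exec.Command("/bin/sleep", "60")
	cmd.SysProcAttr = &syscall.SysProcAttr{Ptrace: true}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	pid := cmd.Process.Pid

	// Wait for the post-execve SIGTRAP stop.
	var status syscall.WaitStatus
	if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil {
		panic(err)
	}

	// Same option set as the diff above, via the x/sys/unix constants.
	opts := unix.PTRACE_O_TRACESYSGOOD | unix.PTRACE_O_TRACEEXIT | unix.PTRACE_O_EXITKILL
	if err := syscall.PtraceSetOptions(pid, opts); err != nil {
		panic(err)
	}

	// Resume the child and simply exit: because of PTRACE_O_EXITKILL the
	// kernel kills the sleep as soon as this tracer is gone, leaving no
	// window for an orphaned stub to keep running.
	if err := syscall.PtraceCont(pid, 0); err != nil {
		panic(err)
	}
	fmt.Println("tracer exiting; kernel will SIGKILL pid", pid)
}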
259,916 | 21.06.2019 22:40:16 | 25,200 | 3688e6e99d16b0c6ecb7c8b3528a541ce6afe3a7 | Add CLOCK_BOOTTIME as a CLOCK_MONOTONIC alias
Makes CLOCK_BOOTTIME available with
* clock_gettime
* timerfd_create
* clock_gettime vDSO
CLOCK_BOOTTIME is implemented as an alias to CLOCK_MONOTONIC.
CLOCK_MONOTONIC already keeps track of time across save
and restore. This is the closest possible behavior to Linux
CLOCK_BOOTTIME, as there is no concept of suspend/resume.
Updates google/gvisor#218 | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_time.go",
"new_path": "pkg/sentry/syscalls/linux/sys_time.go",
"diff": "@@ -121,8 +121,13 @@ func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {\nswitch clockID {\ncase linux.CLOCK_REALTIME, linux.CLOCK_REALTIME_COARSE:\nreturn t.Kernel().RealtimeClock(), nil\n- case linux.CLOCK_MONOTONIC, linux.CLOCK_MONOTONIC_COARSE, linux.CLOCK_MONOTONIC_RAW:\n+ case linux.CLOCK_MONOTONIC, linux.CLOCK_MONOTONIC_COARSE,\n+ linux.CLOCK_MONOTONIC_RAW, linux.CLOCK_BOOTTIME:\n// CLOCK_MONOTONIC approximates CLOCK_MONOTONIC_RAW.\n+ // CLOCK_BOOTTIME is internally mapped to CLOCK_MONOTONIC, as:\n+ // - CLOCK_BOOTTIME should behave as CLOCK_MONOTONIC while also including suspend time.\n+ // - gVisor has no concept of suspend/resume.\n+ // - CLOCK_MONOTONIC already includes save/restore time, which is the closest to suspend time.\nreturn t.Kernel().MonotonicClock(), nil\ncase linux.CLOCK_PROCESS_CPUTIME_ID:\nreturn t.ThreadGroup().CPUClock(), nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_timerfd.go",
"new_path": "pkg/sentry/syscalls/linux/sys_timerfd.go",
"diff": "@@ -38,7 +38,7 @@ func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel\nswitch clockID {\ncase linux.CLOCK_REALTIME:\nc = t.Kernel().RealtimeClock()\n- case linux.CLOCK_MONOTONIC:\n+ case linux.CLOCK_MONOTONIC, linux.CLOCK_BOOTTIME:\nc = t.Kernel().MonotonicClock()\ndefault:\nreturn 0, nil, syserror.EINVAL\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/clock_gettime.cc",
"new_path": "test/syscalls/linux/clock_gettime.cc",
"diff": "@@ -132,6 +132,9 @@ std::string PrintClockId(::testing::TestParamInfo<clockid_t> info) {\nreturn \"CLOCK_MONOTONIC_COARSE\";\ncase CLOCK_MONOTONIC_RAW:\nreturn \"CLOCK_MONOTONIC_RAW\";\n+ case CLOCK_BOOTTIME:\n+ // CLOCK_BOOTTIME is a monotonic clock.\n+ return \"CLOCK_BOOTTIME\";\ndefault:\nreturn absl::StrCat(info.param);\n}\n@@ -140,15 +143,14 @@ std::string PrintClockId(::testing::TestParamInfo<clockid_t> info) {\nINSTANTIATE_TEST_SUITE_P(ClockGettime, MonotonicClockTest,\n::testing::Values(CLOCK_MONOTONIC,\nCLOCK_MONOTONIC_COARSE,\n- CLOCK_MONOTONIC_RAW),\n+ CLOCK_MONOTONIC_RAW,\n+ CLOCK_BOOTTIME),\nPrintClockId);\nTEST(ClockGettime, UnimplementedReturnsEINVAL) {\nSKIP_IF(!IsRunningOnGvisor());\nstruct timespec tp;\n- EXPECT_THAT(clock_gettime(CLOCK_BOOTTIME, &tp),\n- SyscallFailsWithErrno(EINVAL));\nEXPECT_THAT(clock_gettime(CLOCK_REALTIME_ALARM, &tp),\nSyscallFailsWithErrno(EINVAL));\nEXPECT_THAT(clock_gettime(CLOCK_BOOTTIME_ALARM, &tp),\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/linux/vdso_clock_gettime.cc",
"new_path": "test/syscalls/linux/vdso_clock_gettime.cc",
"diff": "@@ -39,6 +39,8 @@ std::string PrintClockId(::testing::TestParamInfo<clockid_t> info) {\nreturn \"CLOCK_MONOTONIC\";\ncase CLOCK_REALTIME:\nreturn \"CLOCK_REALTIME\";\n+ case CLOCK_BOOTTIME:\n+ return \"CLOCK_BOOTTIME\";\ndefault:\nreturn absl::StrCat(info.param);\n}\n@@ -95,7 +97,9 @@ TEST_P(CorrectVDSOClockTest, IsCorrect) {\n}\nINSTANTIATE_TEST_SUITE_P(ClockGettime, CorrectVDSOClockTest,\n- ::testing::Values(CLOCK_MONOTONIC, CLOCK_REALTIME),\n+ ::testing::Values(CLOCK_MONOTONIC,\n+ CLOCK_REALTIME,\n+ CLOCK_BOOTTIME),\nPrintClockId);\n} // namespace\n"
},
{
"change_type": "MODIFY",
"old_path": "vdso/vdso.cc",
"new_path": "vdso/vdso.cc",
"diff": "@@ -33,6 +33,8 @@ int __common_clock_gettime(clockid_t clock, struct timespec* ts) {\nret = ClockRealtime(ts);\nbreak;\n+ // Fallthrough, CLOCK_BOOTTIME is an alias for CLOCK_MONOTONIC\n+ case CLOCK_BOOTTIME:\ncase CLOCK_MONOTONIC:\nret = ClockMonotonic(ts);\nbreak;\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add CLOCK_BOOTTIME as a CLOCK_MONOTONIC alias
Makes CLOCK_BOOTTIME available with
* clock_gettime
* timerfd_create
* clock_gettime vDSO
CLOCK_BOOTTIME is implemented as an alias to CLOCK_MONOTONIC.
CLOCK_MONOTONIC already keeps track of time across save
and restore. This is the closest possible behavior to Linux
CLOCK_BOOTTIME, as there is no concept of suspend/resume.
Updates google/gvisor#218 |
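A quick way to observe the aliasing from inside a sandbox is to read both clocks back to back: under this change they are served by the same monotonic clock, so the difference stays near zero (on a host kernel it equals the accumulated suspend time). Minimal sketch, assuming golang.org/x/sys/unix is available:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// readClock returns the given clock's current value in seconds.
func readClock(clock int32) float64 {
	var ts unix.Timespec
	if err := unix.ClockGettime(clock, &ts); err != nil {
		panic(err)
	}
	return float64(ts.Sec) + float64(ts.Nsec)/1e9
}

func main() {
	mono := readClock(unix.CLOCK_MONOTONIC)
	boot := readClock(unix.CLOCK_BOOTTIME)
	fmt.Printf("CLOCK_MONOTONIC: %.6f s\n", mono)
	fmt.Printf("CLOCK_BOOTTIME:  %.6f s\n", boot)
	fmt.Printf("difference:      %.6f s\n", boot-mono)
}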
259,853 | 24.06.2019 21:43:14 | 25,200 | fd16a329ce0c9fa1e7dd4c0fc1edc201f4c19571 | fsgofer: reopen files via /proc/self/fd
When we reopen file by path, we can't be sure that
we will open exactly the same file. The file can be
deleted and another one with the same name can be
created. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -37,6 +37,9 @@ import (\nfunc init() {\nlog.SetLevel(log.Debug)\nrand.Seed(time.Now().UnixNano())\n+ if err := fsgofer.OpenProcSelfFD(); err != nil {\n+ panic(err)\n+ }\n}\nfunc testConfig() *Config {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -152,6 +152,10 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// modes exactly as sent by the sandbox, which will have applied its own umask.\nsyscall.Umask(0)\n+ if err := fsgofer.OpenProcSelfFD(); err != nil {\n+ Fatalf(\"failed to open /proc/self/fd: %v\", err)\n+ }\n+\nif err := syscall.Chroot(root); err != nil {\nFatalf(\"failed to chroot to %q: %v\", root, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -28,6 +28,7 @@ import (\n\"path\"\n\"path/filepath\"\n\"runtime\"\n+ \"strconv\"\n\"sync\"\n\"syscall\"\n@@ -223,6 +224,28 @@ type localFile struct {\nlastDirentOffset uint64\n}\n+var procSelfFD *fd.FD\n+\n+// OpenProcSelfFD opens the /proc/self/fd directory, which will be used to\n+// reopen file descriptors.\n+func OpenProcSelfFD() error {\n+ d, err := syscall.Open(\"/proc/self/fd\", syscall.O_RDONLY|syscall.O_DIRECTORY, 0)\n+ if err != nil {\n+ return fmt.Errorf(\"error opening /proc/self/fd: %v\", err)\n+ }\n+ procSelfFD = fd.New(d)\n+ return nil\n+}\n+\n+func reopenProcFd(f *fd.FD, mode int) (*fd.FD, error) {\n+ d, err := syscall.Openat(int(procSelfFD.FD()), strconv.Itoa(f.FD()), mode&^syscall.O_NOFOLLOW, 0)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ return fd.New(d), nil\n+}\n+\nfunc openAnyFileFromParent(parent *localFile, name string) (*fd.FD, string, error) {\npath := path.Join(parent.hostPath, name)\nf, err := openAnyFile(path, func(mode int) (*fd.FD, error) {\n@@ -348,7 +371,7 @@ func (l *localFile) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// name_to_handle_at and open_by_handle_at aren't supported by overlay2.\nlog.Debugf(\"Open reopening file, mode: %v, %q\", mode, l.hostPath)\nvar err error\n- newFile, err = fd.Open(l.hostPath, openFlags|mode.OSFlags(), 0)\n+ newFile, err = reopenProcFd(l.file, openFlags|mode.OSFlags())\nif err != nil {\nreturn nil, p9.QID{}, 0, extractErrno(err)\n}\n@@ -477,7 +500,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\n// Duplicate current file if 'names' is empty.\nif len(names) == 0 {\nnewFile, err := openAnyFile(l.hostPath, func(mode int) (*fd.FD, error) {\n- return fd.Open(l.hostPath, openFlags|mode, 0)\n+ return reopenProcFd(l.file, openFlags|mode)\n})\nif err != nil {\nreturn nil, nil, extractErrno(err)\n@@ -635,7 +658,7 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\nf := l.file\nif l.ft == regular && l.mode != p9.WriteOnly && l.mode != p9.ReadWrite {\nvar err error\n- f, err = fd.Open(l.hostPath, openFlags|syscall.O_WRONLY, 0)\n+ f, err = reopenProcFd(l.file, openFlags|os.O_WRONLY)\nif err != nil {\nreturn extractErrno(err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -31,6 +31,10 @@ func init() {\nallConfs = append(allConfs, rwConfs...)\nallConfs = append(allConfs, roConfs...)\n+\n+ if err := OpenProcSelfFD(); err != nil {\n+ panic(err)\n+ }\n}\nfunc assertPanic(t *testing.T, f func()) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | fsgofer: reopen files via /proc/self/fd
When we reopen file by path, we can't be sure that
we will open exactly the same file. The file can be
deleted and another one with the same name can be
created.
PiperOrigin-RevId: 254898594 |
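A standalone sketch (not the gofer's actual code path) of why the /proc/self/fd reopen is safer than reopening by path: the procfs link resolves to the open file description already held, even after the original path has been unlinked and replaced by a different file.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"syscall"
)

// inode returns the inode number backing an open file.
func inode(f *os.File) uint64 {
	var st syscall.Stat_t
	if err := syscall.Fstat(int(f.Fd()), &st); err != nil {
		panic(err)
	}
	return st.Ino
}

func main() {
	dir, err := ioutil.TempDir("", "reopen")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	path := dir + "/victim"

	orig, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	defer orig.Close()

	// Replace the path with a brand new file, as a hostile workload could.
	if err := os.Remove(path); err != nil {
		panic(err)
	}
	impostor, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	defer impostor.Close()

	// Reopening by path now yields the impostor...
	byPath, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer byPath.Close()

	// ...but reopening via /proc/self/fd yields the original inode.
	selfFD := "/proc/self/fd/" + strconv.Itoa(int(orig.Fd()))
	byFD, err := os.OpenFile(selfFD, os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer byFD.Close()

	fmt.Println("original:", inode(orig))
	fmt.Println("by path: ", inode(byPath)) // differs from original
	fmt.Println("by fd:   ", inode(byFD))   // matches original
}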
259,885 | 25.06.2019 15:37:11 | 25,200 | ffee0f36b1314934e694863f1cb00924e6fc582e | Add //pkg/fdchannel.
To accompany flipcall connections in cases where passing FDs is required
(as for gofers). | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/fdchannel/BUILD",
"diff": "+load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"fdchannel\",\n+ srcs = [\"fdchannel_unsafe.go\"],\n+ importpath = \"gvisor.dev/gvisor/pkg/fdchannel\",\n+ visibility = [\"//visibility:public\"],\n+)\n+\n+go_test(\n+ name = \"fdchannel_test\",\n+ size = \"small\",\n+ srcs = [\"fdchannel_test.go\"],\n+ embed = [\":fdchannel\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/fdchannel/fdchannel_test.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package fdchannel\n+\n+import (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"sync\"\n+ \"syscall\"\n+ \"testing\"\n+ \"time\"\n+)\n+\n+func TestSendRecvFD(t *testing.T) {\n+ sendFile, err := ioutil.TempFile(\"\", \"fdchannel_test_\")\n+ if err != nil {\n+ t.Fatalf(\"failed to create temporary file: %v\", err)\n+ }\n+ defer sendFile.Close()\n+\n+ chanFDs, err := NewConnectedSockets()\n+ if err != nil {\n+ t.Fatalf(\"failed to create fdchannel sockets: %v\", err)\n+ }\n+ sendEP := NewEndpoint(chanFDs[0])\n+ defer sendEP.Destroy()\n+ recvEP := NewEndpoint(chanFDs[1])\n+ defer recvEP.Destroy()\n+\n+ recvFD, err := recvEP.RecvFDNonblock()\n+ if err != syscall.EAGAIN && err != syscall.EWOULDBLOCK {\n+ t.Errorf(\"RecvFDNonblock before SendFD: got (%d, %v), wanted (<unspecified>, EAGAIN or EWOULDBLOCK\", recvFD, err)\n+ }\n+\n+ if err := sendEP.SendFD(int(sendFile.Fd())); err != nil {\n+ t.Fatalf(\"SendFD failed: %v\", err)\n+ }\n+ recvFD, err = recvEP.RecvFD()\n+ if err != nil {\n+ t.Fatalf(\"RecvFD failed: %v\", err)\n+ }\n+ recvFile := os.NewFile(uintptr(recvFD), \"received file\")\n+ defer recvFile.Close()\n+\n+ sendInfo, err := sendFile.Stat()\n+ if err != nil {\n+ t.Fatalf(\"failed to stat sent file: %v\", err)\n+ }\n+ sendInfoSys := sendInfo.Sys()\n+ sendStat, ok := sendInfoSys.(*syscall.Stat_t)\n+ if !ok {\n+ t.Fatalf(\"sent file's FileInfo is backed by unknown type %T\", sendInfoSys)\n+ }\n+\n+ recvInfo, err := recvFile.Stat()\n+ if err != nil {\n+ t.Fatalf(\"failed to stat received file: %v\", err)\n+ }\n+ recvInfoSys := recvInfo.Sys()\n+ recvStat, ok := recvInfoSys.(*syscall.Stat_t)\n+ if !ok {\n+ t.Fatalf(\"received file's FileInfo is backed by unknown type %T\", recvInfoSys)\n+ }\n+\n+ if sendStat.Dev != recvStat.Dev || sendStat.Ino != recvStat.Ino {\n+ t.Errorf(\"sent file (dev=%d, ino=%d) does not match received file (dev=%d, ino=%d)\", sendStat.Dev, sendStat.Ino, recvStat.Dev, recvStat.Ino)\n+ }\n+}\n+\n+func TestShutdownThenRecvFD(t *testing.T) {\n+ sendFile, err := ioutil.TempFile(\"\", \"fdchannel_test_\")\n+ if err != nil {\n+ t.Fatalf(\"failed to create temporary file: %v\", err)\n+ }\n+ defer sendFile.Close()\n+\n+ chanFDs, err := NewConnectedSockets()\n+ if err != nil {\n+ t.Fatalf(\"failed to create fdchannel sockets: %v\", err)\n+ }\n+ sendEP := NewEndpoint(chanFDs[0])\n+ defer sendEP.Destroy()\n+ recvEP := NewEndpoint(chanFDs[1])\n+ defer recvEP.Destroy()\n+\n+ recvEP.Shutdown()\n+ if _, err := recvEP.RecvFD(); err == nil {\n+ t.Error(\"RecvFD succeeded unexpectedly\")\n+ }\n+}\n+\n+func TestRecvFDThenShutdown(t *testing.T) {\n+ sendFile, err := ioutil.TempFile(\"\", \"fdchannel_test_\")\n+ if err != nil {\n+ t.Fatalf(\"failed to create temporary file: %v\", err)\n+ }\n+ defer sendFile.Close()\n+\n+ chanFDs, err := NewConnectedSockets()\n+ if err != nil {\n+ t.Fatalf(\"failed to create fdchannel sockets: %v\", err)\n+ }\n+ sendEP := NewEndpoint(chanFDs[0])\n+ 
defer sendEP.Destroy()\n+ recvEP := NewEndpoint(chanFDs[1])\n+ defer recvEP.Destroy()\n+\n+ var receiverWG sync.WaitGroup\n+ receiverWG.Add(1)\n+ go func() {\n+ defer receiverWG.Done()\n+ if _, err := recvEP.RecvFD(); err == nil {\n+ t.Error(\"RecvFD succeeded unexpectedly\")\n+ }\n+ }()\n+ defer receiverWG.Wait()\n+ time.Sleep(time.Second) // to ensure recvEP.RecvFD() has blocked\n+ recvEP.Shutdown()\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/fdchannel/fdchannel_unsafe.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris\n+\n+// Package fdchannel implements passing file descriptors between processes over\n+// Unix domain sockets.\n+package fdchannel\n+\n+import (\n+ \"fmt\"\n+ \"reflect\"\n+ \"sync/atomic\"\n+ \"syscall\"\n+ \"unsafe\"\n+)\n+\n+// int32 is the real type of a file descriptor.\n+const sizeofInt32 = int(unsafe.Sizeof(int32(0)))\n+\n+// NewConnectedSockets returns a pair of file descriptors, owned by the caller,\n+// representing connected sockets that may be passed to separate calls to\n+// NewEndpoint to create connected Endpoints.\n+func NewConnectedSockets() ([2]int, error) {\n+ return syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0)\n+}\n+\n+// Endpoint sends file descriptors to, and receives them from, another\n+// connected Endpoint.\n+//\n+// Endpoint is not copyable or movable by value.\n+type Endpoint struct {\n+ sockfd int32 // accessed using atomic memory operations\n+ msghdr syscall.Msghdr\n+ cmsg *syscall.Cmsghdr // followed by sizeofInt32 bytes of data\n+}\n+\n+// Init must be called on zero-value Endpoints before first use. sockfd must be\n+// a blocking AF_UNIX SOCK_SEQPACKET socket.\n+func (ep *Endpoint) Init(sockfd int) {\n+ // \"Datagram sockets in various domains (e.g., the UNIX and Internet\n+ // domains) permit zero-length datagrams.\" - recv(2). Experimentally,\n+ // sendmsg+recvmsg for a zero-length datagram is slightly faster than\n+ // sendmsg+recvmsg for a single byte over a stream socket.\n+ cmsgSlice := make([]byte, syscall.CmsgSpace(sizeofInt32))\n+ cmsgReflect := (*reflect.SliceHeader)((unsafe.Pointer)(&cmsgSlice))\n+ ep.sockfd = int32(sockfd)\n+ ep.msghdr.Control = (*byte)((unsafe.Pointer)(cmsgReflect.Data))\n+ ep.cmsg = (*syscall.Cmsghdr)((unsafe.Pointer)(cmsgReflect.Data))\n+ // ep.msghdr.Controllen and ep.cmsg.* are mutated by recvmsg(2), so they're\n+ // set before calling sendmsg/recvmsg.\n+}\n+\n+// NewEndpoint is a convenience function that returns an initialized Endpoint\n+// allocated on the heap.\n+func NewEndpoint(sockfd int) *Endpoint {\n+ ep := &Endpoint{}\n+ ep.Init(sockfd)\n+ return ep\n+}\n+\n+// Destroy releases resources owned by ep. No other Endpoint methods may be\n+// called after Destroy.\n+func (ep *Endpoint) Destroy() {\n+ // These need not use sync/atomic since there must not be any concurrent\n+ // calls to Endpoint methods.\n+ if ep.sockfd >= 0 {\n+ syscall.Close(int(ep.sockfd))\n+ ep.sockfd = -1\n+ }\n+}\n+\n+// Shutdown causes concurrent and future calls to ep.SendFD(), ep.RecvFD(), and\n+// ep.RecvFDNonblock(), as well as the same calls in the connected Endpoint, to\n+// unblock and return errors. 
It does not wait for concurrent calls to return.\n+//\n+// Shutdown is the only Endpoint method that may be called concurrently with\n+// other methods.\n+func (ep *Endpoint) Shutdown() {\n+ if sockfd := int(atomic.SwapInt32(&ep.sockfd, -1)); sockfd >= 0 {\n+ syscall.Shutdown(sockfd, syscall.SHUT_RDWR)\n+ syscall.Close(sockfd)\n+ }\n+}\n+\n+// SendFD sends the open file description represented by the given file\n+// descriptor to the connected Endpoint.\n+func (ep *Endpoint) SendFD(fd int) error {\n+ cmsgLen := syscall.CmsgLen(sizeofInt32)\n+ ep.cmsg.Level = syscall.SOL_SOCKET\n+ ep.cmsg.Type = syscall.SCM_RIGHTS\n+ ep.cmsg.SetLen(cmsgLen)\n+ *ep.cmsgData() = int32(fd)\n+ ep.msghdr.SetControllen(cmsgLen)\n+ _, _, e := syscall.Syscall(syscall.SYS_SENDMSG, uintptr(atomic.LoadInt32(&ep.sockfd)), uintptr((unsafe.Pointer)(&ep.msghdr)), 0)\n+ if e != 0 {\n+ return e\n+ }\n+ return nil\n+}\n+\n+// RecvFD receives an open file description from the connected Endpoint and\n+// returns a file descriptor representing it, owned by the caller.\n+func (ep *Endpoint) RecvFD() (int, error) {\n+ return ep.recvFD(0)\n+}\n+\n+// RecvFDNonblock receives an open file description from the connected Endpoint\n+// and returns a file descriptor representing it, owned by the caller. If there\n+// are no pending receivable open file descriptions, RecvFDNonblock returns\n+// (<unspecified>, EAGAIN or EWOULDBLOCK).\n+func (ep *Endpoint) RecvFDNonblock() (int, error) {\n+ return ep.recvFD(syscall.MSG_DONTWAIT)\n+}\n+\n+func (ep *Endpoint) recvFD(flags uintptr) (int, error) {\n+ cmsgLen := syscall.CmsgLen(sizeofInt32)\n+ ep.msghdr.SetControllen(cmsgLen)\n+ _, _, e := syscall.Syscall(syscall.SYS_RECVMSG, uintptr(atomic.LoadInt32(&ep.sockfd)), uintptr((unsafe.Pointer)(&ep.msghdr)), flags|syscall.MSG_TRUNC)\n+ if e != 0 {\n+ return -1, e\n+ }\n+ if int(ep.msghdr.Controllen) != cmsgLen {\n+ return -1, fmt.Errorf(\"received control message has incorrect length: got %d, wanted %d\", ep.msghdr.Controllen, cmsgLen)\n+ }\n+ if ep.cmsg.Level != syscall.SOL_SOCKET || ep.cmsg.Type != syscall.SCM_RIGHTS {\n+ return -1, fmt.Errorf(\"received control message has incorrect (level, type): got (%v, %v), wanted (%v, %v)\", ep.cmsg.Level, ep.cmsg.Type, syscall.SOL_SOCKET, syscall.SCM_RIGHTS)\n+ }\n+ return int(*ep.cmsgData()), nil\n+}\n+\n+func (ep *Endpoint) cmsgData() *int32 {\n+ // syscall.CmsgLen(0) == syscall.cmsgAlignOf(syscall.SizeofCmsghdr)\n+ return (*int32)((unsafe.Pointer)(uintptr((unsafe.Pointer)(ep.cmsg)) + uintptr(syscall.CmsgLen(0))))\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add //pkg/fdchannel.
To accompany flipcall connections in cases where passing FDs is required
(as for gofers).
PiperOrigin-RevId: 255062277 |
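Usage sketch for the new package, based only on the API visible in the diff (NewConnectedSockets, NewEndpoint, SendFD, RecvFD, Destroy). In real use the two endpoints would sit in different processes, e.g. sentry and gofer; here both live in one process, as the package's own tests do.

package main

import (
	"fmt"
	"os"

	"gvisor.dev/gvisor/pkg/fdchannel"
)

func main() {
	// A connected AF_UNIX/SOCK_SEQPACKET pair; one end per endpoint.
	fds, err := fdchannel.NewConnectedSockets()
	if err != nil {
		panic(err)
	}
	sender := fdchannel.NewEndpoint(fds[0])
	defer sender.Destroy()
	receiver := fdchannel.NewEndpoint(fds[1])
	defer receiver.Destroy()

	// Any readable file works; this binary itself is always available.
	f, err := os.Open(os.Args[0])
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Pass the open file description across; the receiver gets a fresh fd
	// referring to the same open file.
	if err := sender.SendFD(int(f.Fd())); err != nil {
		panic(err)
	}
	fd, err := receiver.RecvFD()
	if err != nil {
		panic(err)
	}
	received := os.NewFile(uintptr(fd), "received")
	defer received.Close()

	buf := make([]byte, 64)
	n, err := received.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes through the received fd\n", n)
}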
259,881 | 25.06.2019 17:11:10 | 25,200 | e98ce4a2c681855b6b4f2c1298484f60014e4b88 | Add TODO reminder to remove tmpfs caching options
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tmpfs/fs.go",
"new_path": "pkg/sentry/fs/tmpfs/fs.go",
"diff": "@@ -133,6 +133,9 @@ func (f *Filesystem) Mount(ctx context.Context, device string, flags fs.MountSou\n}\n// Construct a mount which will follow the cache options provided.\n+ //\n+ // TODO(gvisor.dev/issue/179): There should be no reason to disable\n+ // caching once bind mounts are properly supported.\nvar msrc *fs.MountSource\nswitch options[cacheKey] {\ncase \"\", cacheAll:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add TODO reminder to remove tmpfs caching options
Updates #179
PiperOrigin-RevId: 255081565 |
259,884 | 14.06.2019 21:07:53 | 14,400 | 8d8380564f8284899183a0799bccb8c0b9096566 | Redirect to the custom domain from appspot.com | [
{
"change_type": "MODIFY",
"old_path": "cmd/gvisor-website/main.go",
"new_path": "cmd/gvisor-website/main.go",
"diff": "@@ -97,6 +97,14 @@ func hostRedirectHandler(h http.Handler) http.Handler {\nhttp.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)\nreturn\n}\n+\n+ if *projectId != \"\" && r.Host == *projectId+\".appspot.com\" && *customHost != \"\" {\n+ // Redirect to the custom domain.\n+ r.URL.Scheme = \"https\" // Assume https.\n+ r.URL.Host = *customHost\n+ http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)\n+ return\n+ }\nh.ServeHTTP(w, r)\n})\n}\n@@ -210,6 +218,9 @@ func envFlagString(name, def string) string {\nvar (\naddr = flag.String(\"http\", envFlagString(\"HTTP\", \":8080\"), \"HTTP service address\")\nstaticDir = flag.String(\"static-dir\", envFlagString(\"STATIC_DIR\", \"static\"), \"static files directory\")\n+ // Uses the standard GOOGLE_CLOUD_PROJECT environment variable set by App Engine.\n+ projectId = flag.String(\"project-id\", envFlagString(\"GOOGLE_CLOUD_PROJECT\", \"\"), \"The App Engine project ID.\")\n+ customHost = flag.String(\"custom-domain\", envFlagString(\"CUSTOM_DOMAIN\", \"gvisor.dev\"), \"The application's custom domain.\")\n)\nfunc main() {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Redirect to the custom domain from appspot.com |
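A hedged, standalone reimplementation of the appspot redirect for illustration (the site's real handler is hostRedirectHandler in its main package and also performs the plain-host redirect above it); it can be exercised with net/http/httptest. The project ID and domain below are example values.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// hostRedirect sends requests for <projectID>.appspot.com to the custom
// domain over https, mirroring the behavior added in the diff.
func hostRedirect(projectID, customHost string, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if projectID != "" && customHost != "" && r.Host == projectID+".appspot.com" {
			u := *r.URL
			u.Scheme = "https" // Assume https, as the original does.
			u.Host = customHost
			http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
			return
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	h := hostRedirect("gvisor-website", "gvisor.dev", http.NotFoundHandler())

	req := httptest.NewRequest("GET", "http://gvisor-website.appspot.com/docs/", nil)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	fmt.Println(rec.Code)                     // 301
	fmt.Println(rec.Header().Get("Location")) // https://gvisor.dev/docs/
}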
259,884 | 13.06.2019 22:14:41 | 14,400 | 8e6c43e5c57ffbe6a4c0c11f1e61e2eaa76db52c | Add fragment links to sub-headers in docs (fixes
Allows users to copy links to sub-headers within docs. | [
{
"change_type": "MODIFY",
"old_path": "assets/scss/_styles_project.scss",
"new_path": "assets/scss/_styles_project.scss",
"diff": "@@ -72,3 +72,10 @@ a.doc-table-anchor {\n.td-default main section.td-cover-block p.lead strong {\nfont-weight: bold;\n}\n+\n+.td-page,.td-section {\n+ main a.header-link {\n+ color: inherit;\n+ font-size: 60%;\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "layouts/partials/scripts.html",
"new_path": "layouts/partials/scripts.html",
"diff": "<script src=\"{{ $js.RelPermalink }}\" integrity=\"{{ $js.Data.Integrity }}\"></script>\n{{ end }}\n{{ partial \"hooks/body-end.html\" . }}\n+\n+<script type=\"text/javascript\">\n+ $(\"body.td-page,body.td-section\").find(\"main h2,h3,h4\").each(function() {\n+ var fragment = $(this).attr('id');\n+ $(this).append(' <a href=\"#'+fragment+'\" class=\"header-link\"><i class=\"fas fa-link\"></i></a>');\n+ });\n+</script>\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add fragment links to sub-headers in docs (fixes #61)
Allows users to copy links to sub-headers within docs. |
259,858 | 24.06.2019 15:49:08 | 25,200 | 34954077893756672f9a91f751032e2e1ce9ca30 | Add group to install instructions | [
{
"change_type": "MODIFY",
"old_path": "content/docs/includes/install_gvisor.md",
"new_path": "content/docs/includes/install_gvisor.md",
"diff": "@@ -21,7 +21,7 @@ a good place to put the `runsc` binary.\nsha512sum -c runsc.sha512\nchmod a+x runsc\nsudo mv runsc /usr/local/bin\n- sudo chown root /usr/local/bin/runsc\n+ sudo chown root:root /usr/local/bin/runsc\n)\n```\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add group to install instructions |
259,992 | 26.06.2019 14:23:35 | 25,200 | 42e212f6b7d4f6dd70e9751562f1524231e39a0e | Preserve permissions when checking lower
The code was wrongly assuming that only read access was
required from the lower overlay when checking for permissions.
This allowed non-writable files to be writable in the overlay.
Fixes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/inode_overlay.go",
"new_path": "pkg/sentry/fs/inode_overlay.go",
"diff": "@@ -537,12 +537,6 @@ func overlayCheck(ctx context.Context, o *overlayEntry, p PermMask) error {\nif o.upper != nil {\nerr = o.upper.check(ctx, p)\n} else {\n- if p.Write {\n- // Since writes will be redirected to the upper filesystem, the lower\n- // filesystem need not be writable, but must be readable for copy-up.\n- p.Write = false\n- p.Read = true\n- }\nerr = o.lower.check(ctx, p)\n}\no.copyMu.RUnlock()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -85,6 +85,19 @@ func addOverlay(ctx context.Context, conf *Config, lower *fs.Inode, name string,\nif err != nil {\nreturn nil, fmt.Errorf(\"creating tmpfs overlay: %v\", err)\n}\n+\n+ // Replicate permissions and owner from lower to upper mount point.\n+ attr, err := lower.UnstableAttr(ctx)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"reading attributes from lower mount point: %v\", err)\n+ }\n+ if !upper.InodeOperations.SetPermissions(ctx, upper, attr.Perms) {\n+ return nil, fmt.Errorf(\"error setting permission to upper mount point\")\n+ }\n+ if err := upper.InodeOperations.SetOwner(ctx, upper, attr.Owner); err != nil {\n+ return nil, fmt.Errorf(\"setting owner to upper mount point: %v\", err)\n+ }\n+\nreturn fs.NewOverlayRoot(ctx, upper, lower, upperFlags)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "test/syscalls/BUILD",
"new_path": "test/syscalls/BUILD",
"diff": "@@ -240,7 +240,7 @@ syscall_test(\nsyscall_test(test = \"//test/syscalls/linux:munmap_test\")\nsyscall_test(\n- add_overlay = False, # TODO(gvisor.dev/issue/316): enable when fixed.\n+ add_overlay = True,\ntest = \"//test/syscalls/linux:open_create_test\",\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Preserve permissions when checking lower
The code was wrongly assuming that only read access was
required from the lower overlay when checking for permissions.
This allowed non-writable files to be writable in the overlay.
Fixes #316
PiperOrigin-RevId: 255263686 |
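The behavior the fix restores can be sanity-checked from inside a sandbox with a few lines of Go: opening a file the current user cannot write should fail with a permission error instead of silently succeeding through the writable upper layer. The path below is just an example of a root-owned, read-only file from the lower image, and the check only applies when running as an unprivileged user.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Must run as a non-root user; root bypasses permission checks.
	f, err := os.OpenFile("/etc/hostname", os.O_WRONLY, 0)
	if os.IsPermission(err) {
		fmt.Println("write denied, as expected")
		return
	}
	if err == nil {
		f.Close()
	}
	fmt.Println("unexpected result:", err)
}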
259,853 | 27.06.2019 13:23:49 | 25,200 | e2760839033618410cdea9aeab039e096cde54be | gvisor/ptrace: grab initial thread registers only once | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess.go",
"diff": "@@ -155,6 +155,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {\nerrChan <- err\nreturn\n}\n+ firstThread.grabInitRegs()\n// Ready to handle requests.\nerrChan <- nil\n@@ -179,6 +180,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {\n// Detach the thread.\nt.detach()\n+ t.initRegs = firstThread.initRegs\n// Return the thread.\nr <- t\n@@ -269,7 +271,9 @@ func (t *thread) attach() {\n// Initialize options.\nt.init()\n+}\n+func (t *thread) grabInitRegs() {\n// Grab registers.\n//\n// Note that we adjust the current register RIP value to be just before\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"diff": "@@ -235,6 +235,7 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro\nreturn nil, fmt.Errorf(\"wait failed: expected SIGSTOP, got %v\", sig)\n}\nt.attach()\n+ t.grabInitRegs()\nreturn t, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | gvisor/ptrace: grab initial thread registers only once
PiperOrigin-RevId: 255465635 |
259,853 | 28.06.2019 11:48:27 | 25,200 | 8a625ceeb173307094e81d273458b6651e54220a | runsc: allow openat for runsc-race
I see that runsc-race is killed by SIGSYS, because openat isn't
allowed by seccomp filters:
60052 openat(AT_FDCWD, "/proc/sys/vm/overcommit_memory",
O_RDONLY|O_CLOEXEC <unfinished ...>
60052 <... openat resumed> ) = 257
60052 --- SIGSYS {si_signo=SIGSYS, si_code=SYS_SECCOMP, si_call_addr=0xfaacf1,
si_syscall=__NR_openat, si_arch=AUDIT_ARCH_X86_64} --- | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/extra_filters_race.go",
"new_path": "runsc/boot/filter/extra_filters_race.go",
"diff": "@@ -33,6 +33,7 @@ func instrumentationFilters() seccomp.SyscallRules {\nsyscall.SYS_MUNLOCK: {},\nsyscall.SYS_NANOSLEEP: {},\nsyscall.SYS_OPEN: {},\n+ syscall.SYS_OPENAT: {},\nsyscall.SYS_SET_ROBUST_LIST: {},\n// Used within glibc's malloc.\nsyscall.SYS_TIME: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: allow openat for runsc-race
I see that runsc-race is killed by SIGSYS, because openat isn't
allowed by seccomp filters:
60052 openat(AT_FDCWD, "/proc/sys/vm/overcommit_memory",
O_RDONLY|O_CLOEXEC <unfinished ...>
60052 <... openat resumed> ) = 257
60052 --- SIGSYS {si_signo=SIGSYS, si_code=SYS_SECCOMP, si_call_addr=0xfaacf1,
si_syscall=__NR_openat, si_arch=AUDIT_ARCH_X86_64} ---
PiperOrigin-RevId: 255640808 |
259,907 | 28.06.2019 12:06:17 | 25,200 | 7c13789818ec43644c3a159cd5cad2a5aad2e26d | Superblock interface in the disk layout package for ext4. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/ext4/disklayout/BUILD",
"new_path": "pkg/sentry/fs/ext4/disklayout/BUILD",
"diff": "@@ -8,6 +8,7 @@ go_library(\n\"block_group.go\",\n\"block_group_32.go\",\n\"block_group_64.go\",\n+ \"superblock.go\",\n],\nimportpath = \"gvisor.dev/gvisor/pkg/sentry/fs/ext4/disklayout\",\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/ext4/disklayout/block_group.go",
"new_path": "pkg/sentry/fs/ext4/disklayout/block_group.go",
"diff": "@@ -128,8 +128,8 @@ func (f BGFlags) ToInt() uint16 {\n// BGFlagsFromInt converts the 16-bit flag representation to a BGFlags struct.\nfunc BGFlagsFromInt(flags uint16) BGFlags {\nreturn BGFlags{\n- InodeUninit: (flags & BgInodeUninit) > 0,\n- BlockUninit: (flags & BgBlockUninit) > 0,\n- InodeZeroed: (flags & BgInodeZeroed) > 0,\n+ InodeUninit: flags&BgInodeUninit > 0,\n+ BlockUninit: flags&BgBlockUninit > 0,\n+ InodeZeroed: flags&BgInodeZeroed > 0,\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/ext4/disklayout/superblock.go",
"diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package disklayout\n+\n+// SuperBlock should be implemented by structs representing ext4 superblock.\n+// The superblock holds a lot of information about the enclosing filesystem.\n+// This interface aims to provide access methods to important information held\n+// by the superblock. It does NOT expose all fields of the superblock, only the\n+// ones necessary. This can be expanded when need be.\n+//\n+// Location and replication:\n+// - The superblock is located at offset 1024 in block group 0.\n+// - Redundant copies of the superblock and group descriptors are kept in\n+// all groups if sparse_super feature flag is NOT set. If it is set, the\n+// replicas only exist in groups whose group number is either 0 or a\n+// power of 3, 5, or 7.\n+// - There is also a sparse superblock feature v2 in which there are just\n+// two replicas saved in block groups pointed by the s_backup_bgs field.\n+//\n+// Replicas should eventually be updated if the superblock is updated.\n+//\n+// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#super-block.\n+type SuperBlock interface {\n+ // InodesCount returns the total number of inodes in this filesystem.\n+ InodesCount() uint32\n+\n+ // BlocksCount returns the total number of data blocks in this filesystem.\n+ BlocksCount() uint64\n+\n+ // FreeBlocksCount returns the number of free blocks in this filesystem.\n+ FreeBlocksCount() uint64\n+\n+ // FreeInodesCount returns the number of free inodes in this filesystem.\n+ FreeInodesCount() uint32\n+\n+ // MountCount returns the number of mounts since the last fsck.\n+ MountCount() uint16\n+\n+ // MaxMountCount returns the number of mounts allowed beyond which a fsck is\n+ // needed.\n+ MaxMountCount() uint16\n+\n+ // FirstDataBlock returns the absolute block number of the first data block,\n+ // which contains the super block itself.\n+ //\n+ // If the filesystem has 1kb data blocks then this should return 1. For all\n+ // other configurations, this typically returns 0.\n+ //\n+ // The first block group descriptor is in (FirstDataBlock() + 1)th block.\n+ FirstDataBlock() uint32\n+\n+ // BlockSize returns the size of one data block in this filesystem.\n+ // This can be calculated by 2^(10 + sb.s_log_block_size). This ensures that\n+ // the smallest block size is 1kb.\n+ BlockSize() uint64\n+\n+ // BlocksPerGroup returns the number of data blocks in a block group.\n+ BlocksPerGroup() uint32\n+\n+ // ClusterSize returns block cluster size (set during mkfs time by admin).\n+ // This can be calculated by 2^(10 + sb.s_log_cluster_size). 
This ensures that\n+ // the smallest cluster size is 1kb.\n+ //\n+ // sb.s_log_cluster_size must equal sb.s_log_block_size if bigalloc feature\n+ // is NOT set and consequently BlockSize() = ClusterSize() in that case.\n+ ClusterSize() uint64\n+\n+ // ClustersPerGroup returns:\n+ // - number of clusters per group if bigalloc is enabled.\n+ // - BlocksPerGroup() otherwise.\n+ ClustersPerGroup() uint32\n+\n+ // InodeSize returns the size of the inode disk record size in bytes. Use this\n+ // to iterate over inode arrays on disk.\n+ //\n+ // In ext2 and ext3:\n+ // - Each inode had a disk record of 128 bytes.\n+ // - The inode struct size was fixed at 128 bytes.\n+ //\n+ // In ext4 its possible to allocate larger on-disk inodes:\n+ // - Inode disk record size = sb.s_inode_size (function return value).\n+ // = 256 (default)\n+ // - Inode struct size = 128 + inode.i_extra_isize.\n+ // = 128 + 28 = 156 (default)\n+ InodeSize() uint16\n+\n+ // InodesPerGroup returns the number of inodes in a block group.\n+ InodesPerGroup() uint32\n+\n+ // BgDescSize returns the size of the block group descriptor struct.\n+ //\n+ // In ext2, ext3, ext4 (without 64-bit feature), the block group descriptor\n+ // is only 32 bytes long.\n+ // In ext4 with 64-bit feature, the block group descriptor expands to AT LEAST\n+ // 64 bytes. It might be bigger than that.\n+ BgDescSize() uint16\n+\n+ // CompatibleFeatures returns the CompatFeatures struct which holds all the\n+ // compatible features this fs supports.\n+ CompatibleFeatures() CompatFeatures\n+\n+ // IncompatibleFeatures returns the CompatFeatures struct which holds all the\n+ // incompatible features this fs supports.\n+ IncompatibleFeatures() IncompatFeatures\n+\n+ // ReadOnlyCompatibleFeatures returns the CompatFeatures struct which holds all the\n+ // readonly compatible features this fs supports.\n+ ReadOnlyCompatibleFeatures() RoCompatFeatures\n+\n+ // Magic() returns the magic signature which must be 0xef53.\n+ Magic() uint16\n+\n+ // Revision returns the superblock revision. Superblock struct fields from\n+ // offset 0x54 till 0x150 should only be used if superblock has DynamicRev.\n+ Revision() SbRevision\n+}\n+\n+// SbRevision is the type for superblock revisions.\n+type SbRevision int\n+\n+// Super block revisions.\n+const (\n+ // OldRev is the good old (original) format.\n+ OldRev SbRevision = 0\n+\n+ // DynamicRev is v2 format w/ dynamic inode sizes.\n+ DynamicRev SbRevision = 1\n+)\n+\n+// Superblock compatible features.\n+// This is not exhaustive, unused features are not listed.\n+const (\n+ // SbDirPrealloc indicates directory preallocation.\n+ SbDirPrealloc = 0x1\n+\n+ // SbHasJournal indicates the presence of a journal. jbd2 should only work\n+ // with this being set.\n+ SbHasJournal = 0x4\n+\n+ // SbExtAttr indicates extended attributes support.\n+ SbExtAttr = 0x8\n+\n+ // SbResizeInode indicates that the fs has reserved GDT blocks (right after\n+ // group descriptors) for fs expansion.\n+ SbResizeInode = 0x10\n+\n+ // SbDirIndex indicates that the fs has directory indices.\n+ SbDirIndex = 0x20\n+\n+ // SbSparseV2 stands for Sparse superblock version 2.\n+ SbSparseV2 = 0x200\n+)\n+\n+// CompatFeatures represents a superblock's compatible feature set. 
If the\n+// kernel does not understand any of these feature, it can still read/write\n+// to this fs.\n+type CompatFeatures struct {\n+ DirPrealloc bool\n+ HasJournal bool\n+ ExtAttr bool\n+ ResizeInode bool\n+ DirIndex bool\n+ SparseV2 bool\n+}\n+\n+// ToInt converts superblock compatible features back to its 32-bit rep.\n+func (f CompatFeatures) ToInt() uint32 {\n+ var res uint32\n+\n+ if f.DirPrealloc {\n+ res |= SbDirPrealloc\n+ }\n+ if f.HasJournal {\n+ res |= SbHasJournal\n+ }\n+ if f.ExtAttr {\n+ res |= SbExtAttr\n+ }\n+ if f.ResizeInode {\n+ res |= SbResizeInode\n+ }\n+ if f.DirIndex {\n+ res |= SbDirIndex\n+ }\n+ if f.SparseV2 {\n+ res |= SbSparseV2\n+ }\n+\n+ return res\n+}\n+\n+// CompatFeaturesFromInt converts the integer representation of superblock\n+// compatible features to CompatFeatures struct.\n+func CompatFeaturesFromInt(f uint32) CompatFeatures {\n+ return CompatFeatures{\n+ DirPrealloc: f&SbDirPrealloc > 0,\n+ HasJournal: f&SbHasJournal > 0,\n+ ExtAttr: f&SbExtAttr > 0,\n+ ResizeInode: f&SbResizeInode > 0,\n+ DirIndex: f&SbDirIndex > 0,\n+ SparseV2: f&SbSparseV2 > 0,\n+ }\n+}\n+\n+// Superblock incompatible features.\n+// This is not exhaustive, unused features are not listed.\n+const (\n+ // SbDirentFileType indicates that directory entries record the file type.\n+ // We should use struct ext4_dir_entry_2 for dirents then.\n+ SbDirentFileType = 0x2\n+\n+ // SbRecovery indicates that the filesystem needs recovery.\n+ SbRecovery = 0x4\n+\n+ // SbJournalDev indicates that the filesystem has a separate journal device.\n+ SbJournalDev = 0x8\n+\n+ // SbMetaBG indicates that the filesystem is using Meta block groups. Moves\n+ // the group descriptors from the congested first block group into the first\n+ // group of each metablock group to increase the maximum block groups limit\n+ // and hence support much larger filesystems.\n+ //\n+ // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#meta-block-groups.\n+ SbMetaBG = 0x10\n+\n+ // SbExtents indicates that the filesystem uses extents. Must be set in ext4\n+ // filesystems.\n+ SbExtents = 0x40\n+\n+ // SbIs64Bit indicates that this filesystem addresses blocks with 64-bits.\n+ // Hence can support 2^64 data blocks.\n+ SbIs64Bit = 0x80\n+\n+ // SbMMP indicates that this filesystem has multiple mount protection.\n+ //\n+ // See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#multiple-mount-protection.\n+ SbMMP = 0x100\n+\n+ // SbFlexBg indicates that this filesystem has flexible block groups. Several\n+ // block groups are tied into one logical block group so that all the metadata\n+ // for the block groups (bitmaps and inode tables) are close together for\n+ // faster loading. Consequently, large files will be continuous on disk.\n+ // However, this does not affect the placement of redundant superblocks and\n+ // group descriptors.\n+ //\n+ // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#flexible-block-groups.\n+ SbFlexBg = 0x200\n+\n+ // SbLargeDir shows that large directory enabled. Directory htree can be 3\n+ // levels deep. Directory htrees are allowed to be 2 levels deep otherwise.\n+ SbLargeDir = 0x4000\n+\n+ // SbInlineData allows inline data in inodes for really small files.\n+ SbInlineData = 0x8000\n+\n+ // SbEncrypted indicates that this fs contains encrypted inodes.\n+ SbEncrypted = 0x10000\n+)\n+\n+// IncompatFeatures represents a superblock's incompatible feature set. 
If the\n+// kernel does not understand any of these feature, it should refuse to mount.\n+type IncompatFeatures struct {\n+ DirentFileType bool\n+ Recovery bool\n+ JournalDev bool\n+ MetaBG bool\n+ Extents bool\n+ Is64Bit bool\n+ MMP bool\n+ FlexBg bool\n+ LargeDir bool\n+ InlineData bool\n+ Encrypted bool\n+}\n+\n+// ToInt converts superblock incompatible features back to its 32-bit rep.\n+func (f IncompatFeatures) ToInt() uint32 {\n+ var res uint32\n+\n+ if f.DirentFileType {\n+ res |= SbDirentFileType\n+ }\n+ if f.Recovery {\n+ res |= SbRecovery\n+ }\n+ if f.JournalDev {\n+ res |= SbJournalDev\n+ }\n+ if f.MetaBG {\n+ res |= SbMetaBG\n+ }\n+ if f.Extents {\n+ res |= SbExtents\n+ }\n+ if f.Is64Bit {\n+ res |= SbIs64Bit\n+ }\n+ if f.MMP {\n+ res |= SbMMP\n+ }\n+ if f.FlexBg {\n+ res |= SbFlexBg\n+ }\n+ if f.LargeDir {\n+ res |= SbLargeDir\n+ }\n+ if f.InlineData {\n+ res |= SbInlineData\n+ }\n+ if f.Encrypted {\n+ res |= SbEncrypted\n+ }\n+\n+ return res\n+}\n+\n+// IncompatFeaturesFromInt converts the integer representation of superblock\n+// incompatible features to IncompatFeatures struct.\n+func IncompatFeaturesFromInt(f uint32) IncompatFeatures {\n+ return IncompatFeatures{\n+ DirentFileType: f&SbDirentFileType > 0,\n+ Recovery: f&SbRecovery > 0,\n+ JournalDev: f&SbJournalDev > 0,\n+ MetaBG: f&SbMetaBG > 0,\n+ Extents: f&SbExtents > 0,\n+ Is64Bit: f&SbIs64Bit > 0,\n+ MMP: f&SbMMP > 0,\n+ FlexBg: f&SbFlexBg > 0,\n+ LargeDir: f&SbLargeDir > 0,\n+ InlineData: f&SbInlineData > 0,\n+ Encrypted: f&SbEncrypted > 0,\n+ }\n+}\n+\n+// Superblock readonly compatible features.\n+// This is not exhaustive, unused features are not listed.\n+const (\n+ // SbSparse indicates sparse superblocks. Only groups with number either 0 or\n+ // a power of 3, 5, or 7 will have redundant copies of the superblock and\n+ // block descriptors.\n+ SbSparse = 0x1\n+\n+ // SbLargeFile indicates that this fs has been used to store a file >= 2GiB.\n+ SbLargeFile = 0x2\n+\n+ // SbHugeFile indicates that this fs contains files whose sizes are\n+ // represented in units of logicals blocks, not 512-byte sectors.\n+ SbHugeFile = 0x8\n+\n+ // SbGdtCsum indicates that group descriptors have checksums.\n+ SbGdtCsum = 0x10\n+\n+ // SbDirNlink indicates that the new subdirectory limit is 64,999. Ext3 has a\n+ // 32,000 subdirectory limit.\n+ SbDirNlink = 0x20\n+\n+ // SbExtraIsize indicates that large inodes exist on this filesystem.\n+ SbExtraIsize = 0x40\n+\n+ // SbHasSnapshot indicates the existence of a snapshot.\n+ SbHasSnapshot = 0x80\n+\n+ // SbQuota enables usage tracking for all quota types.\n+ SbQuota = 0x100\n+\n+ // SbBigalloc maps to the bigalloc feature. When set, the minimum allocation\n+ // unit becomes a cluster rather than a data block. Then block bitmaps track\n+ // clusters, not data blocks.\n+ //\n+ // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#bigalloc.\n+ SbBigalloc = 0x200\n+\n+ // SbMetadataCsum indicates that the fs supports metadata checksumming.\n+ SbMetadataCsum = 0x400\n+\n+ // SbReadOnly marks this filesystem as readonly. Should refuse to mount in\n+ // read/write mode.\n+ SbReadOnly = 0x1000\n+)\n+\n+// RoCompatFeatures represents a superblock's readonly compatible feature set.\n+// If the kernel does not understand any of these feature, it can still mount\n+// readonly. 
But if the user wants to mount read/write, the kernel should\n+// refuse to mount.\n+type RoCompatFeatures struct {\n+ Sparse bool\n+ LargeFile bool\n+ HugeFile bool\n+ GdtCsum bool\n+ DirNlink bool\n+ ExtraIsize bool\n+ HasSnapshot bool\n+ Quota bool\n+ Bigalloc bool\n+ MetadataCsum bool\n+ ReadOnly bool\n+}\n+\n+// ToInt converts superblock readonly compatible features to its 32-bit rep.\n+func (f RoCompatFeatures) ToInt() uint32 {\n+ var res uint32\n+\n+ if f.Sparse {\n+ res |= SbSparse\n+ }\n+ if f.LargeFile {\n+ res |= SbLargeFile\n+ }\n+ if f.HugeFile {\n+ res |= SbHugeFile\n+ }\n+ if f.GdtCsum {\n+ res |= SbGdtCsum\n+ }\n+ if f.DirNlink {\n+ res |= SbDirNlink\n+ }\n+ if f.ExtraIsize {\n+ res |= SbExtraIsize\n+ }\n+ if f.HasSnapshot {\n+ res |= SbHasSnapshot\n+ }\n+ if f.Quota {\n+ res |= SbQuota\n+ }\n+ if f.Bigalloc {\n+ res |= SbBigalloc\n+ }\n+ if f.MetadataCsum {\n+ res |= SbMetadataCsum\n+ }\n+ if f.ReadOnly {\n+ res |= SbReadOnly\n+ }\n+\n+ return res\n+}\n+\n+// RoCompatFeaturesFromInt converts the integer representation of superblock\n+// readonly compatible features to RoCompatFeatures struct.\n+func RoCompatFeaturesFromInt(f uint32) RoCompatFeatures {\n+ return RoCompatFeatures{\n+ Sparse: f&SbSparse > 0,\n+ LargeFile: f&SbLargeFile > 0,\n+ HugeFile: f&SbHugeFile > 0,\n+ GdtCsum: f&SbGdtCsum > 0,\n+ DirNlink: f&SbDirNlink > 0,\n+ ExtraIsize: f&SbExtraIsize > 0,\n+ HasSnapshot: f&SbHasSnapshot > 0,\n+ Quota: f&SbQuota > 0,\n+ Bigalloc: f&SbBigalloc > 0,\n+ MetadataCsum: f&SbMetadataCsum > 0,\n+ ReadOnly: f&SbReadOnly > 0,\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Superblock interface in the disk layout package for ext4.
PiperOrigin-RevId: 255644277 |
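As a minimal sketch of the two patterns the superblock comments in the recorded diff rely on — sizes derived as 2^(10 + s_log_*_size), and feature sets that round-trip between a packed 32-bit bitmask and a struct of booleans via ToInt/FromInt helpers — the following standalone Go program uses hypothetical, pared-down local names (blockSize, compatFeatures) rather than the disklayout API recorded above; it only assumes the constants and semantics shown in the diff.

package main

import "fmt"

// blockSize mirrors the formula in the BlockSize()/ClusterSize() comments:
// size = 2^(10 + s_log_block_size), so the smallest block size is 1 KiB.
func blockSize(logBlockSize uint32) uint64 {
	return uint64(1) << (10 + logBlockSize)
}

// Hypothetical local copies of two compatible-feature bits from the diff.
const (
	sbHasJournal = 0x4
	sbDirIndex   = 0x20
)

// compatFeatures is a pared-down stand-in for the diff's CompatFeatures.
type compatFeatures struct {
	HasJournal bool
	DirIndex   bool
}

// toInt packs the booleans back into the on-disk bitmask.
func (f compatFeatures) toInt() uint32 {
	var res uint32
	if f.HasJournal {
		res |= sbHasJournal
	}
	if f.DirIndex {
		res |= sbDirIndex
	}
	return res
}

// compatFeaturesFromInt unpacks an on-disk bitmask into the struct form.
func compatFeaturesFromInt(v uint32) compatFeatures {
	return compatFeatures{
		HasJournal: v&sbHasJournal > 0,
		DirIndex:   v&sbDirIndex > 0,
	}
}

func main() {
	fmt.Println(blockSize(0), blockSize(2)) // 1024 4096
	f := compatFeatures{HasJournal: true, DirIndex: true}
	fmt.Printf("%#x %+v\n", f.toInt(), compatFeaturesFromInt(f.toInt()))
}

Running this prints 1024 4096, then 0x24 and the round-tripped struct, matching the 1 KiB minimum block size and the bitflag semantics described in the diff's comments.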