Columns (name, type, observed range):

- author (int64): 658 to 755k
- date (stringlengths): 19 to 19
- timezone (int64): -46,800 to 43.2k
- hash (stringlengths): 40 to 40
- message (stringlengths): 5 to 490
- mods (list)
- language (stringclasses): 20 values
- license (stringclasses): 3 values
- repo (stringlengths): 5 to 68
- original_message (stringlengths): 12 to 491
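The mods column is a JSON list of file changes; each entry carries change_type, old_path, new_path, and diff keys, as the rows below show. As a rough illustration of this record layout, here is a minimal Go sketch that models a row and decodes a mods payload. The struct names, JSON tags, and the abbreviated example payload are illustrative assumptions, not part of the dataset itself.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FileMod mirrors one entry of the "mods" column as it appears in the rows
// below: a change type plus old/new paths and a unified diff.
type FileMod struct {
	ChangeType string  `json:"change_type"` // e.g. "MODIFY" or "ADD"
	OldPath    *string `json:"old_path"`    // null for added files
	NewPath    *string `json:"new_path"`
	Diff       string  `json:"diff"`
}

// Commit mirrors one row of the dataset; the Go names follow the column list
// above and are themselves only illustrative.
type Commit struct {
	Author          int64     `json:"author"`
	Date            string    `json:"date"`     // e.g. "03.04.2019 22:33:28"
	Timezone        int64     `json:"timezone"` // presumably a UTC offset in seconds
	Hash            string    `json:"hash"`
	Message         string    `json:"message"`
	Mods            []FileMod `json:"mods"`
	Language        string    `json:"language"`
	License         string    `json:"license"`
	Repo            string    `json:"repo"`
	OriginalMessage string    `json:"original_message"`
}

func main() {
	// A heavily abbreviated mods payload in the same shape as the rows below.
	raw := `[{"change_type":"MODIFY","old_path":"README.md","new_path":"README.md","diff":"@@ -1 +1 @@\n-old\n+new\n"}]`

	var mods []FileMod
	if err := json.Unmarshal([]byte(raw), &mods); err != nil {
		panic(err)
	}

	// Scalar values taken from the first row of this dump.
	c := Commit{
		Hash:    "896812ebe2e0d435aafba96aeb08152a16579a70",
		Message: "Adjust font sizes for headers",
		Repo:    "google/gvisor",
		Mods:    mods,
	}
	for _, m := range c.Mods {
		fmt.Printf("%s %s: %s (%d-byte diff)\n", c.Hash[:12], m.ChangeType, *m.NewPath, len(m.Diff))
	}
}
```

Decoding mods this way exposes the raw diff text of each changed file, which is what downstream tooling over this dataset would typically consume.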
author: 259,884
date: 03.04.2019 22:33:28
timezone: 14,400
hash: 896812ebe2e0d435aafba96aeb08152a16579a70
message: Adjust font sizes for headers
[ { "change_type": "MODIFY", "old_path": "assets/scss/_styles_project.scss", "new_path": "assets/scss/_styles_project.scss", "diff": "@@ -67,7 +67,7 @@ a.doc-table-anchor {\n@media (min-width: 768px) {\nwidth: 50%;\n}\n- font-size: 2rem;\n+ font-size: $h1-font-size;\n}\n.td-default main section.td-cover-block p.lead strong {\nfont-weight: bold;\n" }, { "change_type": "MODIFY", "old_path": "assets/scss/_variables_project.scss", "new_path": "assets/scss/_variables_project.scss", "diff": "@@ -8,9 +8,9 @@ $google_font_name: \"Roboto\";\n$google_font_family: \"Roboto\";\n$font-size-base: 1rem;\n-$h1-font-size: $font-size-base * 2.5;\n-$h2-font-size: $font-size-base * 2;\n-$h3-font-size: $font-size-base * 1.75;\n-$h4-font-size: $font-size-base * 1.5;\n+$h1-font-size: $font-size-base * 2;\n+$h2-font-size: $font-size-base * 1.85;\n+$h3-font-size: $font-size-base * 1.65;\n+$h4-font-size: $font-size-base * 1.35;\n$h5-font-size: $font-size-base * 1.25;\n$h6-font-size: $font-size-base;\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Adjust font sizes for headers
author: 259,858
date: 04.04.2019 21:46:15
timezone: 25,200
hash: f28db3f7755e46056b12ba45cffce700873dca4e
message: Remove confusing fully- prefix
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/_index.md", "new_path": "content/docs/architecture_guide/_index.md", "diff": "title = \"Architecture Guide\"\nweight = 20\n+++\n-gVisor provides a fully-virtualized environment in order to sandbox untrusted\n+gVisor provides a virtualized environment in order to sandbox untrusted\ncontainers. The system interfaces normally implemented by the host kernel are\nmoved into a distinct, per-sandbox user space kernel in order to minimize the\nrisk of an exploit. gVisor does not introduce large fixed overheads however,\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Remove confusing fully- prefix
author: 259,884
date: 03.04.2019 02:32:50
timezone: 14,400
hash: 383e6cbd07181e8df4987a7b0bad8889e6731afa
message: Updated README to be more contributor friendly Remove doc about deploying and running Cloud Builds since that is mostly handled by CI. Re-organize to have info on contributing first.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -21,8 +21,6 @@ public:\npublic/app.yaml: public\ncp -vr cmd/gvisor-website/app.yaml public/\n-\n-\n# Load repositories.\nupstream:\nmkdir -p upstream\n@@ -44,29 +42,38 @@ content/docs/community/sigs: upstream/community $(wildcard upstream/community/si\n$(GO_TARGET): public $(GO_SOURCE)\ncd cmd/gvisor-website && find . -name \"*.go\" -exec cp --parents \\{\\} ../../public \\;\n-deploy: public/app.yaml\n- cd public && $(GCLOUD) app deploy\n-.PHONY: deploy\n-\npublic/static: node_modules config.toml $(shell find archetypes assets content themes -type f | sed 's/ /\\\\ /g')\n$(HUGO)\n-server: all-upstream\n- $(HUGO) server -FD --port 8080\n-\nnode_modules: package.json package-lock.json\n# Use npm ci because npm install will update the package-lock.json.\n# See: https://github.com/npm/npm/issues/18286\n$(NPM) ci\n+# Run a local content development server. Redirects will not be supported.\n+server: all-upstream\n+ $(HUGO) server -FD --port 8080\n+.PHONY: server\n+\n+# Deploy the website to App Engine.\n+deploy: public/app.yaml\n+ cd public && $(GCLOUD) app deploy\n+.PHONY: deploy\n+\n+# CI related Commmands\n+##############################################################################\n+\n+# Submit a build to Cloud Build manually. Used to test cloudbuild.yaml changes.\ncloud-build:\ngcloud builds submit --config cloudbuild/cloudbuild.yaml .\n+# Build and push the hugo Docker image used by Cloud Build.\nhugo-docker-image:\ndocker build --build-arg HUGO_VERSION=$(HUGO_VERSION) -t gcr.io/gvisor-website/hugo:$(HUGO_VERSION) cloudbuild/hugo/\ndocker push gcr.io/gvisor-website/hugo:$(HUGO_VERSION)\n.PHONY: hugo-docker-image\n+# Build and push the html-proofer image used by Cloud Build.\nhtmlproofer-docker-image:\ndocker build --build-arg HTMLPROOFER_VERSION=$(HTMLPROOFER_VERSION) -t gcr.io/gvisor-website/html-proofer:$(HTMLPROOFER_VERSION) cloudbuild/html-proofer/\ndocker push gcr.io/gvisor-website/html-proofer:$(HTMLPROOFER_VERSION)\n" }, { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -13,18 +13,24 @@ generate CSS files. Please install them before building.\n- Node.js >= 10.15.0 LTS\n- hugo extended >= v0.53\n-## Building\n+## Contributing to Documentation\n-Build the website using `make`:\n+### Using Github\n-```\n-make\n-```\n+You can use the \"Edit this page\" link on any documentation page to edit the\n+page content directly via GitHub and submit a pull request. This should\n+generally be done for relatively small changes.\n-This will output the App Engine application code, configuration, and html and\n-CSS into the `public/` directory.\n+### Using Git\n-## Testing\n+You can submit pull requests by making changes in a Git branch. See more\n+information on GitHub pull requests\n+[here](https://help.github.com/en/articles/about-pull-requests).\n+\n+Documentation is located in the [content/docs/](content/docs/) directory.\n+Documentation is written in markdown with hugo extensions. Please read more\n+about [content management](https://gohugo.io/categories/content-management) in\n+the hugo documentation.\nYou can use the hugo web server for testing. This will start a webserver that\nwill rebuild the site when you make content changes:\n@@ -35,53 +41,24 @@ make server\nAccess the site at http://localhost:8080\n-## Deploy\n-\n-Deploying the website to App Engine requires gcloud. 
First create a configuration:\n-\n-```\n-{\n- gcloud config configurations create gvisor-website\n- gcloud config set project gvisor-website\n-}\n-```\n+## Building\n-Deploy the application:\n+If you are making changes to App Engine config or application code, you can\n+build the website using `make`. This will output the App Engine application\n+code, configuration, and html and CSS into the `public/` directory.\n```\n-make deploy\n+make\n```\n-## Editing documentation\n-\n-Documentation is located in the [content/docs/](content/docs/) directory.\n-Documentation is written in markdown with hugo extensions. Please read more\n-about [content management](https://gohugo.io/categories/content-management) in\n-the hugo documentation.\n-\n-## Submit a Build\n-\n-Normally a build is triggered when you push to the gvisor-website repository.\n-However, you can submit a build to Cloud Build manually.\n-\n-As one-time setup, enable the App Engine Admin API, and set IAM roles for the [Cloud Build service\n-account](https://cloud.google.com/cloud-build/docs/securing-builds/set-service-account-permissions).\n+If you have Go installed you can run a local version of the website via the\n+`public/` directory.\n```\n{\n- PROJECT_NUMBER=$(gcloud projects list --filter=projectId:gvisor-website --format=\"value(projectNumber)\")\n- gcloud services enable appengine.googleapis.com\n- gcloud projects add-iam-policy-binding gvisor-website \\\n- --member=serviceAccount:${PROJECT_NUMBER}@cloudbuild.gserviceaccount.com \\\n- --role='roles/appengine.deployer'\n- gcloud projects add-iam-policy-binding gvisor-website \\\n- --member=serviceAccount:${PROJECT_NUMBER}@cloudbuild.gserviceaccount.com \\\n- --role='roles/appengine.serviceAdmin'\n+ cd public/\n+ go run main.go\n}\n```\n-Submit the build.\n-\n-```\n-make cloud-build\n-```\n+Access the site at http://localhost:8080\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Updated README to be more contributor friendly - Remove doc about deploying and running Cloud Builds since that is mostly handled by CI. - Re-organize to have info on contributing first.
author: 259,992
date: 02.04.2019 21:16:28
timezone: 25,200
hash: f4135148a842066ba9399e41bf030a650c291f49
message: Expand user guide landing page
[ { "change_type": "MODIFY", "old_path": "content/docs/user_guide/_index.md", "new_path": "content/docs/user_guide/_index.md", "diff": "@@ -3,6 +3,10 @@ title = \"User Guide\"\nweight = 10\n+++\n-Using gVisor for the first time? To get started, use either the [Docker Quick\n-Start](./docker/), the [OCI Quick Start](./oci/) or select a specific topic via\n-the menu.\n+gVisor can be used with Docker, Kubernetes, or directly using`runsc` with crafted OCI\n+spec for your container. Use the links below to see detailed instructions for each\n+of them:\n+\n+ * [Docker](./docker/): quickest and easiest way to get started\n+ * [Kubernetes](./kubernetes/): isolate Pods in your K8s cluster with gVisor\n+ * [OCI Quick Start](./oci/): expert mode. Customize gVisor for your environment\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Expand user guide landing page
author: 259,992
date: 04.04.2019 16:51:50
timezone: 25,200
hash: a5852fe8fa01500627308b9ae8732d579527fd56
message: Add runsc debug commands to Debugging section
[ { "change_type": "MODIFY", "old_path": "content/docs/user_guide/debugging.md", "new_path": "content/docs/user_guide/debugging.md", "diff": "@@ -31,3 +31,73 @@ sudo systemctl restart docker\nRun your container again, and inspect the files under `/tmp/runsc`. The log file\nwith name `boot` will contain the strace logs from your application, which can\nbe useful for identifying missing or broken system calls in gVisor.\n+\n+## Stack Trace\n+\n+`runsc debug --stacks` command allows stack traces to be collected while the\n+sandbox is running which can be useful to troubleshoot hangs or just to learn\n+more about gVisor. It connects to the sandbox process, collects a stack dump,\n+and writes it to the console.\n+\n+Here is an example:\n+\n+```bash\n+docker run --runtime=runsc --rm -d alpine sh -c \"while true; do echo running; sleep .1; done\"\n+63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n+\n+sudo runsc --root /var/run/docker/runtime-runsc/moby debug --stacks 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n+```\n+\n+> Note: `--root` variable is provided by docker and is normally set to\n+> `/var/run/docker/runtime-[runtime-name]/moby`. If in doubt, `--root` is logged to\n+> `runsc` logs.\n+\n+\n+## Profiling\n+\n+`runsc` integrates with Go profiler and gives you easy commands to profile CPU\n+and heap usage. First you need to enable `--profile` in the command line options\n+before starting the container:\n+\n+```json\n+{\n+ \"runtimes\": {\n+ \"runsc-prof\": {\n+ \"path\": \"/usr/local/bin/runsc\",\n+ \"runtimeArgs\": [\n+ \"--profile\"\n+ ]\n+ }\n+ }\n+}\n+```\n+\n+> Note: Enabling profiler loosens the seccomp protection added to the sandbox.\n+\n+Then restart docker to refresh the runtime options. While the container is running,\n+execute `runsc debug` to collect profile information and save to a file. Here are\n+the options available:\n+\n+ * **--profile-heap:** It generates heap profile to the speficied file.\n+ * **--profile-cpu:** It enables CPU profiler, waits for `--profile-delay` seconds\n+ and generates CPU profile to the speficied file.\n+\n+Here is an example::\n+\n+```bash\n+docker run --runtime=runsc-prof --rm -d alpine sleep 1000\n+63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n+\n+sudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-heap=/tmp/heap.prof 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n+sudo runsc --root /var/run/docker/runtime-runsc-prof/moby debug --profile-cpu=/tmp/cpu.prof --profile-delay=30 63254c6ab3a6989623fa1fb53616951eed31ac605a2637bb9ddba5d8d404b35b\n+```\n+\n+The resulting files can be opened using `go tool pprof` or [pprof]\n+(https://github.com/google/pprof/blob/master/doc/README.md). The examples below\n+create image file (`.svg`) with the heap profile and writes the top functions\n+using CPU to the console:\n+\n+```bash\n+go tool pprof -svg /usr/local/bin/runsc /tmp/heap.prof\n+go tool pprof -top /usr/local/bin/runsc /tmp/cpu.prof\n+```\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Add runsc debug commands to Debugging section
author: 259,885
date: 05.04.2019 17:53:24
timezone: 25,200
hash: 124bafc81c7291d31cbe2a74f9bda155d0f71469
message: Deflake PtraceTest.SeizeSetOptions.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/ptrace.cc", "new_path": "test/syscalls/linux/ptrace.cc", "diff": "@@ -1152,10 +1152,46 @@ TEST(PtraceTest, SeizeSetOptions) {\nEXPECT_TRUE(WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))\n<< \" status \" << status;\n- // SIGKILL the child (detaching the tracer) and wait for it to exit.\n+ // Clean up the child.\nASSERT_THAT(kill(child_pid, SIGKILL), SyscallSucceeds());\nASSERT_THAT(waitpid(child_pid, &status, 0),\nSyscallSucceedsWithValue(child_pid));\n+ if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80)) {\n+ // \"SIGKILL kills even within system calls (syscall-exit-stop is not\n+ // generated prior to death by SIGKILL). The net effect is that SIGKILL\n+ // always kills the process (all its threads), even if some threads of the\n+ // process are ptraced.\" - ptrace(2). This is technically true, but...\n+ //\n+ // When we send SIGKILL to the child, kernel/signal.c:complete_signal() =>\n+ // signal_wake_up(resume=1) kicks the tracee out of the syscall-enter-stop.\n+ // The pending SIGKILL causes the syscall to be skipped, but the child\n+ // thread still reports syscall-exit before checking for pending signals; in\n+ // current kernels, this is\n+ // arch/x86/entry/common.c:syscall_return_slowpath() =>\n+ // syscall_slow_exit_work() =>\n+ // include/linux/tracehook.h:tracehook_report_syscall_exit() =>\n+ // ptrace_report_syscall() => kernel/signal.c:ptrace_notify() =>\n+ // ptrace_do_notify() => ptrace_stop().\n+ //\n+ // ptrace_stop() sets the task's state to TASK_TRACED and the task's\n+ // exit_code to SIGTRAP|0x80 (passed by ptrace_report_syscall()), then calls\n+ // freezable_schedule(). freezable_schedule() eventually reaches\n+ // __schedule(), which detects signal_pending_state() due to the pending\n+ // SIGKILL, sets the task's state back to TASK_RUNNING, and returns without\n+ // descheduling. Thus, the task never enters syscall-exit-stop. However, if\n+ // our wait4() => kernel/exit.c:wait_task_stopped() racily observes the\n+ // TASK_TRACED state and the non-zero exit code set by ptrace_stop() before\n+ // __schedule() sets the state back to TASK_RUNNING, it will return the\n+ // task's exit_code as status W_STOPCODE(SIGTRAP|0x80). So we get a spurious\n+ // syscall-exit-stop notification, and need to wait4() again for task exit.\n+ //\n+ // gVisor is not susceptible to this race because\n+ // kernel.Task.waitCollectTraceeStopLocked() checks specifically for an\n+ // active ptraceStop, which is not initiated if SIGKILL is pending.\n+ LOG(INFO) << \"Observed syscall-exit after SIGKILL\";\n+ ASSERT_THAT(waitpid(child_pid, &status, 0),\n+ SyscallSucceedsWithValue(child_pid));\n+ }\nEXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL)\n<< \" status \" << status;\n}\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Deflake PtraceTest.SeizeSetOptions. PiperOrigin-RevId: 242226319 Change-Id: Iefc78656841315f6b7d48bd85db451486850264d
author: 259,992
date: 03.04.2019 12:18:46
timezone: 25,200
hash: 37433204a8ecda68ea2164315686006240bf11aa
message: Edits to user guide + added filesystem section
[ { "change_type": "MODIFY", "old_path": "content/docs/user_guide/FAQ.md", "new_path": "content/docs/user_guide/FAQ.md", "diff": "@@ -20,12 +20,13 @@ not realize a new file was copied to a given directory. To invalidate the cache\nand force a refresh, create a file under the directory in question and list the\ncontents again.\n+As a workaround, shared root filesystem can be enabled. See [Filesystem](../filesystem/).\n+\nThis bug is tracked in [bug #4](https://github.com/google/gvisor/issues/4).\nNote that `kubectl cp` works because it does the copy by exec'ing inside the\n-sandbox, and thus gVisor cache is aware of the new files and dirs.\n-\n-There are also different filesystem modes that can be used to avoid this issue.\n+sandbox, and thus gVisor's internal cache is made aware of the new files and\n+directories.\n### What's the security model?\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/debugging.md", "new_path": "content/docs/user_guide/debugging.md", "diff": "@@ -21,6 +21,10 @@ To enable debug and system call logging, add the `runtimeArgs` below to your\n}\n```\n+> Note: the last `/` in `--debug-log` is needed to interpret it as a directory.\n+> Then each `runsc` command executed will create a separate log file.\n+> Otherwise, log messages from all commands will be appended to the same file.\n+\nYou may also want to pass `--log-packets` to troubleshoot network problems. Then\nrestart the Docker daemon:\n@@ -29,8 +33,10 @@ sudo systemctl restart docker\n```\nRun your container again, and inspect the files under `/tmp/runsc`. The log file\n-with name `boot` will contain the strace logs from your application, which can\n-be useful for identifying missing or broken system calls in gVisor.\n+ending with `.boot` will contain the strace logs from your application, which can\n+be useful for identifying missing or broken system calls in gVisor. If you are\n+having problems starting the container, the log file ending with `.create` may\n+have the reason for the failure.\n## Stack traces\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/docker.md", "new_path": "content/docs/user_guide/docker.md", "diff": "@@ -3,7 +3,7 @@ title = \"Docker Quick Start\"\nweight = 10\n+++\nThis guide will help you quickly get started running Docker containers using\n-gVisor with the default platform.\n+gVisor.\n## Install gVisor\n@@ -43,13 +43,19 @@ sudo systemctl restart docker\nNow run your container using the `runsc` runtime:\n```bash\n-docker run --runtime=runsc hello-world\n+docker run --runtime=runsc --rm hello-world\n```\nYou can also run a terminal to explore the container.\n```bash\n-docker run --runtime=runsc -it ubuntu /bin/bash\n+docker run --runtime=runsc --rm -it ubuntu /bin/bash\n+```\n+\n+Many docker options are compatible with gVisor, try them out. 
Here is an example:\n+\n+```bash\n+docker run --runtime=runsc --rm --link backend:database -v ~/bin:/tools:ro -p 8080:80 --cpus=0.5 -it busybox telnet towel.blinkenlights.nl\n```\n## Verify the runtime\n@@ -75,7 +81,8 @@ $ docker run --runtime=runsc -it ubuntu dmesg\nNote that this is easily replicated by an attacker so applications should never\nuse `dmesg` to verify the runtime in a security sensitive context.\n-Next, try running gVisor using the [KVM platform](../platforms/).\n+Next, look at the different options available for gVisor: [platform](../platforms/),\n+[network](../networking/), [filesystem](../filesystem/).\n[docker]: https://docs.docker.com/install/\n[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n" }, { "change_type": "ADD", "old_path": null, "new_path": "content/docs/user_guide/filesystem.md", "diff": "++++\n+title = \"Filesystem\"\n+weight = 45\n++++\n+gVisor accesses the filesystem through a file proxy, called the Gofer. The gofer\n+runs as a separate process, that is isolated from the sandbox. Gofer instances\n+communicate with their respective sentry using the 9P protocol. For a more detailed\n+explanation see [Overview > Gofer](../../architecture_guide/overview/#gofer).\n+\n+## Sandbox overlay\n+\n+To isolate the host filesystem from the sandbox, you can set a writable tmpfs overlay\n+on top of the entire filesystem. All modifications are made to the overlay, keeping\n+the host filesystem unmodified.\n+\n+> Note: All created and modified files are stored in memory inside the sandbox.\n+\n+To use the tmpfs overlay, add the following `runtimeArgs` to your Docker configuration\n+(`/etc/docker/daemon.json`) and restart the Docker daemon:\n+\n+```json\n+{\n+ \"runtimes\": {\n+ \"runsc\": {\n+ \"path\": \"/usr/local/bin/runsc\",\n+ \"runtimeArgs\": [\n+ \"--overlay\"\n+ ]\n+ }\n+ }\n+}\n+```\n+\n+## Shared root filesystem\n+\n+The root filesystem is where the image is extracted and is not generally modified\n+from outside the sandbox. This allows for some optimizations, like skipping checks\n+to determine if a directory has changed since the last time it was cached, thus\n+missing updates that may have happened. If you need to `docker cp` files inside the\n+root filesystem, you may want to enable shared mode. Just be aware that file system\n+access will be slower due to the extra checks that are required.\n+\n+> Note: External mounts are always shared.\n+\n+To use set the root filesystem shared, add the following `runtimeArgs` to your Docker\n+configuration (`/etc/docker/daemon.json`) and restart the Docker daemon:\n+\n+```json\n+{\n+ \"runtimes\": {\n+ \"runsc\": {\n+ \"path\": \"/usr/local/bin/runsc\",\n+ \"runtimeArgs\": [\n+ \"--file-access=shared\"\n+ ]\n+ }\n+ }\n+}\n+```\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/platforms.md", "new_path": "content/docs/user_guide/platforms.md", "diff": "@@ -14,9 +14,10 @@ more depth in the [Architecture Guide](../../architecture_guide/).\n## Selecting a Platform\n-The platform is selected by a `--platform` command line flag passed to `runsc`.\n-To select a different platform, modify your Docker configuration\n-(`/etc/docker/daemon.json`) to pass this argument:\n+The platform is selected by the `--platform` command line flag passed to\n+`runsc`. By default, the ptrace platform is selected. 
To select a different\n+platform, modify your Docker configuration (`/etc/docker/daemon.json`) to\n+pass this argument:\n```json\n{\n@@ -31,7 +32,12 @@ To select a different platform, modify your Docker configuration\n}\n```\n-Then restart the Docker daemon.\n+You must restart the Docker daemon after making changes to this file, typically\n+this is done via `systemd`:\n+\n+```bash\n+sudo systemctl restart docker\n+```\n## Example: Using the KVM Platform\n@@ -50,7 +56,7 @@ sudo apt-get install qemu-kvm\nIf you are using a virtual machine you will need to make sure that nested\nvirtualization is configured. Here are links to documents on how to set up\n-nested virtualization in several popular environments.\n+nested virtualization in several popular environments:\n* Google Cloud: [Enabling Nested Virtualization for VM Instances][nested-gcp]\n* Microsoft Azure: [How to enable nested virtualization in an Azure VM][nested-azure]\n@@ -99,7 +105,7 @@ Now run your container using the `runsc-kvm` runtime. This will run the\ncontainer using the KVM platform:\n```bash\n-docker run --runtime=runsc-kvm hello-world\n+docker run --runtime=runsc-kvm --rm hello-world\n```\n[nested-azure]: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/nested-virtualization\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Edits to user guide + added filesystem section
author: 259,885
date: 08.04.2019 16:31:06
timezone: 25,200
hash: 9471c013483b0709479c51d470ac840621ae7d46
message: Export kernel.SignalInfoPriv. Also add kernel.SignalInfoNoInfo, and use it in RLIMIT_FSIZE checks.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/tty.go", "new_path": "pkg/sentry/fs/host/tty.go", "diff": "@@ -344,11 +344,8 @@ func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) e\n// 040b6362d58f \"tty: fix leakage of -ERESTARTSYS to userland\" doesn't\n// apply: the sentry will handle -ERESTARTSYS in\n// kernel.runApp.execute() even if the kernel.Task isn't interrupted.\n- si := arch.SignalInfo{\n- Code: arch.SignalInfoKernel,\n- Signo: int32(sig),\n- }\n+ //\n// Linux ignores the result of kill_pgrp().\n- _ = pg.SendSignal(&si)\n+ _ = pg.SendSignal(kernel.SignalInfoPriv(sig))\nreturn kernel.ERESTARTSYS\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fasync/BUILD", "new_path": "pkg/sentry/kernel/fasync/BUILD", "diff": "@@ -9,7 +9,6 @@ go_library(\nvisibility = [\"//:sandbox\"],\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/sentry/arch\",\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fasync/fasync.go", "new_path": "pkg/sentry/kernel/fasync/fasync.go", "diff": "@@ -19,7 +19,6 @@ import (\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n@@ -72,11 +71,7 @@ func (a *FileAsync) Callback(e *waiter.Entry) {\na.requester.EffectiveKUID == c.RealKUID ||\na.requester.RealKUID == c.SavedKUID ||\na.requester.RealKUID == c.RealKUID {\n- t.SendSignal(&arch.SignalInfo{\n- Signo: int32(linux.SIGIO),\n- // SEND_SIG_PRIV\n- Code: arch.SignalInfoKernel,\n- })\n+ t.SendSignal(kernel.SignalInfoPriv(linux.SIGIO))\n}\na.mu.Unlock()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/sessions.go", "new_path": "pkg/sentry/kernel/sessions.go", "diff": "@@ -219,8 +219,8 @@ func (pg *ProcessGroup) handleOrphan() {\nreturn\n}\ntg.signalHandlers.mu.Lock()\n- tg.leader.sendSignalLocked(sigPriv(linux.SIGHUP), true /* group */)\n- tg.leader.sendSignalLocked(sigPriv(linux.SIGCONT), true /* group */)\n+ tg.leader.sendSignalLocked(SignalInfoPriv(linux.SIGHUP), true /* group */)\n+ tg.leader.sendSignalLocked(SignalInfoPriv(linux.SIGCONT), true /* group */)\ntg.signalHandlers.mu.Unlock()\n})\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/signal.go", "new_path": "pkg/sentry/kernel/signal.go", "diff": "@@ -56,11 +56,21 @@ func (k *Kernel) sendExternalSignal(info *arch.SignalInfo, context string) {\n}\n}\n-// sigPriv returns a SignalInfo representing a signal sent by the sentry. 
(The\n-// name reflects its equivalence to Linux's SEND_SIG_PRIV.)\n-func sigPriv(sig linux.Signal) *arch.SignalInfo {\n+// SignalInfoPriv returns a SignalInfo equivalent to Linux's SEND_SIG_PRIV.\n+func SignalInfoPriv(sig linux.Signal) *arch.SignalInfo {\nreturn &arch.SignalInfo{\nSigno: int32(sig),\nCode: arch.SignalInfoKernel,\n}\n}\n+\n+// SignalInfoNoInfo returns a SignalInfo equivalent to Linux's SEND_SIG_NOINFO.\n+func SignalInfoNoInfo(sig linux.Signal, sender, receiver *Task) *arch.SignalInfo {\n+ info := &arch.SignalInfo{\n+ Signo: int32(sig),\n+ Code: arch.SignalInfoUser,\n+ }\n+ info.SetPid(int32(receiver.tg.pidns.IDOfThreadGroup(sender.tg)))\n+ info.SetUid(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))\n+ return info\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_run.go", "new_path": "pkg/sentry/kernel/task_run.go", "diff": "@@ -176,7 +176,7 @@ func (*runApp) execute(t *Task) taskRunState {\nif err := t.rseqCopyOutCPU(); err != nil {\nt.Warningf(\"Failed to copy CPU to %#x for RSEQ: %v\", t.rseqCPUAddr, err)\nt.forceSignal(linux.SIGSEGV, false)\n- t.SendSignal(sigPriv(linux.SIGSEGV))\n+ t.SendSignal(SignalInfoPriv(linux.SIGSEGV))\n// Re-enter the task run loop for signal delivery.\nreturn (*runApp)(nil)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_sched.go", "new_path": "pkg/sentry/kernel/task_sched.go", "diff": "@@ -394,7 +394,7 @@ func (ticker *kernelCPUClockTicker) Notify(exp uint64) {\nnewItimerVirtSetting, exp := tg.itimerVirtSetting.At(tgVirtNow)\ntg.itimerVirtSetting = newItimerVirtSetting\nif exp != 0 {\n- virtReceiver.sendSignalLocked(sigPriv(linux.SIGVTALRM), true)\n+ virtReceiver.sendSignalLocked(SignalInfoPriv(linux.SIGVTALRM), true)\n}\n}\nif profReceiver != nil {\n@@ -402,18 +402,18 @@ func (ticker *kernelCPUClockTicker) Notify(exp uint64) {\nnewItimerProfSetting, exp := tg.itimerProfSetting.At(tgProfNow)\ntg.itimerProfSetting = newItimerProfSetting\nif exp != 0 {\n- profReceiver.sendSignalLocked(sigPriv(linux.SIGPROF), true)\n+ profReceiver.sendSignalLocked(SignalInfoPriv(linux.SIGPROF), true)\n}\n// RLIMIT_CPU soft limit\nnewRlimitCPUSoftSetting, exp := tg.rlimitCPUSoftSetting.At(tgProfNow)\ntg.rlimitCPUSoftSetting = newRlimitCPUSoftSetting\nif exp != 0 {\n- profReceiver.sendSignalLocked(sigPriv(linux.SIGXCPU), true)\n+ profReceiver.sendSignalLocked(SignalInfoPriv(linux.SIGXCPU), true)\n}\n// RLIMIT_CPU hard limit\nrlimitCPUMax := tg.limits.Get(limits.CPU).Max\nif rlimitCPUMax != limits.Infinity && !tgProfNow.Before(ktime.FromSeconds(int64(rlimitCPUMax))) {\n- profReceiver.sendSignalLocked(sigPriv(linux.SIGKILL), true)\n+ profReceiver.sendSignalLocked(SignalInfoPriv(linux.SIGKILL), true)\n}\n}\ntg.signalHandlers.mu.Unlock()\n@@ -471,7 +471,7 @@ func (t *Task) NotifyRlimitCPUUpdated() {\ntgcpu := t.tg.cpuStatsAtLocked(t.k.CPUClockNow())\ntgProfNow := ktime.FromNanoseconds((tgcpu.UserTime + tgcpu.SysTime).Nanoseconds())\nif !tgProfNow.Before(ktime.FromSeconds(int64(rlimitCPU.Max))) {\n- t.sendSignalLocked(sigPriv(linux.SIGKILL), true)\n+ t.sendSignalLocked(SignalInfoPriv(linux.SIGKILL), true)\n}\n}\nt.tg.updateCPUTimersEnabledLocked()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_signals.go", "new_path": "pkg/sentry/kernel/task_signals.go", "diff": "@@ -224,7 +224,7 @@ func (t *Task) deliverSignal(info *arch.SignalInfo, act arch.SignalAct) taskRunS\n// Send a forced SIGSEGV. 
If the signal that couldn't be delivered\n// was a SIGSEGV, force the handler to SIG_DFL.\nt.forceSignal(linux.SIGSEGV, linux.Signal(info.Signo) == linux.SIGSEGV /* unconditional */)\n- t.SendSignal(sigPriv(linux.SIGSEGV))\n+ t.SendSignal(SignalInfoPriv(linux.SIGSEGV))\n}\ndefault:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_syscall.go", "new_path": "pkg/sentry/kernel/task_syscall.go", "diff": "@@ -250,7 +250,7 @@ type runSyscallAfterSyscallEnterStop struct{}\nfunc (*runSyscallAfterSyscallEnterStop) execute(t *Task) taskRunState {\nif sig := linux.Signal(t.ptraceCode); sig.IsValid() {\nt.tg.signalHandlers.mu.Lock()\n- t.sendSignalLocked(sigPriv(sig), false /* group */)\n+ t.sendSignalLocked(SignalInfoPriv(sig), false /* group */)\nt.tg.signalHandlers.mu.Unlock()\n}\nif t.killed() {\n@@ -270,7 +270,7 @@ type runSyscallAfterSysemuStop struct{}\nfunc (*runSyscallAfterSysemuStop) execute(t *Task) taskRunState {\nif sig := linux.Signal(t.ptraceCode); sig.IsValid() {\nt.tg.signalHandlers.mu.Lock()\n- t.sendSignalLocked(sigPriv(sig), false /* group */)\n+ t.sendSignalLocked(SignalInfoPriv(sig), false /* group */)\nt.tg.signalHandlers.mu.Unlock()\n}\nif t.killed() {\n@@ -335,7 +335,7 @@ func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState {\nif _, err := t.CopyIn(usermem.Addr(t.Arch().Stack()), caller); err != nil {\nt.Debugf(\"vsyscall %d: error reading return address from stack: %v\", sysno, err)\nt.forceSignal(linux.SIGSEGV, false /* unconditional */)\n- t.SendSignal(sigPriv(linux.SIGSEGV))\n+ t.SendSignal(SignalInfoPriv(linux.SIGSEGV))\nreturn (*runApp)(nil)\n}\n@@ -405,7 +405,7 @@ func (t *Task) doVsyscallInvoke(sysno uintptr, args arch.SyscallArguments, calle\nt.Debugf(\"vsyscall %d, caller %x: emulated syscall returned error: %v\", sysno, t.Arch().Value(caller), err)\nif err == syserror.EFAULT {\nt.forceSignal(linux.SIGSEGV, false /* unconditional */)\n- t.SendSignal(sigPriv(linux.SIGSEGV))\n+ t.SendSignal(SignalInfoPriv(linux.SIGSEGV))\n// A return is not emulated in this case.\nreturn (*runApp)(nil)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/thread_group.go", "new_path": "pkg/sentry/kernel/thread_group.go", "diff": "@@ -322,7 +322,7 @@ type itimerRealListener struct {\n// Notify implements ktime.TimerListener.Notify.\nfunc (l *itimerRealListener) Notify(exp uint64) {\n- l.tg.SendSignal(sigPriv(linux.SIGALRM))\n+ l.tg.SendSignal(SignalInfoPriv(linux.SIGALRM))\n}\n// Destroy implements ktime.TimerListener.Destroy.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/error.go", "new_path": "pkg/sentry/syscalls/linux/error.go", "diff": "@@ -19,9 +19,9 @@ import (\n\"sync\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/metric\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n@@ -53,10 +53,7 @@ func handleIOError(t *kernel.Task, partialResult bool, err, intr error, op strin\n//\n// Do not consume the error and return it as EFBIG.\n// Simultaneously send a SIGXFSZ per setrlimit(2).\n- t.SendSignal(&arch.SignalInfo{\n- Signo: int32(syscall.SIGXFSZ),\n- Code: arch.SignalInfoKernel,\n- })\n+ t.SendSignal(kernel.SignalInfoNoInfo(linux.SIGXFSZ, t, t))\nreturn syscall.EFBIG\ncase syserror.ErrInterrupted:\n// The syscall was interrupted. 
Return nil if it completed\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/write.cc", "new_path": "test/syscalls/linux/write.cc", "diff": "#include <signal.h>\n#include <sys/resource.h>\n#include <sys/stat.h>\n+#include <sys/types.h>\n#include <time.h>\n#include <unistd.h>\n@@ -105,23 +106,33 @@ TEST_F(WriteTest, WriteExceedsRLimit) {\nEXPECT_THAT(write(fd, buf.data(), target_lim + 1),\nSyscallSucceedsWithValue(target_lim));\nEXPECT_THAT(write(fd, buf.data(), 1), SyscallFailsWithErrno(EFBIG));\n+ siginfo_t info;\nstruct timespec timelimit = {0, 0};\n- EXPECT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, nullptr, &timelimit),\n+ ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),\nSyscallSucceedsWithValue(SIGXFSZ));\n+ EXPECT_EQ(info.si_code, SI_USER);\n+ EXPECT_EQ(info.si_pid, getpid());\n+ EXPECT_EQ(info.si_uid, getuid());\nEXPECT_THAT(pwrite(fd, buf.data(), target_lim + 1, 1),\nSyscallSucceedsWithValue(target_lim - 1));\nEXPECT_THAT(pwrite(fd, buf.data(), 1, target_lim),\nSyscallFailsWithErrno(EFBIG));\n- EXPECT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, nullptr, &timelimit),\n+ ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),\nSyscallSucceedsWithValue(SIGXFSZ));\n+ EXPECT_EQ(info.si_code, SI_USER);\n+ EXPECT_EQ(info.si_pid, getpid());\n+ EXPECT_EQ(info.si_uid, getuid());\nEXPECT_THAT(pwrite64(fd, buf.data(), target_lim + 1, 1),\nSyscallSucceedsWithValue(target_lim - 1));\nEXPECT_THAT(pwrite64(fd, buf.data(), 1, target_lim),\nSyscallFailsWithErrno(EFBIG));\n- EXPECT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, nullptr, &timelimit),\n+ ASSERT_THAT(RetryEINTR(sigtimedwait)(&filesize_mask, &info, &timelimit),\nSyscallSucceedsWithValue(SIGXFSZ));\n+ EXPECT_EQ(info.si_code, SI_USER);\n+ EXPECT_EQ(info.si_pid, getpid());\n+ EXPECT_EQ(info.si_uid, getuid());\nASSERT_THAT(sigprocmask(SIG_UNBLOCK, &filesize_mask, nullptr),\nSyscallSucceeds());\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Export kernel.SignalInfoPriv. Also add kernel.SignalInfoNoInfo, and use it in RLIMIT_FSIZE checks. PiperOrigin-RevId: 242562428 Change-Id: I4887c0e1c8f5fddcabfe6d4281bf76d2f2eafe90
author: 259,962
date: 09.04.2019 11:22:28
timezone: 25,200
hash: eaac2806ffadbb3db6317e58c61b855b1350f0aa
message: Add TCP checksum verification.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -154,6 +154,7 @@ var Metrics = tcpip.Stats{\nSlowStartRetransmits: mustCreateMetric(\"/netstack/tcp/slow_start_retransmits\", \"Number of segments retransmitted in slow start mode.\"),\nFastRetransmit: mustCreateMetric(\"/netstack/tcp/fast_retransmit\", \"Number of TCP segments which were fast retransmitted.\"),\nTimeouts: mustCreateMetric(\"/netstack/tcp/timeouts\", \"Number of times RTO expired.\"),\n+ ChecksumErrors: mustCreateMetric(\"/netstack/tcp/checksum_errors\", \"Number of segments dropped due to bad checksums.\"),\n},\nUDP: tcpip.UDPStats{\nPacketsReceived: mustCreateMetric(\"/netstack/udp/packets_received\", \"Number of UDP datagrams received via HandlePacket.\"),\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/header/tcp.go", "new_path": "pkg/tcpip/header/tcp.go", "diff": "@@ -22,16 +22,17 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum\"\n)\n+// These constants are the offsets of the respective fields in the TCP header.\nconst (\n- srcPort = 0\n- dstPort = 2\n- seqNum = 4\n- ackNum = 8\n- dataOffset = 12\n- tcpFlags = 13\n- winSize = 14\n- tcpChecksum = 16\n- urgentPtr = 18\n+ TCPSrcPortOffset = 0\n+ TCPDstPortOffset = 2\n+ TCPSeqNumOffset = 4\n+ TCPAckNumOffset = 8\n+ TCPDataOffset = 12\n+ TCPFlagsOffset = 13\n+ TCPWinSizeOffset = 14\n+ TCPChecksumOffset = 16\n+ TCPUrgentPtrOffset = 18\n)\nconst (\n@@ -179,27 +180,27 @@ const (\n// SourcePort returns the \"source port\" field of the tcp header.\nfunc (b TCP) SourcePort() uint16 {\n- return binary.BigEndian.Uint16(b[srcPort:])\n+ return binary.BigEndian.Uint16(b[TCPSrcPortOffset:])\n}\n// DestinationPort returns the \"destination port\" field of the tcp header.\nfunc (b TCP) DestinationPort() uint16 {\n- return binary.BigEndian.Uint16(b[dstPort:])\n+ return binary.BigEndian.Uint16(b[TCPDstPortOffset:])\n}\n// SequenceNumber returns the \"sequence number\" field of the tcp header.\nfunc (b TCP) SequenceNumber() uint32 {\n- return binary.BigEndian.Uint32(b[seqNum:])\n+ return binary.BigEndian.Uint32(b[TCPSeqNumOffset:])\n}\n// AckNumber returns the \"ack number\" field of the tcp header.\nfunc (b TCP) AckNumber() uint32 {\n- return binary.BigEndian.Uint32(b[ackNum:])\n+ return binary.BigEndian.Uint32(b[TCPAckNumOffset:])\n}\n// DataOffset returns the \"data offset\" field of the tcp header.\nfunc (b TCP) DataOffset() uint8 {\n- return (b[dataOffset] >> 4) * 4\n+ return (b[TCPDataOffset] >> 4) * 4\n}\n// Payload returns the data in the tcp packet.\n@@ -209,32 +210,32 @@ func (b TCP) Payload() []byte {\n// Flags returns the flags field of the tcp header.\nfunc (b TCP) Flags() uint8 {\n- return b[tcpFlags]\n+ return b[TCPFlagsOffset]\n}\n// WindowSize returns the \"window size\" field of the tcp header.\nfunc (b TCP) WindowSize() uint16 {\n- return binary.BigEndian.Uint16(b[winSize:])\n+ return binary.BigEndian.Uint16(b[TCPWinSizeOffset:])\n}\n// Checksum returns the \"checksum\" field of the tcp header.\nfunc (b TCP) Checksum() uint16 {\n- return binary.BigEndian.Uint16(b[tcpChecksum:])\n+ return binary.BigEndian.Uint16(b[TCPChecksumOffset:])\n}\n// SetSourcePort sets the \"source port\" field of the tcp header.\nfunc (b TCP) SetSourcePort(port uint16) {\n- binary.BigEndian.PutUint16(b[srcPort:], port)\n+ binary.BigEndian.PutUint16(b[TCPSrcPortOffset:], port)\n}\n// SetDestinationPort sets the \"destination port\" field of the tcp header.\nfunc (b TCP) 
SetDestinationPort(port uint16) {\n- binary.BigEndian.PutUint16(b[dstPort:], port)\n+ binary.BigEndian.PutUint16(b[TCPDstPortOffset:], port)\n}\n// SetChecksum sets the checksum field of the tcp header.\nfunc (b TCP) SetChecksum(checksum uint16) {\n- binary.BigEndian.PutUint16(b[tcpChecksum:], checksum)\n+ binary.BigEndian.PutUint16(b[TCPChecksumOffset:], checksum)\n}\n// CalculateChecksum calculates the checksum of the tcp segment.\n@@ -258,20 +259,20 @@ func (b TCP) ParsedOptions() TCPOptions {\n}\nfunc (b TCP) encodeSubset(seq, ack uint32, flags uint8, rcvwnd uint16) {\n- binary.BigEndian.PutUint32(b[seqNum:], seq)\n- binary.BigEndian.PutUint32(b[ackNum:], ack)\n- b[tcpFlags] = flags\n- binary.BigEndian.PutUint16(b[winSize:], rcvwnd)\n+ binary.BigEndian.PutUint32(b[TCPSeqNumOffset:], seq)\n+ binary.BigEndian.PutUint32(b[TCPAckNumOffset:], ack)\n+ b[TCPFlagsOffset] = flags\n+ binary.BigEndian.PutUint16(b[TCPWinSizeOffset:], rcvwnd)\n}\n// Encode encodes all the fields of the tcp header.\nfunc (b TCP) Encode(t *TCPFields) {\nb.encodeSubset(t.SeqNum, t.AckNum, t.Flags, t.WindowSize)\n- binary.BigEndian.PutUint16(b[srcPort:], t.SrcPort)\n- binary.BigEndian.PutUint16(b[dstPort:], t.DstPort)\n- b[dataOffset] = (t.DataOffset / 4) << 4\n- binary.BigEndian.PutUint16(b[tcpChecksum:], t.Checksum)\n- binary.BigEndian.PutUint16(b[urgentPtr:], t.UrgentPointer)\n+ binary.BigEndian.PutUint16(b[TCPSrcPortOffset:], t.SrcPort)\n+ binary.BigEndian.PutUint16(b[TCPDstPortOffset:], t.DstPort)\n+ b[TCPDataOffset] = (t.DataOffset / 4) << 4\n+ binary.BigEndian.PutUint16(b[TCPChecksumOffset:], t.Checksum)\n+ binary.BigEndian.PutUint16(b[TCPUrgentPtrOffset:], t.UrgentPointer)\n}\n// EncodePartial updates a subset of the fields of the tcp header. It is useful\n@@ -290,18 +291,13 @@ func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32\nb.encodeSubset(seqnum, acknum, flags, rcvwnd)\n// Add the contributions of the passed-in fields to the checksum.\n- checksum = Checksum(b[seqNum:seqNum+8], checksum)\n- checksum = Checksum(b[winSize:winSize+2], checksum)\n+ checksum = Checksum(b[TCPSeqNumOffset:TCPSeqNumOffset+8], checksum)\n+ checksum = Checksum(b[TCPWinSizeOffset:TCPWinSizeOffset+2], checksum)\n// Encode the checksum.\nb.SetChecksum(^checksum)\n}\n-// TCPChecksumOffset returns offset of the checksum field.\n-func TCPChecksumOffset() uint16 {\n- return tcpChecksum\n-}\n-\n// ParseSynOptions parses the options received in a SYN segment and returns the\n// relevant ones. 
opts should point to the option part of the TCP Header.\nfunc ParseSynOptions(opts []byte, isAck bool) TCPSynOptions {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/endpoint.go", "new_path": "pkg/tcpip/link/fdbased/endpoint.go", "diff": "@@ -122,13 +122,14 @@ type Options struct {\nFD int\nMTU uint32\nEthernetHeader bool\n- ChecksumOffload bool\nClosedFunc func(*tcpip.Error)\nAddress tcpip.LinkAddress\nSaveRestore bool\nDisconnectOk bool\nGSOMaxSize uint32\nPacketDispatchMode PacketDispatchMode\n+ TXChecksumOffload bool\n+ RXChecksumOffload bool\n}\n// New creates a new fd-based endpoint.\n@@ -142,8 +143,12 @@ func New(opts *Options) tcpip.LinkEndpointID {\n}\ncaps := stack.LinkEndpointCapabilities(0)\n- if opts.ChecksumOffload {\n- caps |= stack.CapabilityChecksumOffload\n+ if opts.RXChecksumOffload {\n+ caps |= stack.CapabilityRXChecksumOffload\n+ }\n+\n+ if opts.TXChecksumOffload {\n+ caps |= stack.CapabilityTXChecksumOffload\n}\nhdrSize := 0\n@@ -527,12 +532,13 @@ func (e *InjectableEndpoint) Inject(protocol tcpip.NetworkProtocolNumber, vv buf\n}\n// NewInjectable creates a new fd-based InjectableEndpoint.\n-func NewInjectable(fd int, mtu uint32) (tcpip.LinkEndpointID, *InjectableEndpoint) {\n+func NewInjectable(fd int, mtu uint32, capabilities stack.LinkEndpointCapabilities) (tcpip.LinkEndpointID, *InjectableEndpoint) {\nsyscall.SetNonblock(fd, true)\ne := &InjectableEndpoint{endpoint: endpoint{\nfd: fd,\nmtu: mtu,\n+ caps: capabilities,\n}}\nreturn stack.RegisterLinkEndpoint(e), e\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/loopback/loopback.go", "new_path": "pkg/tcpip/link/loopback/loopback.go", "diff": "@@ -56,7 +56,7 @@ func (*endpoint) MTU() uint32 {\n// Capabilities implements stack.LinkEndpoint.Capabilities. Loopback advertises\n// itself as supporting checksum offload, but in reality it's just omitted.\nfunc (*endpoint) Capabilities() stack.LinkEndpointCapabilities {\n- return stack.CapabilityChecksumOffload | stack.CapabilitySaveRestore | stack.CapabilityLoopback\n+ return stack.CapabilityRXChecksumOffload | stack.CapabilityTXChecksumOffload | stack.CapabilitySaveRestore | stack.CapabilityLoopback\n}\n// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. 
Given that the\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/muxed/injectable.go", "new_path": "pkg/tcpip/link/muxed/injectable.go", "diff": "@@ -105,7 +105,7 @@ func (m *InjectableEndpoint) WriteRawPacket(dest tcpip.Address, packet []byte) *\n}\n// NewInjectableEndpoint creates a new multi-endpoint injectable endpoint.\n-func NewInjectableEndpoint(routes map[tcpip.Address]stack.InjectableLinkEndpoint, mtu uint32) (tcpip.LinkEndpointID, *InjectableEndpoint) {\n+func NewInjectableEndpoint(routes map[tcpip.Address]stack.InjectableLinkEndpoint) (tcpip.LinkEndpointID, *InjectableEndpoint) {\ne := &InjectableEndpoint{\nroutes: routes,\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/muxed/injectable_test.go", "new_path": "pkg/tcpip/link/muxed/injectable_test.go", "diff": "@@ -87,8 +87,8 @@ func makeTestInjectableEndpoint(t *testing.T) (*InjectableEndpoint, *os.File, tc\nif err != nil {\nt.Fatal(\"Failed to create socket pair:\", err)\n}\n- _, underlyingEndpoint := fdbased.NewInjectable(pair[1], 6500)\n+ _, underlyingEndpoint := fdbased.NewInjectable(pair[1], 6500, stack.CapabilityNone)\nroutes := map[tcpip.Address]stack.InjectableLinkEndpoint{dstIP: underlyingEndpoint}\n- _, endpoint := NewInjectableEndpoint(routes, 6500)\n+ _, endpoint := NewInjectableEndpoint(routes)\nreturn endpoint, os.NewFile(uintptr(pair[0]), \"test route end\"), dstIP\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -232,7 +232,15 @@ type LinkEndpointCapabilities uint\n// The following are the supported link endpoint capabilities.\nconst (\n- CapabilityChecksumOffload LinkEndpointCapabilities = 1 << iota\n+ CapabilityNone LinkEndpointCapabilities = 0\n+ // CapabilityTXChecksumOffload indicates that the link endpoint supports\n+ // checksum computation for outgoing packets and the stack can skip\n+ // computing checksums when sending packets.\n+ CapabilityTXChecksumOffload LinkEndpointCapabilities = 1 << iota\n+ // CapabilityRXChecksumOffload indicates that the link endpoint supports\n+ // checksum verification on received packets and that it's safe for the\n+ // stack to skip checksum verification.\n+ CapabilityRXChecksumOffload\nCapabilityResolutionRequired\nCapabilitySaveRestore\nCapabilityDisconnectOk\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -801,6 +801,9 @@ type TCPStats struct {\n// Timeouts is the number of times the RTO expired.\nTimeouts *StatCounter\n+\n+ // ChecksumErrors is the number of segments dropped due to bad checksums.\n+ ChecksumErrors *StatCounter\n}\n// UDPStats collects UDP-specific stats.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -595,7 +595,7 @@ func sendTCP(r *stack.Route, id stack.TransportEndpointID, data buffer.Vectorise\n// TCP header, then the kernel calculate a checksum of the\n// header and data and get the right sum of the TCP packet.\ntcp.SetChecksum(xsum)\n- } else if r.Capabilities()&stack.CapabilityChecksumOffload == 0 {\n+ } else if r.Capabilities()&stack.CapabilityTXChecksumOffload == 0 {\nxsum = header.ChecksumVV(data, xsum)\ntcp.SetChecksum(^tcp.CalculateChecksum(xsum))\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1447,6 +1447,13 @@ func (e *endpoint) HandlePacket(r *stack.Route, id 
stack.TransportEndpointID, vv\nreturn\n}\n+ if !s.csumValid {\n+ e.stack.Stats().MalformedRcvdPackets.Increment()\n+ e.stack.Stats().TCP.ChecksumErrors.Increment()\n+ s.decRef()\n+ return\n+ }\n+\ne.stack.Stats().TCP.ValidSegmentsReceived.Increment()\nif (s.flags & header.TCPFlagRst) != 0 {\ne.stack.Stats().TCP.ResetsReceived.Increment()\n@@ -1721,7 +1728,7 @@ func (e *endpoint) initGSO() {\npanic(fmt.Sprintf(\"Unknown netProto: %v\", e.netProto))\n}\ngso.NeedsCsum = true\n- gso.CsumOffset = header.TCPChecksumOffset()\n+ gso.CsumOffset = header.TCPChecksumOffset\ngso.MaxSize = e.route.GSOMaxSize()\ne.gso = gso\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/forwarder.go", "new_path": "pkg/tcpip/transport/tcp/forwarder.go", "diff": "@@ -68,7 +68,7 @@ func (f *Forwarder) HandlePacket(r *stack.Route, id stack.TransportEndpointID, n\ndefer s.decRef()\n// We only care about well-formed SYN packets.\n- if !s.parse() || s.flags != header.TCPFlagSyn {\n+ if !s.parse() || !s.csumValid || s.flags != header.TCPFlagSyn {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/protocol.go", "new_path": "pkg/tcpip/transport/tcp/protocol.go", "diff": "@@ -130,7 +130,7 @@ func (*protocol) HandleUnknownDestinationPacket(r *stack.Route, id stack.Transpo\ns := newSegment(r, id, vv)\ndefer s.decRef()\n- if !s.parse() {\n+ if !s.parse() || !s.csumValid {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/segment.go", "new_path": "pkg/tcpip/transport/tcp/segment.go", "diff": "@@ -45,6 +45,10 @@ type segment struct {\nackNumber seqnum.Value\nflags uint8\nwindow seqnum.Size\n+ // csum is only populated for received segments.\n+ csum uint16\n+ // csumValid is true if the csum in the received segment is valid.\n+ csumValid bool\n// parsedOptions stores the parsed values from the options in the segment.\nparsedOptions header.TCPOptions\n@@ -124,7 +128,13 @@ func (s *segment) logicalLen() seqnum.Size {\n// parse populates the sequence & ack numbers, flags, and window fields of the\n// segment from the TCP header stored in the data. It then updates the view to\n-// skip the data. 
Returns boolean indicating if the parsing was successful.\n+// skip the header.\n+//\n+// Returns boolean indicating if the parsing was successful.\n+//\n+// If checksum verification is not offloaded then parse also verifies the\n+// TCP checksum and stores the checksum and result of checksum verification in\n+// the csum and csumValid fields of the segment.\nfunc (s *segment) parse() bool {\nh := header.TCP(s.data.First())\n@@ -145,12 +155,27 @@ func (s *segment) parse() bool {\ns.options = []byte(h[header.TCPMinimumSize:offset])\ns.parsedOptions = header.ParseTCPOptions(s.options)\n+\n+ // Query the link capabilities to decide if checksum validation is\n+ // required.\n+ verifyChecksum := true\n+ if s.route.Capabilities()&stack.CapabilityRXChecksumOffload != 0 {\n+ s.csumValid = true\n+ verifyChecksum = false\ns.data.TrimFront(offset)\n+ }\n+ if verifyChecksum {\n+ s.csum = h.Checksum()\n+ xsum := s.route.PseudoHeaderChecksum(ProtocolNumber, uint16(s.data.Size()))\n+ xsum = h.CalculateChecksum(xsum)\n+ s.data.TrimFront(offset)\n+ xsum = header.ChecksumVV(s.data, xsum)\n+ s.csumValid = xsum == 0xffff\n+ }\ns.sequenceNumber = seqnum.Value(h.SequenceNumber())\ns.ackNumber = seqnum.Value(h.AckNumber())\ns.flags = h.Flags()\ns.window = seqnum.Size(h.WindowSize())\n-\nreturn true\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -2963,8 +2963,7 @@ func TestReceivedInvalidSegmentCountIncrement(t *testing.T) {\nRcvWnd: 30000,\n})\ntcpbuf := vv.First()[header.IPv4MinimumSize:]\n- // 12 is the TCP header data offset.\n- tcpbuf[12] = ((header.TCPMinimumSize - 1) / 4) << 4\n+ tcpbuf[header.TCPDataOffset] = ((header.TCPMinimumSize - 1) / 4) << 4\nc.SendSegment(vv)\n@@ -2973,6 +2972,32 @@ func TestReceivedInvalidSegmentCountIncrement(t *testing.T) {\n}\n}\n+func TestReceivedIncorrectChecksumIncrement(t *testing.T) {\n+ c := context.New(t, defaultMTU)\n+ defer c.Cleanup()\n+ c.CreateConnected(789, 30000, nil)\n+ stats := c.Stack().Stats()\n+ want := stats.TCP.ChecksumErrors.Value() + 1\n+ vv := c.BuildSegment([]byte{0x1, 0x2, 0x3}, &context.Headers{\n+ SrcPort: context.TestPort,\n+ DstPort: c.Port,\n+ Flags: header.TCPFlagAck,\n+ SeqNum: seqnum.Value(790),\n+ AckNum: c.IRS.Add(1),\n+ RcvWnd: 30000,\n+ })\n+ tcpbuf := vv.First()[header.IPv4MinimumSize:]\n+ // Overwrite a byte in the payload which should cause checksum\n+ // verification to fail.\n+ tcpbuf[(tcpbuf[header.TCPDataOffset]>>4)*4] = 0x4\n+\n+ c.SendSegment(vv)\n+\n+ if got := stats.TCP.ChecksumErrors.Value(); got != want {\n+ t.Errorf(\"got stats.TCP.ChecksumErrors.Value() = %d, want = %d\", got, want)\n+ }\n+}\n+\nfunc TestReceivedSegmentQueuing(t *testing.T) {\n// This test sends 200 segments containing a few bytes each to an\n// endpoint and checks that they're all received and acknowledged by\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -640,7 +640,7 @@ func sendUDP(r *stack.Route, data buffer.VectorisedView, localPort, remotePort u\n})\n// Only calculate the checksum if offloading isn't supported.\n- if r.Capabilities()&stack.CapabilityChecksumOffload == 0 {\n+ if r.Capabilities()&stack.CapabilityTXChecksumOffload == 0 {\nxsum := r.PseudoHeaderChecksum(ProtocolNumber, length)\nfor _, v := range data.Views() {\nxsum = header.Checksum(v, xsum)\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/network.go", "new_path": 
"runsc/boot/network.go", "diff": "@@ -142,6 +142,7 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\nAddress: mac,\nPacketDispatchMode: fdbased.PacketMMap,\nGSOMaxSize: link.GSOMaxSize,\n+ RXChecksumOffload: true,\n})\nlog.Infof(\"Enabling interface %q with id %d on addresses %+v (%v)\", link.Name, nicID, link.Addresses, mac)\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: Add TCP checksum verification. PiperOrigin-RevId: 242704699 Change-Id: I87db368ca343b3b4bf4f969b17d3aa4ce2f8bd4f
author: 259,853
date: 09.04.2019 11:30:35
timezone: 25,200
hash: 93b3c9b76c16104cbb5cc55b6f2339cb43c356b5
message: runsc: set UID and GID if gofer is executed in a new user namespace Otherwise, we will not have capabilities in the user namespace. And this patch adds the noexec option for mounts.
[ { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -866,8 +866,13 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund\n// Setup any uid/gid mappings, and create or join the configured user\n// namespace so the gofer's view of the filesystem aligns with the\n// users in the sandbox.\n- nss = append(nss, specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)...)\n+ userNS := specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)\n+ nss = append(nss, userNS...)\nspecutils.SetUIDGIDMappings(cmd, spec)\n+ if len(userNS) != 0 {\n+ // We need to set UID and GID to have capabilities in a new user namespace.\n+ cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}\n+ }\n// Start the gofer in the given namespace.\nlog.Debugf(\"Starting gofer: %s %v\", binPath, args)\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -1250,6 +1250,82 @@ func TestReadonlyRoot(t *testing.T) {\n}\n}\n+func TestUIDMap(t *testing.T) {\n+ for _, conf := range configs(noOverlay...) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+ testDir, err := ioutil.TempDir(testutil.TmpDir(), \"test-mount\")\n+ if err != nil {\n+ t.Fatal(err)\n+ }\n+ defer os.RemoveAll(testDir)\n+ testFile := path.Join(testDir, \"testfile\")\n+\n+ spec := testutil.NewSpecWithArgs(\"touch\", \"/tmp/testfile\")\n+ uid := os.Getuid()\n+ gid := os.Getgid()\n+ spec.Linux = &specs.Linux{\n+ Namespaces: []specs.LinuxNamespace{\n+ {Type: specs.UserNamespace},\n+ {Type: specs.PIDNamespace},\n+ {Type: specs.MountNamespace},\n+ },\n+ UIDMappings: []specs.LinuxIDMapping{\n+ {\n+ ContainerID: 0,\n+ HostID: uint32(uid),\n+ Size: 1,\n+ },\n+ },\n+ GIDMappings: []specs.LinuxIDMapping{\n+ {\n+ ContainerID: 0,\n+ HostID: uint32(gid),\n+ Size: 1,\n+ },\n+ },\n+ }\n+\n+ spec.Mounts = append(spec.Mounts, specs.Mount{\n+ Destination: \"/tmp\",\n+ Source: testDir,\n+ Type: \"bind\",\n+ })\n+\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create, start and wait for the container.\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ ws, err := c.Wait()\n+ if err != nil {\n+ t.Fatalf(\"error waiting on container: %v\", err)\n+ }\n+ if !ws.Exited() || ws.ExitStatus() != 0 {\n+ t.Fatalf(\"container failed, waitStatus: %v\", ws)\n+ }\n+ st := syscall.Stat_t{}\n+ if err := syscall.Stat(testFile, &st); err != nil {\n+ t.Fatalf(\"error stat /testfile: %v\", err)\n+ }\n+\n+ if st.Uid != uint32(uid) || st.Gid != uint32(gid) {\n+ t.Fatalf(\"UID: %d (%d) GID: %d (%d)\", st.Uid, uid, st.Gid, gid)\n+ }\n+ }\n+}\n+\nfunc TestReadonlyMount(t *testing.T) {\nfor _, conf := range configs(overlay) {\nt.Logf(\"Running test with conf: %+v\", conf)\n" } ]
language: Go
license: Apache License 2.0
repo: google/gvisor
original_message: runsc: set UID and GID if gofer is executed in a new user namespace Otherwise, we will not have capabilities in the user namespace. And this patch adds the noexec option for mounts. https://github.com/google/gvisor/issues/145 PiperOrigin-RevId: 242706519 Change-Id: I1b78b77d6969bd18038c71616e8eb7111b71207c
259,881
09.04.2019 12:51:13
25,200
3513350de63079505a1ad142942cc50ae8e511dc
Specify /proc explicitly
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/security.md", "new_path": "content/docs/architecture_guide/security.md", "diff": "@@ -37,8 +37,8 @@ might involve some combination of the following:\n1. Racing with multiple threads in order to hit specific code paths.\nFor example, for the [Dirty Cow][dirtycow] privilege escalation bug, an\n-application would open a specific file in proc or use a specific `ptrace` system\n-call, and use multiple threads in order to trigger a race condition when\n+application would open a specific file in `/proc` or use a specific `ptrace`\n+system call, and use multiple threads in order to trigger a race condition when\ntouching a fresh page of memory. The attacker then gains control over a page of\nmemory belonging to the system. With additional privileges or access to\nprivileged data in the kernel, an attacker will often be able to employ\n" } ]
Go
Apache License 2.0
google/gvisor
Specify /proc explicitly
259,859
09.04.2019 14:56:04
25,200
b3b140ea4f9e1b632463cbf83c97f58464eceeac
syscalls: sendfile: limit the count to MAX_RW_COUNT Per the sendfile spec and the Linux kernel code, we should limit the count argument to 'MAX_RW_COUNT'. This patch exports 'MAX_RW_COUNT' in the kernel package and uses it in the implementation of the sendfile syscall.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_usermem.go", "new_path": "pkg/sentry/kernel/task_usermem.go", "diff": "@@ -22,10 +22,10 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n-// _MAX_RW_COUNT is the maximum size in bytes of a single read or write.\n+// MAX_RW_COUNT is the maximum size in bytes of a single read or write.\n// Reads and writes that exceed this size may be silently truncated.\n// (Linux: include/linux/fs.h:MAX_RW_COUNT)\n-var _MAX_RW_COUNT = int(usermem.Addr(math.MaxInt32).RoundDown())\n+var MAX_RW_COUNT = int(usermem.Addr(math.MaxInt32).RoundDown())\n// Activate ensures that the task has an active address space.\nfunc (t *Task) Activate() {\n@@ -187,9 +187,9 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error\n// - If any AddrRange would include addresses outside the application address\n// range, CopyInIovecs returns EFAULT.\n//\n-// - The combined length of all AddrRanges is limited to _MAX_RW_COUNT. If the\n+// - The combined length of all AddrRanges is limited to MAX_RW_COUNT. If the\n// combined length of all AddrRanges would otherwise exceed this amount, ranges\n-// beyond _MAX_RW_COUNT are silently truncated.\n+// beyond MAX_RW_COUNT are silently truncated.\n//\n// Preconditions: As for usermem.IO.CopyIn. The caller must be running on the\n// task goroutine. t's AddressSpace must be active.\n@@ -228,7 +228,7 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange\nif numIovecs == 1 {\n// Special case to avoid allocating dst.\n- return usermem.AddrRangeSeqOf(ar).TakeFirst(_MAX_RW_COUNT), nil\n+ return usermem.AddrRangeSeqOf(ar).TakeFirst(MAX_RW_COUNT), nil\n}\ndst = append(dst, ar)\n@@ -239,11 +239,11 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange\nreturn usermem.AddrRangeSeq{}, syserror.ENOSYS\n}\n- // Truncate to _MAX_RW_COUNT.\n+ // Truncate to MAX_RW_COUNT.\nvar total uint64\nfor i := range dst {\ndstlen := uint64(dst[i].Length())\n- if rem := uint64(_MAX_RW_COUNT) - total; rem < dstlen {\n+ if rem := uint64(MAX_RW_COUNT) - total; rem < dstlen {\ndst[i].End -= usermem.Addr(dstlen - rem)\ndstlen = rem\n}\n@@ -256,16 +256,16 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange\n// SingleIOSequence returns a usermem.IOSequence representing [addr,\n// addr+length) in t's address space. If this contains addresses outside the\n// application address range, it returns EFAULT. If length exceeds\n-// _MAX_RW_COUNT, the range is silently truncated.\n+// MAX_RW_COUNT, the range is silently truncated.\n//\n// SingleIOSequence is analogous to Linux's\n// lib/iov_iter.c:import_single_range(). (Note that the non-vectorized read and\n// write syscalls in Linux do not use import_single_range(). 
However they check\n// access_ok() in fs/read_write.c:vfs_read/vfs_write, and overflowing address\n-// ranges are truncated to _MAX_RW_COUNT by fs/read_write.c:rw_verify_area().)\n+// ranges are truncated to MAX_RW_COUNT by fs/read_write.c:rw_verify_area().)\nfunc (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) {\n- if length > _MAX_RW_COUNT {\n- length = _MAX_RW_COUNT\n+ if length > MAX_RW_COUNT {\n+ length = MAX_RW_COUNT\n}\nar, ok := t.MemoryManager().CheckIORange(addr, int64(length))\nif !ok {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -2002,6 +2002,10 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\nreturn 0, nil, syserror.EINVAL\n}\n+ if count > int64(kernel.MAX_RW_COUNT) {\n+ count = int64(kernel.MAX_RW_COUNT)\n+ }\n+\n// Get files.\noutFile := t.FDMap().GetFile(outFD)\nif outFile == nil {\n" } ]
Go
Apache License 2.0
google/gvisor
syscalls: sendfile: limit the count to MAX_RW_COUNT Per the sendfile spec and the Linux kernel code, we should limit the count argument to 'MAX_RW_COUNT'. This patch exports 'MAX_RW_COUNT' in the kernel package and uses it in the implementation of the sendfile syscall. Signed-off-by: Li Qiang <[email protected]> Change-Id: I1086fec0685587116984555abd22b07ac233fbd2 PiperOrigin-RevId: 242745831
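To make the truncation rule concrete, here is a small stand-alone Go sketch (not the sentry code) that computes a MAX_RW_COUNT-style cap, INT_MAX rounded down to the page size, and silently clamps an oversized count the way this patch does for sendfile.

```go
package main

import (
	"fmt"
	"math"
	"os"
)

// maxRWCount mirrors Linux's MAX_RW_COUNT: INT_MAX rounded down to the
// system page size, the largest size a single read/write may transfer.
var maxRWCount = int64(math.MaxInt32) &^ (int64(os.Getpagesize()) - 1)

// clampCount truncates a sendfile-style count argument instead of
// rejecting oversized requests.
func clampCount(count int64) int64 {
	if count > maxRWCount {
		return maxRWCount
	}
	return count
}

func main() {
	fmt.Println(clampCount(1 << 40)) // silently truncated to maxRWCount
	fmt.Println(clampCount(4096))    // small counts pass through unchanged
}
```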
259,881
09.04.2019 16:25:03
25,200
0e14e48b84fd8f759bb5a0f5261cdb090d1ffe90
Match multi-word State From a recent test failure: "State:\tD (disk sleep)\n" "disk sleep" does not match \w+. We need to allow spaces.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -99,8 +99,11 @@ std::vector<std::string> saved_argv; // NOLINT\nvoid CompareProcessState(absl::string_view state, int pid) {\nauto status_file = ASSERT_NO_ERRNO_AND_VALUE(\nGetContents(absl::StrCat(\"/proc/\", pid, \"/status\")));\n- EXPECT_THAT(status_file, ContainsRegex(absl::StrCat(\"State:.[\", state,\n- \"]\\\\s+\\\\(\\\\w+\\\\)\")));\n+ // N.B. POSIX extended regexes don't support shorthand character classes (\\w)\n+ // inside of brackets.\n+ EXPECT_THAT(status_file,\n+ ContainsRegex(absl::StrCat(\"State:.[\", state,\n+ R\"EOL(]\\s+\\([a-zA-Z ]+\\))EOL\")));\n}\n// Run callbacks while a subprocess is running, zombied, and/or exited.\n" } ]
Go
Apache License 2.0
google/gvisor
Match multi-word State From a recent test failure: "State:\tD (disk sleep)\n" "disk sleep" does not match \w+. We need to allow spaces. PiperOrigin-RevId: 242762469 Change-Id: Ic8d05a16669412a72c1e76b498373e5b22fe64c4
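The equivalent pattern is easy to check in isolation; the Go program below is purely illustrative (the actual test is C++/gtest using POSIX extended regexes) and shows that a character class with an explicit space matches both single-word and multi-word `/proc/<pid>/status` State descriptions.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Both single-word and multi-word state descriptions should match.
	lines := []string{
		"State:\tS (sleeping)\n",
		"State:\tD (disk sleep)\n",
	}
	// [a-zA-Z ]+ instead of \w+ so spaces inside the parentheses are allowed.
	re := regexp.MustCompile(`State:.[SD]\s+\([a-zA-Z ]+\)`)
	for _, l := range lines {
		fmt.Printf("%q matches: %v\n", l, re.MatchString(l))
	}
}
```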
259,912
10.04.2019 10:48:28
25,200
7140b1fdca1cc9c9c711955a49e6e7fc41f339d9
Fixed /proc/cpuinfo permissions This also applies these permissions to other static proc files.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode.go", "new_path": "pkg/sentry/fs/fsutil/inode.go", "diff": "@@ -452,3 +452,15 @@ type InodeGenericChecker struct{}\nfunc (InodeGenericChecker) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {\nreturn fs.ContextCanAccessFile(ctx, inode, p)\n}\n+\n+// InodeDenyWriteChecker implements fs.InodeOperations.Check which denies all\n+// write operations.\n+type InodeDenyWriteChecker struct{}\n+\n+// Check implements fs.InodeOperations.Check.\n+func (InodeDenyWriteChecker) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {\n+ if p.Write {\n+ return false\n+ }\n+ return fs.ContextCanAccessFile(ctx, inode, p)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/inode.go", "new_path": "pkg/sentry/fs/proc/inode.go", "diff": "@@ -53,7 +53,7 @@ func (i *taskOwnedInodeOps) UnstableAttr(ctx context.Context, inode *fs.Inode) (\n//\n// +stateify savable\ntype staticFileInodeOps struct {\n- fsutil.InodeGenericChecker `state:\"nosave\"`\n+ fsutil.InodeDenyWriteChecker `state:\"nosave\"`\nfsutil.InodeNoExtendedAttributes `state:\"nosave\"`\nfsutil.InodeNoopRelease `state:\"nosave\"`\nfsutil.InodeNoopTruncate `state:\"nosave\"`\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -725,6 +725,10 @@ TEST(ProcCpuinfo, RequiredFieldsArePresent) {\n}\n}\n+TEST(ProcCpuinfo, DeniesWrite) {\n+ EXPECT_THAT(open(\"/proc/cpuinfo\", O_WRONLY), SyscallFailsWithErrno(EACCES));\n+}\n+\n// Sanity checks that uptime is present.\nTEST(ProcUptime, IsPresent) {\nstd::string proc_uptime = ASSERT_NO_ERRNO_AND_VALUE(GetContents(\"/proc/uptime\"));\n" } ]
Go
Apache License 2.0
google/gvisor
Fixed /proc/cpuinfo permissions This also applies these permissions to other static proc files. Change-Id: I4167e585fed49ad271aa4e1f1260babb3239a73d PiperOrigin-RevId: 242898575
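A stripped-down sketch of the checker pattern introduced here, using hypothetical local types rather than gVisor's fs package: any permission request containing write access is refused up front, and everything else falls through to the normal check.

```go
package main

import "fmt"

// PermMask is a simplified stand-in for a filesystem permission request.
type PermMask struct {
	Read, Write, Execute bool
}

// checkDenyWrite mimics the InodeDenyWriteChecker pattern: a request that
// includes write access is rejected before the generic permission check runs.
func checkDenyWrite(p PermMask, genericCheck func(PermMask) bool) bool {
	if p.Write {
		return false
	}
	return genericCheck(p)
}

func main() {
	allowAll := func(PermMask) bool { return true }
	fmt.Println(checkDenyWrite(PermMask{Read: true}, allowAll))  // true
	fmt.Println(checkDenyWrite(PermMask{Write: true}, allowAll)) // false, writes denied
}
```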
259,891
10.04.2019 12:35:43
25,200
f7aff0aaa4320505933df838cf5b551b69d5e513
Allow threads with CAP_SYS_RESOURCE to raise hard rlimits.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_map_test.go", "new_path": "pkg/sentry/kernel/fd_map_test.go", "diff": "@@ -40,7 +40,7 @@ func newTestFDMap() *FDMap {\nfunc TestFDMapMany(t *testing.T) {\nfile := filetest.NewTestFile(t)\nlimitSet := limits.NewLimitSet()\n- limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD})\n+ limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}, true /* privileged */)\nf := newTestFDMap()\nfor i := 0; i < maxFD; i++ {\n@@ -64,7 +64,7 @@ func TestFDMapMany(t *testing.T) {\nfunc TestFDMap(t *testing.T) {\nfile := filetest.NewTestFile(t)\nlimitSet := limits.NewLimitSet()\n- limitSet.Set(limits.NumberOfFiles, limits.Limit{1, maxFD})\n+ limitSet.Set(limits.NumberOfFiles, limits.Limit{1, maxFD}, true /* privileged */)\nf := newTestFDMap()\nif _, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err != nil {\n@@ -76,7 +76,7 @@ func TestFDMap(t *testing.T) {\n}\nlargeLimit := limits.Limit{maxFD, maxFD}\n- limitSet.Set(limits.NumberOfFiles, largeLimit)\n+ limitSet.Set(limits.NumberOfFiles, largeLimit, true /* privileged */)\nif fd, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err != nil {\nt.Fatalf(\"Adding an FD to a resized map: got %v, want nil\", err)\n@@ -117,7 +117,7 @@ func TestDescriptorFlags(t *testing.T) {\nfile := filetest.NewTestFile(t)\nf := newTestFDMap()\nlimitSet := limits.NewLimitSet()\n- limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD})\n+ limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}, true /* privileged */)\norigFlags := FDFlags{CloseOnExec: true}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/limits/limits.go", "new_path": "pkg/sentry/limits/limits.go", "diff": "@@ -113,13 +113,17 @@ func (l *LimitSet) SetUnchecked(t LimitType, v Limit) {\n}\n// Set assigns value v to resource of LimitType t and returns the old value.\n-func (l *LimitSet) Set(t LimitType, v Limit) (Limit, error) {\n+// privileged should be true only when either the caller has CAP_SYS_RESOURCE\n+// or when creating limits for a new kernel.\n+func (l *LimitSet) Set(t LimitType, v Limit, privileged bool) (Limit, error) {\nl.mu.Lock()\ndefer l.mu.Unlock()\n+\n// If a limit is already set, make sure the new limit doesn't\n// exceed the previous max limit.\nif _, ok := l.data[t]; ok {\n- if l.data[t].Max < v.Max {\n+ // Unprivileged users can only lower their hard limits.\n+ if l.data[t].Max < v.Max && !privileged {\nreturn Limit{}, syscall.EPERM\n}\nif v.Cur > v.Max {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/limits/limits_test.go", "new_path": "pkg/sentry/limits/limits_test.go", "diff": "@@ -20,18 +20,24 @@ import (\n)\nfunc TestSet(t *testing.T) {\n- ls := NewLimitSet()\n- ls.Set(1, Limit{Cur: 50, Max: 50})\n- if _, err := ls.Set(1, Limit{Cur: 20, Max: 50}); err != nil {\n- t.Fatalf(\"Tried to lower Limit to valid new value: got %v, wanted nil\", err)\n- }\n- if _, err := ls.Set(1, Limit{Cur: 20, Max: 60}); err != syscall.EPERM {\n- t.Fatalf(\"Tried to raise limit.Max to invalid higher value: got %v, wanted syscall.EPERM\", err)\n+ testCases := []struct {\n+ limit Limit\n+ privileged bool\n+ expectedErr error\n+ }{\n+ {limit: Limit{Cur: 50, Max: 50}, privileged: false, expectedErr: nil},\n+ {limit: Limit{Cur: 20, Max: 50}, privileged: false, expectedErr: nil},\n+ {limit: Limit{Cur: 20, Max: 60}, privileged: false, expectedErr: syscall.EPERM},\n+ {limit: Limit{Cur: 60, Max: 50}, privileged: false, expectedErr: syscall.EINVAL},\n+ {limit: Limit{Cur: 11, Max: 10}, privileged: false, 
expectedErr: syscall.EINVAL},\n+ {limit: Limit{Cur: 20, Max: 60}, privileged: true, expectedErr: nil},\n}\n- if _, err := ls.Set(1, Limit{Cur: 60, Max: 50}); err != syscall.EINVAL {\n- t.Fatalf(\"Tried to raise limit.Cur to invalid higher value: got %v, wanted syscall.EINVAL\", err)\n+\n+ ls := NewLimitSet()\n+ for _, tc := range testCases {\n+ if _, err := ls.Set(1, tc.limit, tc.privileged); err != tc.expectedErr {\n+ t.Fatalf(\"Tried to set Limit to %+v and privilege %t: got %v, wanted %v\", tc.limit, tc.privileged, err, tc.expectedErr)\n}\n- if _, err := ls.Set(1, Limit{Cur: 11, Max: 10}); err != syscall.EINVAL {\n- t.Fatalf(\"Tried to set new limit with Cur > Max: got %v, wanted syscall.EINVAL\", err)\n}\n+\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/limits/linux.go", "new_path": "pkg/sentry/limits/linux.go", "diff": "@@ -95,6 +95,6 @@ func NewLinuxDistroLimitSet() (*LimitSet, error) {\n// 1,048,576 ought to be enough for anyone.\nl := ls.Get(ProcessCount)\nl.Cur = 1 << 20\n- ls.Set(ProcessCount, l)\n+ ls.Set(ProcessCount, l, true /* privileged */)\nreturn ls, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/mm_test.go", "new_path": "pkg/sentry/mm/mm_test.go", "diff": "@@ -70,7 +70,7 @@ func TestUsageASUpdates(t *testing.T) {\nfunc TestBrkDataLimitUpdates(t *testing.T) {\nlimitSet := limits.NewLimitSet()\n- limitSet.Set(limits.Data, limits.Limit{}) // zero RLIMIT_DATA\n+ limitSet.Set(limits.Data, limits.Limit{}, true /* privileged */) // zero RLIMIT_DATA\nctx := contexttest.WithLimitSet(contexttest.Context(t), limitSet)\nmm := testMemoryManager(ctx)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_rlimit.go", "new_path": "pkg/sentry/syscalls/linux/sys_rlimit.go", "diff": "@@ -106,7 +106,13 @@ func prlimit64(t *kernel.Task, resource limits.LimitType, newLim *limits.Limit)\nif _, ok := setableLimits[resource]; !ok {\nreturn limits.Limit{}, syserror.EPERM\n}\n- oldLim, err := t.ThreadGroup().Limits().Set(resource, *newLim)\n+\n+ // \"A privileged process (under Linux: one with the CAP_SYS_RESOURCE\n+ // capability in the initial user namespace) may make arbitrary changes\n+ // to either limit value.\"\n+ privileged := t.HasCapabilityIn(linux.CAP_SYS_RESOURCE, t.Kernel().RootUserNamespace())\n+\n+ oldLim, err := t.ThreadGroup().Limits().Set(resource, *newLim, privileged)\nif err != nil {\nreturn limits.Limit{}, err\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/rlimits.cc", "new_path": "test/syscalls/linux/rlimits.cc", "diff": "@@ -25,15 +25,12 @@ namespace {\nTEST(RlimitTest, SetRlimitHigher) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_RESOURCE)));\n- SKIP_IF(!IsRunningOnGvisor());\nstruct rlimit rl = {};\nEXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n- // TODO: Even with CAP_SYS_RESOURCE, gVisor does not allow\n- // setting a higher rlimit.\nrl.rlim_max++;\n- EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n}\nTEST(RlimitTest, UnprivilegedSetRlimit) {\n@@ -56,6 +53,16 @@ TEST(RlimitTest, UnprivilegedSetRlimit) {\nEXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallFailsWithErrno(EPERM));\n}\n+TEST(RlimitTest, SetSoftRlimitAboveHard) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_RESOURCE)));\n+\n+ struct rlimit rl = {};\n+ EXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n+\n+ rl.rlim_cur = rl.rlim_max + 1;\n+ EXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), 
SyscallFailsWithErrno(EINVAL));\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Allow threads with CAP_SYS_RESOURCE to raise hard rlimits. PiperOrigin-RevId: 242919489 Change-Id: Ie3267b3bcd8a54b54bc16a6556369a19e843376f
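The rule reduces to a few lines; this stand-alone sketch (hypothetical types, not the limits package) lets a privileged caller raise the hard limit while unprivileged callers may only lower it, and still rejects a soft limit above the hard limit.

```go
package main

import (
	"errors"
	"fmt"
)

// Limit is a soft (Cur) and hard (Max) resource limit pair.
type Limit struct {
	Cur, Max uint64
}

var (
	errPerm    = errors.New("EPERM: unprivileged caller may not raise the hard limit")
	errInvalid = errors.New("EINVAL: soft limit above hard limit")
)

// set mirrors the rule from this commit: only privileged callers (those with
// CAP_SYS_RESOURCE in the root user namespace) may raise the hard limit.
func set(old, want Limit, privileged bool) (Limit, error) {
	if want.Max > old.Max && !privileged {
		return old, errPerm
	}
	if want.Cur > want.Max {
		return old, errInvalid
	}
	return want, nil
}

func main() {
	cur := Limit{Cur: 1024, Max: 4096}
	fmt.Println(set(cur, Limit{Cur: 1024, Max: 8192}, false)) // EPERM
	fmt.Println(set(cur, Limit{Cur: 1024, Max: 8192}, true))  // allowed
}
```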
259,891
10.04.2019 16:33:44
25,200
c8368e477b8f2dedaadacbd6efbb455879c9b1d6
rlimits test: don't exceed nr_open. Even superuser cannot raise RLIMIT_NOFILE above /proc/sys/fs/nr_open, so start the test by lowering the limits before raising.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/rlimits.cc", "new_path": "test/syscalls/linux/rlimits.cc", "diff": "@@ -29,6 +29,12 @@ TEST(RlimitTest, SetRlimitHigher) {\nstruct rlimit rl = {};\nEXPECT_THAT(getrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n+ // Lower the rlimit first, as it may be equal to /proc/sys/fs/nr_open, in\n+ // which case even users with CAP_SYS_RESOURCE can't raise it.\n+ rl.rlim_cur--;\n+ rl.rlim_max--;\n+ ASSERT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n+\nrl.rlim_max++;\nEXPECT_THAT(setrlimit(RLIMIT_NOFILE, &rl), SyscallSucceeds());\n}\n" } ]
Go
Apache License 2.0
google/gvisor
rlimits test: don't exceed nr_open. Even superuser cannot raise RLIMIT_NOFILE above /proc/sys/fs/nr_open, so start the test by lowering the limits before raising. Change-Id: Ied6021c64178a6cb9098088a1a3384db523a226f PiperOrigin-RevId: 242965249
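In Go the same dance looks roughly like this (a sketch; the raise only succeeds for a process with CAP_SYS_RESOURCE): read RLIMIT_NOFILE, lower both values so the hard limit is no longer pinned at /proc/sys/fs/nr_open, then raise the hard limit again.

```go
package main

import (
	"log"
	"syscall"
)

func main() {
	var rl syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatalf("getrlimit: %v", err)
	}

	// Lower both limits first: if the current hard limit already equals
	// /proc/sys/fs/nr_open, even CAP_SYS_RESOURCE cannot raise it further.
	rl.Cur--
	rl.Max--
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatalf("lowering RLIMIT_NOFILE: %v", err)
	}

	// Now raising the hard limit back by one should succeed for a
	// sufficiently privileged process.
	rl.Max++
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatalf("raising RLIMIT_NOFILE: %v", err)
	}
	log.Printf("RLIMIT_NOFILE now {cur: %d, max: %d}", rl.Cur, rl.Max)
}
```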
259,992
04.04.2019 10:45:10
25,200
644a73cb468acca68a78ae0a881c7eb5bd16e750
Added "Powered by gVisor" image to footer
[ { "change_type": "MODIFY", "old_path": "layouts/partials/footer.html", "new_path": "layouts/partials/footer.html", "diff": "<div class=\"col-6 col-sm-4 text-right text-xs-center order-sm-3\">\n{{ with $links }}\n{{ with index . \"developer\"}}\n+ <ul class=\"list-inline mb-0\">\n{{ template \"footer-links-block\" . }}\n{{ end }}\n+ <li class=\"list-inline-item mx-2\" >&nbsp</li>\n+ <li class=\"list-inline-item mx-2\" >\n+ <img src=\"/img/powered-gvisor.png\" alt=\"Powered by gVisor\">\n+ </li>\n+ </ul>\n{{ end }}\n</div>\n<div class=\"col-12 col-sm-4 text-center py-2 order-sm-2\">\n</div>\n</footer>\n{{ define \"footer-links-block\" }}\n-<ul class=\"list-inline mb-0\">\n{{ range . }}\n<li class=\"list-inline-item mx-2 h3\" data-toggle=\"tooltip\" data-placement=\"top\" title=\"{{ .name }}\">\n<a class=\"text-white\" target=\"_blank\" rel=\"noopener\" href=\"{{ .url }}\">\n</a>\n</li>\n{{ end }}\n-</ul>\n{{ end }}\n" }, { "change_type": "ADD", "old_path": "static/img/powered-gvisor.png", "new_path": "static/img/powered-gvisor.png", "diff": "Binary files /dev/null and b/static/img/powered-gvisor.png differ\n" } ]
Go
Apache License 2.0
google/gvisor
Added "Powered by gVisor" image to footer
259,858
11.04.2019 13:35:34
25,200
fab6352ac8de0b2c1e1e01563602912b69bdb249
README: add build badge
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -36,6 +36,8 @@ be found at [gvisor.dev][gvisor-dev].\n## Installing from source\n+[![Status](https://storage.googleapis.com/gvisor-build-badges/build.svg)](https://storage.googleapis.com/gvisor-build-badges/build.html)\n+\ngVisor currently requires x86\\_64 Linux to build, though support for other\narchitectures may become available in the future.\n" } ]
Go
Apache License 2.0
google/gvisor
README: add build badge Change-Id: Ie6b73ac729c8c85b1229e09da5b113be9780fa95 PiperOrigin-RevId: 243131814
259,858
11.04.2019 14:17:03
25,200
efacb8d900cd7b5ca53e5a96b65ba78a368efc02
CONTRIBUTING: add style guide pointer
[ { "change_type": "MODIFY", "old_path": "CONTRIBUTING.md", "new_path": "CONTRIBUTING.md", "diff": "@@ -32,7 +32,9 @@ directory tree.\n### Coding Guidelines\n-All code should conform to the [Go style guidelines][gostyle].\n+All Go code should conform to the [Go style guidelines][gostyle]. C++ code\n+should conform to the [Google C++ Style Guide][cppstyle] and the guidelines\n+described for [tests][teststyle].\nAs a secure runtime, we need to maintain the safety of all of code included in\ngVisor. The following rules help mitigate issues.\n@@ -130,8 +132,10 @@ Contributions made by corporations are covered by a different agreement than the\none above, the\n[Software Grant and Corporate Contributor License Agreement][gccla].\n+[cppstyle]: https://google.github.io/styleguide/cppguide.html\n[gcla]: https://cla.developers.google.com/about/google-individual\n[gccla]: https://cla.developers.google.com/about/google-corporate\n[gerrit]: https://gvisor-review.googlesource.com\n[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments\n[repo]: https://gvisor.googlesource.com\n+[teststyle]: ./test/\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/README.md", "new_path": "test/syscalls/README.md", "diff": "@@ -92,19 +92,16 @@ fixtures.\nA test utility should be created when there is more than one test that requires\nthat same functionality, otherwise the class should be test local.\n-\n## Save/Restore support in tests\n+\ngVisor supports save/restore, and our syscall tests are written in a way to\nenable saving/restoring at certain points. Hence, there are calls to\n`MaybeSave`, and certain tests that should not trigger saves are named with\n`NoSave`.\nHowever, the current open-source test runner does not yet support triggering\n-save/restore, so these functions and annotations have no effect on the\n-open-source tests.\n-\n-We plan on extending our open-source test runner to trigger save/restore. Until\n-then, these functions and annotations should be ignored.\n-\n+save/restore, so these functions and annotations have no effect on the tests. We\n+plan on extending the test runner to trigger save/restore. Until then, these\n+functions and annotations should be ignored.\n[googletest]: https://github.com/abseil/googletest\n" } ]
Go
Apache License 2.0
google/gvisor
CONTRIBUTING: add style guide pointer Change-Id: I93a78a6b2bb2eaa69046c6cfecee2e4cfcf20e44 PiperOrigin-RevId: 243140359
259,858
11.04.2019 18:54:14
25,200
4e9c131c589d02cb3360d8cd43449e5a58a8e882
Use HUGO_ENV="production" to avoid noindex header
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -43,7 +43,7 @@ $(GO_TARGET): public $(GO_SOURCE)\ncd cmd/gvisor-website && find . -name \"*.go\" -exec cp --parents \\{\\} ../../public \\;\npublic/static: node_modules config.toml $(shell find archetypes assets content themes -type f | sed 's/ /\\\\ /g')\n- $(HUGO)\n+ HUGO_ENV=\"production\" $(HUGO)\nnode_modules: package.json package-lock.json\n# Use npm ci because npm install will update the package-lock.json.\n" } ]
Go
Apache License 2.0
google/gvisor
Use HUGO_ENV="production" to avoid noindex header
259,944
12.04.2019 14:55:11
25,200
14f1de4a45daa75ef016fabb56d86cbd9b902504
Also push the shim v2 binary to the GCS bucket.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -7,7 +7,9 @@ SOURCES=$(shell find cmd/ pkg/ vendor/ -name '*.go')\nDEPLOY_PATH=cri-containerd-staging/gvisor-containerd-shim\nVERSION=$(shell git rev-parse HEAD)\n-all: bin/gvisor-containerd-shim bin/containerd-shim-runsc-v1\n+all: binaries\n+\n+binaries: bin/gvisor-containerd-shim bin/containerd-shim-runsc-v1\nbin/gvisor-containerd-shim: $(SOURCES)\nCGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/gvisor-containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/gvisor-containerd-shim\n@@ -27,6 +29,7 @@ uninstall:\nclean:\nrm -rf bin/*\n-push: bin/gvisor-containerd-shim\n+push: binaries\ngsutil cp ./bin/gvisor-containerd-shim gs://$(DEPLOY_PATH)/gvisor-containerd-shim-$(VERSION)\n- echo \"gvisor-containerd-shim-$(VERSION)\" | gsutil cp - \"gs://$(DEPLOY_PATH)/latest\"\n+ gsutil cp ./bin/containerd-shim-runsc-v1 gs://$(DEPLOY_PATH)/containerd-shim-runsc-v1-$(VERSION)\n+ echo \"$(VERSION)\" | gsutil cp - \"gs://$(DEPLOY_PATH)/latest\"\n" } ]
Go
Apache License 2.0
google/gvisor
Also push the shim v2 binary to the GCS bucket. (#23) Signed-off-by: Lantao Liu <[email protected]>
259,884
14.04.2019 18:51:22
14,400
676d4dc5a31ac9e2edec8cd1ee051f61606c66a7
Add troubleshooting section to README (fixes #28)
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -55,10 +55,24 @@ If you have Go installed you can run a local version of the website via the\n`public/` directory.\n```\n-{\ncd public/\ngo run main.go\n-}\n```\nAccess the site at http://localhost:8080\n+\n+## Troubleshooting\n+\n+#### I get errors when building the website.\n+\n+If you get the following errors you should check that you have the \"extended\"\n+version of Hugo. This is the version of hugo named \"hugo\\_extended\" on the\n+[releases page](https://github.com/gohugoio/hugo/releases).\n+\n+```\n+ERROR 2019/04/03 11:25:58 Failed to add template \"partials/navbar.html\" in path \"/home/me/gvisor-website/layouts/partials/navbar.html\": template: partials/navbar.html:5: function \"resources\" not defined\n+ERROR 2019/04/03 11:25:58 partials/navbar.html : template: partials/navbar.html:5: function \"resources\" not defined\n+ERROR 2019/04/03 11:25:58 Unable to locate template for shortcode \"readfile\" in page \"docs/user_guide/docker.md\"\n+ERROR 2019/04/03 11:25:58 Unable to locate template for shortcode \"readfile\" in page \"docs/user_guide/oci.md\"\n+ERROR 2019/04/03 11:25:58 Unable to locate template for shortcode \"blocks\" in page \"_index.html\"\n+```\n" } ]
Go
Apache License 2.0
google/gvisor
Add troubleshooting section to README (fixes #28)
259,884
14.04.2019 18:43:24
14,400
0e00a7d2da990ef8bd846c19c9a286539ad819a3
Add some basic questions to the FAQ Adds some basic questions and answers to the FAQ about supported CPU architectures and binary formats. Adds a variable and shortcode for the minimum required Linux version.
[ { "change_type": "MODIFY", "old_path": "config.toml", "new_path": "config.toml", "diff": "@@ -137,3 +137,6 @@ no = 'Sorry to hear that. Please <a href=\"https://github.com/USERNAME/REPOSITORY\nurl = \"https://groups.google.com/forum/#!forum/gvisor-dev\"\nicon = \"fa fa-envelope\"\ndesc = \"Get info on the development of gVisor\"\n+\n+[params.vars]\n+required_linux = \"3.17+\"\n" }, { "change_type": "MODIFY", "old_path": "content/docs/includes/install_gvisor.md", "new_path": "content/docs/includes/install_gvisor.md", "diff": "-> Note: gVisor supports only x86\\_64 and requires Linux 3.17+.\n-\nThe easiest way to get `runsc` is from the [latest nightly\nbuild][latest-nightly]. After you download the binary, check it against the\nSHA512 [checksum file][latest-hash].\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/docker.md", "new_path": "content/docs/user_guide/docker.md", "diff": "@@ -7,6 +7,8 @@ gVisor.\n## Install gVisor\n+> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}.\n+\n{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n## Configuring Docker\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/oci.md", "new_path": "content/docs/user_guide/oci.md", "diff": "@@ -7,6 +7,8 @@ container using the runtime directly with the default platform.\n## Install gVisor\n+> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}.\n+\n{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n## Run an OCI compatible container\n" }, { "change_type": "ADD", "old_path": null, "new_path": "layouts/shortcodes/required_linux.html", "diff": "+{{ .Site.Params.vars.required_linux }}\n" } ]
Go
Apache License 2.0
google/gvisor
Add some basic questions to the FAQ - Adds some basic questions and answers to the FAQ about supported CPU architectures and binary formats. - Adds a variable and shortcode for the minimum required Linux version.
259,884
12.04.2019 23:19:26
14,400
80036845c18e1e41e3610cf2ff4a25345f444070
Install gVisor using a subshell. If the user copy-and-pastes the commands to install gVisor in one go, the commands should stop early if there is an error. Particularly, if the runsc sha does not match.
[ { "change_type": "MODIFY", "old_path": "content/docs/includes/install_gvisor.md", "new_path": "content/docs/includes/install_gvisor.md", "diff": "@@ -14,11 +14,14 @@ as user `nobody` to avoid unnecessary privileges. The `/usr/local/bin` directory\na good place to put the `runsc` binary.\n```bash\n+(\n+ set -e\nwget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\nwget https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc.sha512\nsha512sum -c runsc.sha512\nchmod a+x runsc\nsudo mv runsc /usr/local/bin\n+)\n```\n[latest-nightly]: https://storage.googleapis.com/gvisor/releases/nightly/latest/runsc\n" } ]
Go
Apache License 2.0
google/gvisor
Install gVisor using a subshell. If the user copy-and-pastes the commands to install gVisor in one go, the commands should stop early if there is an error. Particularly, if the runsc sha does not match.
259,992
17.04.2019 11:14:24
25,200
9f8c89fc7fb7c4588713eb376fa56c4c3026d43c
Return error from fdbased.New RELNOTES: n/a
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/endpoint.go", "new_path": "pkg/tcpip/link/fdbased/endpoint.go", "diff": "@@ -136,10 +136,9 @@ type Options struct {\n//\n// Makes fd non-blocking, but does not take ownership of fd, which must remain\n// open for the lifetime of the returned endpoint.\n-func New(opts *Options) tcpip.LinkEndpointID {\n+func New(opts *Options) (tcpip.LinkEndpointID, error) {\nif err := syscall.SetNonblock(opts.FD, true); err != nil {\n- // TODO : replace panic with an error return.\n- panic(fmt.Sprintf(\"syscall.SetNonblock(%v) failed: %v\", opts.FD, err))\n+ return 0, fmt.Errorf(\"syscall.SetNonblock(%v) failed: %v\", opts.FD, err)\n}\ncaps := stack.LinkEndpointCapabilities(0)\n@@ -175,28 +174,35 @@ func New(opts *Options) tcpip.LinkEndpointID {\npacketDispatchMode: opts.PacketDispatchMode,\n}\n- if opts.GSOMaxSize != 0 && isSocketFD(opts.FD) {\n+ // For non-socket FDs we read one packet a time (e.g. TAP devices).\n+ msgsPerRecv := 1\n+ e.inboundDispatcher = e.dispatch\n+\n+ isSocket, err := isSocketFD(opts.FD)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if isSocket {\n+ if opts.GSOMaxSize != 0 {\ne.caps |= stack.CapabilityGSO\ne.gsoMaxSize = opts.GSOMaxSize\n}\n- if isSocketFD(opts.FD) && e.packetDispatchMode == PacketMMap {\n+\n+ switch e.packetDispatchMode {\n+ case PacketMMap:\nif err := e.setupPacketRXRing(); err != nil {\n- // TODO: replace panic with an error return.\n- panic(fmt.Sprintf(\"e.setupPacketRXRing failed: %v\", err))\n+ return 0, fmt.Errorf(\"e.setupPacketRXRing failed: %v\", err)\n}\ne.inboundDispatcher = e.packetMMapDispatch\n- return stack.RegisterLinkEndpoint(e)\n- }\n+ return stack.RegisterLinkEndpoint(e), nil\n- // For non-socket FDs we read one packet a time (e.g. TAP devices)\n- msgsPerRecv := 1\n- e.inboundDispatcher = e.dispatch\n+ case RecvMMsg:\n// If the provided FD is a socket then we optimize packet reads by\n// using recvmmsg() instead of read() to read packets in a batch.\n- if isSocketFD(opts.FD) && e.packetDispatchMode == RecvMMsg {\ne.inboundDispatcher = e.recvMMsgDispatch\nmsgsPerRecv = MaxMsgsPerRecv\n}\n+ }\ne.views = make([][]buffer.View, msgsPerRecv)\nfor i := range e.views {\n@@ -217,16 +223,15 @@ func New(opts *Options) tcpip.LinkEndpointID {\ne.msgHdrs[i].Msg.Iovlen = uint64(iovLen)\n}\n- return stack.RegisterLinkEndpoint(e)\n+ return stack.RegisterLinkEndpoint(e), nil\n}\n-func isSocketFD(fd int) bool {\n+func isSocketFD(fd int) (bool, error) {\nvar stat syscall.Stat_t\nif err := syscall.Fstat(fd, &stat); err != nil {\n- // TODO : replace panic with an error return.\n- panic(fmt.Sprintf(\"syscall.Fstat(%v,...) failed: %v\", fd, err))\n+ return false, fmt.Errorf(\"syscall.Fstat(%v,...) 
failed: %v\", fd, err)\n}\n- return (stat.Mode & syscall.S_IFSOCK) == syscall.S_IFSOCK\n+ return (stat.Mode & syscall.S_IFSOCK) == syscall.S_IFSOCK, nil\n}\n// Attach launches the goroutine that reads packets from the file descriptor and\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/fdbased/endpoint_test.go", "new_path": "pkg/tcpip/link/fdbased/endpoint_test.go", "diff": "@@ -68,7 +68,11 @@ func newContext(t *testing.T, opt *Options) *context {\n}\nopt.FD = fds[1]\n- ep := stack.FindLinkEndpoint(New(opt)).(*endpoint)\n+ epID, err := New(opt)\n+ if err != nil {\n+ t.Fatalf(\"Failed to create FD endpoint: %v\", err)\n+ }\n+ ep := stack.FindLinkEndpoint(epID).(*endpoint)\nc := &context{\nt: t,\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/sample/tun_tcp_connect/main.go", "new_path": "pkg/tcpip/sample/tun_tcp_connect/main.go", "diff": "@@ -137,7 +137,10 @@ func main() {\nlog.Fatal(err)\n}\n- linkID := fdbased.New(&fdbased.Options{FD: fd, MTU: mtu})\n+ linkID, err := fdbased.New(&fdbased.Options{FD: fd, MTU: mtu})\n+ if err != nil {\n+ log.Fatal(err)\n+ }\nif err := s.CreateNIC(1, sniffer.New(linkID)); err != nil {\nlog.Fatal(err)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/sample/tun_tcp_echo/main.go", "new_path": "pkg/tcpip/sample/tun_tcp_echo/main.go", "diff": "@@ -128,12 +128,15 @@ func main() {\nlog.Fatal(err)\n}\n- linkID := fdbased.New(&fdbased.Options{\n+ linkID, err := fdbased.New(&fdbased.Options{\nFD: fd,\nMTU: mtu,\nEthernetHeader: *tap,\nAddress: tcpip.LinkAddress(maddr),\n})\n+ if err != nil {\n+ log.Fatal(err)\n+ }\nif err := s.CreateNIC(1, linkID); err != nil {\nlog.Fatal(err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/network.go", "new_path": "runsc/boot/network.go", "diff": "@@ -135,7 +135,7 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\n}\nmac := tcpip.LinkAddress(generateRndMac())\n- linkEP := fdbased.New(&fdbased.Options{\n+ linkEP, err := fdbased.New(&fdbased.Options{\nFD: newFD,\nMTU: uint32(link.MTU),\nEthernetHeader: true,\n@@ -144,6 +144,9 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\nGSOMaxSize: link.GSOMaxSize,\nRXChecksumOffload: true,\n})\n+ if err != nil {\n+ return err\n+ }\nlog.Infof(\"Enabling interface %q with id %d on addresses %+v (%v)\", link.Name, nicID, link.Addresses, mac)\nif err := n.createNICWithAddrs(nicID, link.Name, linkEP, link.Addresses, false /* loopback */); err != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
Return error from fdbased.New RELNOTES: n/a PiperOrigin-RevId: 244031742 Change-Id: Id0cdb73194018fb5979e67b58510ead19b5a2b81
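A usage sketch of the changed constructor signature, using the import paths and Options fields as they appear in this diff; the fd value of -1 is there only to exercise the error path that previously would have panicked, and a real caller would pass the FD of a TAP device or packet socket.

```go
package main

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/tcpip"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/link/fdbased"
)

// newLinkEndpoint wraps fdbased.New, which after this commit returns an error
// (for example from SetNonblock or fstat) instead of panicking.
func newLinkEndpoint(fd int, mtu uint32) (tcpip.LinkEndpointID, error) {
	linkID, err := fdbased.New(&fdbased.Options{FD: fd, MTU: mtu})
	if err != nil {
		return 0, fmt.Errorf("creating fdbased endpoint for fd %d: %v", fd, err)
	}
	return linkID, nil
}

func main() {
	// -1 is an invalid FD, so this demonstrates the new error return.
	if _, err := newLinkEndpoint(-1, 1500); err != nil {
		fmt.Println("expected failure:", err)
	}
}
```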
259,992
16.04.2019 15:44:35
25,200
45028bdd82ca3f54d8bb0e99330b1bab6e64cb78
Update Linux version requirement for GSO
[ { "change_type": "MODIFY", "old_path": "config.toml", "new_path": "config.toml", "diff": "@@ -139,4 +139,4 @@ no = 'Sorry to hear that. Please <a href=\"https://github.com/USERNAME/REPOSITORY\ndesc = \"Get info on the development of gVisor\"\n[params.vars]\n-required_linux = \"3.17+\"\n+required_linux = \"4.14.77+\"\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/FAQ.md", "new_path": "content/docs/user_guide/FAQ.md", "diff": "@@ -5,7 +5,7 @@ weight = 1000\n### What operating systems are supported?\n-gVisor requires Linux {{< required_linux >}}.\n+gVisor requires Linux {{< required_linux >}} ([older Linux][old-linux]).\n### What CPU architectures are supported?\n@@ -57,3 +57,5 @@ directories.\n### What's the security model?\nSee the [Security Model](../../architecture_guide/security/).\n+\n+[old-linux]: /docs/user_guide/networking/#gso\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/docker.md", "new_path": "content/docs/user_guide/docker.md", "diff": "@@ -7,7 +7,8 @@ gVisor.\n## Install gVisor\n-> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}.\n+> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}\n+> ([older Linux][old-linux]).\n{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n@@ -87,4 +88,5 @@ Next, look at the different options available for gVisor: [platform](../platform\n[network](../networking/), [filesystem](../filesystem/).\n[docker]: https://docs.docker.com/install/\n+[old-linux]: /docs/user_guide/networking/#gso\n[storage-driver]: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/networking.md", "new_path": "content/docs/user_guide/networking.md", "diff": "@@ -60,4 +60,26 @@ Add the following `runtimeArgs` to your Docker configuration\n}\n```\n+### Disable GSO {#gso}\n+\n+If your Linux is older than {{< required_linux >}}, you can disable Generic\n+Segmentation Offload (GSO) to run with a kernel that is newer than 3.17. Add the\n+`--gso=false` flag to your Docker runtime configuration (`/etc/docker/daemon.json`)\n+and restart the Docker daemon:\n+\n+> Note: Network performance, especially for large payloads, will be greatly reduced.\n+\n+```json\n+{\n+ \"runtimes\": {\n+ \"runsc\": {\n+ \"path\": \"/usr/local/bin/runsc\",\n+ \"runtimeArgs\": [\n+ \"--gso=false\"\n+ ]\n+ }\n+ }\n+}\n+```\n+\n[netstack]: https://github.com/google/netstack\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/oci.md", "new_path": "content/docs/user_guide/oci.md", "diff": "@@ -7,7 +7,8 @@ container using the runtime directly with the default platform.\n## Install gVisor\n-> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}.\n+> Note: gVisor supports only x86\\_64 and requires Linux {{< required_linux >}}\n+> ([older Linux][old-linux]).\n{{% readfile file=\"docs/includes/install_gvisor.md\" markdown=\"true\" %}}\n@@ -47,3 +48,4 @@ sudo runsc run hello\nNext try [running gVisor using Docker](../docker/).\n[oci]: https://opencontainers.org/\n+[old-linux]: /docs/user_guide/networking/#gso\n" } ]
Go
Apache License 2.0
google/gvisor
Update Linux version requirement for GSO
259,992
17.04.2019 12:56:23
25,200
c8cee7108f1a1b37e89961c6dd69ccab97952c86
Use FD limit and file size limit from host FD limit and file size limit is read from the host, instead of using hard-coded defaults, given that they affect the sandbox process. Also limit the dirent cache to use no more than half of the available FDs.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/BUILD", "new_path": "pkg/sentry/fs/BUILD", "diff": "@@ -12,6 +12,7 @@ go_library(\n\"dentry.go\",\n\"dirent.go\",\n\"dirent_cache.go\",\n+ \"dirent_cache_limiter.go\",\n\"dirent_list.go\",\n\"dirent_state.go\",\n\"event_list.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/context.go", "new_path": "pkg/sentry/fs/context.go", "diff": "@@ -26,6 +26,9 @@ type contextID int\nconst (\n// CtxRoot is a Context.Value key for a Dirent.\nCtxRoot contextID = iota\n+\n+ // CtxDirentCacheLimiter is a Context.Value key for DirentCacheLimiter.\n+ CtxDirentCacheLimiter\n)\n// ContextCanAccessFile determines whether `file` can be accessed in the requested way\n@@ -100,3 +103,12 @@ func RootFromContext(ctx context.Context) *Dirent {\n}\nreturn nil\n}\n+\n+// DirentCacheLimiterFromContext returns the DirentCacheLimiter used by ctx, or\n+// nil if ctx does not have a dirent cache limiter.\n+func DirentCacheLimiterFromContext(ctx context.Context) *DirentCacheLimiter {\n+ if v := ctx.Value(CtxDirentCacheLimiter); v != nil {\n+ return v.(*DirentCacheLimiter)\n+ }\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dirent_cache.go", "new_path": "pkg/sentry/fs/dirent_cache.go", "diff": "@@ -32,6 +32,10 @@ type DirentCache struct {\n// when cache is nil.\nmaxSize uint64\n+ // limit restricts the number of entries in the cache amoung multiple caches.\n+ // It may be nil if there are no global limit for this cache.\n+ limit *DirentCacheLimiter\n+\n// mu protects currentSize and direntList.\nmu sync.Mutex `state:\"nosave\"`\n@@ -45,8 +49,7 @@ type DirentCache struct {\nlist direntList `state:\"zerovalue\"`\n}\n-// NewDirentCache returns a new DirentCache with the given maxSize. If maxSize\n-// is 0, nil is returned.\n+// NewDirentCache returns a new DirentCache with the given maxSize.\nfunc NewDirentCache(maxSize uint64) *DirentCache {\nreturn &DirentCache{\nmaxSize: maxSize,\n@@ -71,15 +74,24 @@ func (c *DirentCache) Add(d *Dirent) {\nreturn\n}\n+ // First check against the global limit.\n+ for c.limit != nil && !c.limit.tryInc() {\n+ if c.currentSize == 0 {\n+ // If the global limit is reached, but there is nothing more to drop from\n+ // this cache, there is not much else to do.\n+ c.mu.Unlock()\n+ return\n+ }\n+ c.remove(c.list.Back())\n+ }\n+\n// d is not in cache. Add it and take a reference.\nc.list.PushFront(d)\nd.IncRef()\nc.currentSize++\n- // Remove the oldest until we are under the size limit.\n- for c.maxSize > 0 && c.currentSize > c.maxSize {\n- c.remove(c.list.Back())\n- }\n+ c.maybeShrink()\n+\nc.mu.Unlock()\n}\n@@ -92,6 +104,9 @@ func (c *DirentCache) remove(d *Dirent) {\nd.SetNext(nil)\nd.DecRef()\nc.currentSize--\n+ if c.limit != nil {\n+ c.limit.dec()\n+ }\n}\n// Remove removes the element from the cache and decrements its refCount. It\n@@ -142,3 +157,19 @@ func (c *DirentCache) Invalidate() {\n}\nc.mu.Unlock()\n}\n+\n+// setMaxSize sets cache max size. 
If current size is larger than max size, the\n+// cache shrinks to acommodate the new max.\n+func (c *DirentCache) setMaxSize(max uint64) {\n+ c.mu.Lock()\n+ c.maxSize = max\n+ c.maybeShrink()\n+ c.mu.Unlock()\n+}\n+\n+// shrink removes the oldest element until the list is under the size limit.\n+func (c *DirentCache) maybeShrink() {\n+ for c.maxSize > 0 && c.currentSize > c.maxSize {\n+ c.remove(c.list.Back())\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fs/dirent_cache_limiter.go", "diff": "+// Copyright 2018 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package fs\n+\n+import (\n+ \"fmt\"\n+ \"sync\"\n+)\n+\n+// DirentCacheLimiter acts as a global limit for all dirent caches in the\n+// process.\n+//\n+// +stateify savable\n+type DirentCacheLimiter struct {\n+ mu sync.Mutex `state:\"nosave\"`\n+ max uint64\n+ count uint64 `state:\"zerovalue\"`\n+}\n+\n+// NewDirentCacheLimiter creates a new DirentCacheLimiter.\n+func NewDirentCacheLimiter(max uint64) *DirentCacheLimiter {\n+ return &DirentCacheLimiter{max: max}\n+}\n+\n+func (d *DirentCacheLimiter) tryInc() bool {\n+ d.mu.Lock()\n+ if d.count >= d.max {\n+ d.mu.Unlock()\n+ return false\n+ }\n+ d.count++\n+ d.mu.Unlock()\n+ return true\n+}\n+\n+func (d *DirentCacheLimiter) dec() {\n+ d.mu.Lock()\n+ if d.count == 0 {\n+ panic(fmt.Sprintf(\"underflowing DirentCacheLimiter count: %+v\", d))\n+ }\n+ d.count--\n+ d.mu.Unlock()\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dirent_cache_test.go", "new_path": "pkg/sentry/fs/dirent_cache_test.go", "diff": "@@ -120,6 +120,96 @@ func TestDirentCache(t *testing.T) {\n}\n}\n+func TestDirentCacheLimiter(t *testing.T) {\n+ const (\n+ globalMaxSize = 5\n+ maxSize = 3\n+ )\n+\n+ limit := NewDirentCacheLimiter(globalMaxSize)\n+ c1 := NewDirentCache(maxSize)\n+ c1.limit = limit\n+ c2 := NewDirentCache(maxSize)\n+ c2.limit = limit\n+\n+ // Create a Dirent d.\n+ d := NewNegativeDirent(\"\")\n+\n+ // Add d to the cache.\n+ c1.Add(d)\n+ if got, want := c1.Size(), uint64(1); got != want {\n+ t.Errorf(\"c1.Size() got %v, want %v\", got, want)\n+ }\n+\n+ // Add maxSize-1 more elements. 
d should be oldest element.\n+ for i := 0; i < maxSize-1; i++ {\n+ c1.Add(NewNegativeDirent(\"\"))\n+ }\n+ if got, want := c1.Size(), uint64(maxSize); got != want {\n+ t.Errorf(\"c1.Size() got %v, want %v\", got, want)\n+ }\n+\n+ // Check that d is still there.\n+ if got, want := c1.contains(d), true; got != want {\n+ t.Errorf(\"c1.contains(d) got %v want %v\", got, want)\n+ }\n+\n+ // Fill up the other cache, it will start dropping old entries from the cache\n+ // when the global limit is reached.\n+ for i := 0; i < maxSize; i++ {\n+ c2.Add(NewNegativeDirent(\"\"))\n+ }\n+\n+ // Check is what's remaining from global max.\n+ if got, want := c2.Size(), globalMaxSize-maxSize; int(got) != want {\n+ t.Errorf(\"c2.Size() got %v, want %v\", got, want)\n+ }\n+\n+ // Check that d was not dropped.\n+ if got, want := c1.contains(d), true; got != want {\n+ t.Errorf(\"c1.contains(d) got %v want %v\", got, want)\n+ }\n+\n+ // Add an entry that will eventually be dropped. Check is done later...\n+ drop := NewNegativeDirent(\"\")\n+ c1.Add(drop)\n+\n+ // Check that d is bumped to front even when global limit is reached.\n+ c1.Add(d)\n+ if got, want := c1.contains(d), true; got != want {\n+ t.Errorf(\"c1.contains(d) got %v want %v\", got, want)\n+ }\n+\n+ // Add 2 more element and check that:\n+ // - d is still in the list: to verify that d was bumped\n+ // - d2/d3 are in the list: older entries are dropped when global limit is\n+ // reached.\n+ // - drop is not in the list: indeed older elements are dropped.\n+ d2 := NewNegativeDirent(\"\")\n+ c1.Add(d2)\n+ d3 := NewNegativeDirent(\"\")\n+ c1.Add(d3)\n+ if got, want := c1.contains(d), true; got != want {\n+ t.Errorf(\"c1.contains(d) got %v want %v\", got, want)\n+ }\n+ if got, want := c1.contains(d2), true; got != want {\n+ t.Errorf(\"c1.contains(d2) got %v want %v\", got, want)\n+ }\n+ if got, want := c1.contains(d3), true; got != want {\n+ t.Errorf(\"c1.contains(d3) got %v want %v\", got, want)\n+ }\n+ if got, want := c1.contains(drop), false; got != want {\n+ t.Errorf(\"c1.contains(drop) got %v want %v\", got, want)\n+ }\n+\n+ // Drop all entries from one cache. The other will be allowed to grow.\n+ c1.Invalidate()\n+ c2.Add(NewNegativeDirent(\"\"))\n+ if got, want := c2.Size(), uint64(maxSize); got != want {\n+ t.Errorf(\"c2.Size() got %v, want %v\", got, want)\n+ }\n+}\n+\n// TestNilDirentCache tests that a nil cache supports all cache operations, but\n// treats them as noop.\nfunc TestNilDirentCache(t *testing.T) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/session.go", "new_path": "pkg/sentry/fs/gofer/session.go", "diff": "@@ -28,6 +28,10 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n)\n+// DefaultDirentCacheSize is the default dirent cache size for 9P mounts. 
It can\n+// be adjusted independentely from the other dirent caches.\n+var DefaultDirentCacheSize uint64 = fs.DefaultDirentCacheSize\n+\n// +stateify savable\ntype endpointMaps struct {\n// mu protexts the direntMap, the keyMap, and the pathMap below.\n@@ -249,6 +253,11 @@ func Root(ctx context.Context, dev string, filesystem fs.Filesystem, superBlockF\n// Construct the MountSource with the session and superBlockFlags.\nm := fs.NewMountSource(s, filesystem, superBlockFlags)\n+ // Given that gofer files can consume host FDs, restrict the number\n+ // of files that can be held by the cache.\n+ m.SetDirentCacheMaxSize(DefaultDirentCacheSize)\n+ m.SetDirentCacheLimiter(fs.DirentCacheLimiterFromContext(ctx))\n+\n// Send the Tversion request.\ns.client, err = p9.NewClient(conn, s.msize, s.version)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/mount.go", "new_path": "pkg/sentry/fs/mount.go", "diff": "@@ -151,9 +151,9 @@ type MountSource struct {\nchildren map[*MountSource]struct{}\n}\n-// defaultDirentCacheSize is the number of Dirents that the VFS can hold an extra\n-// reference on.\n-const defaultDirentCacheSize uint64 = 1000\n+// DefaultDirentCacheSize is the number of Dirents that the VFS can hold an\n+// extra reference on.\n+const DefaultDirentCacheSize uint64 = 1000\n// NewMountSource returns a new MountSource. Filesystem may be nil if there is no\n// filesystem backing the mount.\n@@ -162,7 +162,7 @@ func NewMountSource(mops MountSourceOperations, filesystem Filesystem, flags Mou\nMountSourceOperations: mops,\nFlags: flags,\nFilesystem: filesystem,\n- fscache: NewDirentCache(defaultDirentCacheSize),\n+ fscache: NewDirentCache(DefaultDirentCacheSize),\nchildren: make(map[*MountSource]struct{}),\n}\n}\n@@ -246,6 +246,18 @@ func (msrc *MountSource) FlushDirentRefs() {\nmsrc.fscache.Invalidate()\n}\n+// SetDirentCacheMaxSize sets the max size to the dirent cache associated with\n+// this mount source.\n+func (msrc *MountSource) SetDirentCacheMaxSize(max uint64) {\n+ msrc.fscache.setMaxSize(max)\n+}\n+\n+// SetDirentCacheLimiter sets the limiter objcet to the dirent cache associated\n+// with this mount source.\n+func (msrc *MountSource) SetDirentCacheLimiter(l *DirentCacheLimiter) {\n+ msrc.fscache.limit = l\n+}\n+\n// NewCachingMountSource returns a generic mount that will cache dirents\n// aggressively.\nfunc NewCachingMountSource(filesystem Filesystem, flags MountSourceFlags) *MountSource {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/mount_overlay.go", "new_path": "pkg/sentry/fs/mount_overlay.go", "diff": "@@ -31,10 +31,19 @@ type overlayMountSourceOperations struct {\nfunc newOverlayMountSource(upper, lower *MountSource, flags MountSourceFlags) *MountSource {\nupper.IncRef()\nlower.IncRef()\n- return NewMountSource(&overlayMountSourceOperations{\n+ msrc := NewMountSource(&overlayMountSourceOperations{\nupper: upper,\nlower: lower,\n}, &overlayFilesystem{}, flags)\n+\n+ // Use the minimum number to keep resource usage under limits.\n+ size := lower.fscache.maxSize\n+ if size > upper.fscache.maxSize {\n+ size = upper.fscache.maxSize\n+ }\n+ msrc.fscache.setMaxSize(size)\n+\n+ return msrc\n}\n// Revalidate implements MountSourceOperations.Revalidate for an overlay by\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -188,6 +188,11 @@ type Kernel struct {\n// deviceRegistry is used to save/restore device.SimpleDevices.\ndeviceRegistry struct{} 
`state:\".(*device.Registry)\"`\n+\n+ // DirentCacheLimiter controls the number of total dirent entries can be in\n+ // caches. Not all caches use it, only the caches that use host resources use\n+ // the limiter. It may be nil if disabled.\n+ DirentCacheLimiter *fs.DirentCacheLimiter\n}\n// InitKernelArgs holds arguments to Init.\n@@ -626,6 +631,8 @@ func (ctx *createProcessContext) Value(key interface{}) interface{} {\nreturn ctx.k.mounts.Root()\n}\nreturn nil\n+ case fs.CtxDirentCacheLimiter:\n+ return ctx.k.DirentCacheLimiter\ncase ktime.CtxRealtimeClock:\nreturn ctx.k.RealtimeClock()\ncase limits.CtxLimits:\n@@ -1170,6 +1177,8 @@ func (ctx supervisorContext) Value(key interface{}) interface{} {\nreturn auth.NewRootCredentials(ctx.k.rootUserNamespace)\ncase fs.CtxRoot:\nreturn ctx.k.mounts.Root()\n+ case fs.CtxDirentCacheLimiter:\n+ return ctx.k.DirentCacheLimiter\ncase ktime.CtxRealtimeClock:\nreturn ctx.k.RealtimeClock()\ncase limits.CtxLimits:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task.go", "new_path": "pkg/sentry/kernel/task.go", "diff": "@@ -601,6 +601,8 @@ func (t *Task) Value(key interface{}) interface{} {\nreturn int32(t.ThreadGroup().ID())\ncase fs.CtxRoot:\nreturn t.fsc.RootDirectory()\n+ case fs.CtxDirentCacheLimiter:\n+ return t.k.DirentCacheLimiter\ncase inet.CtxStack:\nreturn t.NetworkContext()\ncase ktime.CtxRealtimeClock:\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -20,10 +20,10 @@ import (\n\"path/filepath\"\n\"strconv\"\n\"strings\"\n+ \"syscall\"\n// Include filesystem types that OCI spec might mount.\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/dev\"\n- _ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer\"\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/host\"\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc\"\n_ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/sys\"\n@@ -38,6 +38,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -81,6 +82,22 @@ func (f *fdDispenser) empty() bool {\nreturn len(f.fds) == 0\n}\n+func adjustDirentCache(k *kernel.Kernel) error {\n+ var hl syscall.Rlimit\n+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &hl); err != nil {\n+ return fmt.Errorf(\"getting RLIMIT_NOFILE: %v\", err)\n+ }\n+ if int64(hl.Cur) != syscall.RLIM_INFINITY {\n+ newSize := hl.Cur / 2\n+ if newSize < gofer.DefaultDirentCacheSize {\n+ log.Infof(\"Setting gofer dirent cache size to %d\", newSize)\n+ gofer.DefaultDirentCacheSize = newSize\n+ k.DirentCacheLimiter = fs.NewDirentCacheLimiter(newSize)\n+ }\n+ }\n+ return nil\n+}\n+\n// setupRootContainerFS creates a mount namespace containing the root filesystem\n// and all mounts. 'rootCtx' is used to walk directories to find mount points.\n// 'setMountNS' is called after namespace is created. 
It must set the mount NS\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/limits.go", "new_path": "runsc/boot/limits.go", "diff": "@@ -16,8 +16,11 @@ package boot\nimport (\n\"fmt\"\n+ \"sync\"\n+ \"syscall\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/limits\"\n)\n@@ -41,10 +44,43 @@ var fromLinuxResource = map[string]limits.LimitType{\n\"RLIMIT_STACK\": limits.Stack,\n}\n-func createLimitSet(spec *specs.Spec) (*limits.LimitSet, error) {\n+func findName(lt limits.LimitType) string {\n+ for k, v := range fromLinuxResource {\n+ if v == lt {\n+ return k\n+ }\n+ }\n+ return \"unknown\"\n+}\n+\n+var defaults defs\n+\n+type defs struct {\n+ mu sync.Mutex\n+ set *limits.LimitSet\n+ err error\n+}\n+\n+func (d *defs) get() (*limits.LimitSet, error) {\n+ d.mu.Lock()\n+ defer d.mu.Unlock()\n+\n+ if d.err != nil {\n+ return nil, d.err\n+ }\n+ if d.set == nil {\n+ if err := d.initDefaults(); err != nil {\n+ d.err = err\n+ return nil, err\n+ }\n+ }\n+ return d.set, nil\n+}\n+\n+func (d *defs) initDefaults() error {\nls, err := limits.NewLinuxLimitSet()\nif err != nil {\n- return nil, err\n+ return err\n}\n// Set default limits based on what containers get by default, ex:\n@@ -66,6 +102,43 @@ func createLimitSet(spec *specs.Spec) (*limits.LimitSet, error) {\nls.SetUnchecked(limits.SignalsPending, limits.Limit{Cur: 0, Max: 0})\nls.SetUnchecked(limits.Stack, limits.Limit{Cur: 8388608, Max: limits.Infinity})\n+ // Read host limits that directly affect the sandbox and adjust the defaults\n+ // based on them.\n+ for _, res := range []int{syscall.RLIMIT_FSIZE, syscall.RLIMIT_NOFILE} {\n+ var hl syscall.Rlimit\n+ if err := syscall.Getrlimit(res, &hl); err != nil {\n+ return err\n+ }\n+\n+ lt, ok := limits.FromLinuxResource[res]\n+ if !ok {\n+ return fmt.Errorf(\"unknown rlimit type %v\", res)\n+ }\n+ hostLimit := limits.Limit{\n+ Cur: limits.FromLinux(hl.Cur),\n+ Max: limits.FromLinux(hl.Max),\n+ }\n+\n+ defaultLimit := ls.Get(lt)\n+ if hostLimit.Cur != limits.Infinity && hostLimit.Cur < defaultLimit.Cur {\n+ log.Warningf(\"Host limit is lower than recommended, resource: %q, host: %d, recommended: %d\", findName(lt), hostLimit.Cur, defaultLimit.Cur)\n+ }\n+ if hostLimit.Cur != defaultLimit.Cur || hostLimit.Max != defaultLimit.Max {\n+ log.Infof(\"Setting limit from host, resource: %q {soft: %d, hard: %d}\", findName(lt), hostLimit.Cur, hostLimit.Max)\n+ ls.SetUnchecked(lt, hostLimit)\n+ }\n+ }\n+\n+ d.set = ls\n+ return nil\n+}\n+\n+func createLimitSet(spec *specs.Spec) (*limits.LimitSet, error) {\n+ ls, err := defaults.get()\n+ if err != nil {\n+ return nil, err\n+ }\n+\n// Then apply overwrites on top of defaults.\nfor _, rl := range spec.Process.Rlimits {\nlt, ok := fromLinuxResource[rl.Type]\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -274,6 +274,10 @@ func New(args Args) (*Loader, error) {\nreturn nil, fmt.Errorf(\"initializing kernel: %v\", err)\n}\n+ if err := adjustDirentCache(k); err != nil {\n+ return nil, err\n+ }\n+\n// Turn on packet logging if enabled.\nif args.Conf.LogPackets {\nlog.Infof(\"Packet logging enabled\")\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/poll.cc", "new_path": "test/syscalls/linux/poll.cc", "diff": "@@ -255,7 +255,16 @@ TEST_F(PollTest, Nfds) {\n// Stash value of RLIMIT_NOFILES.\nstruct rlimit rlim;\nTEST_PCHECK(getrlimit(RLIMIT_NOFILE, &rlim) == 0);\n+\n+ // gVisor 
caps the number of FDs that epoll can use beyond RLIMIT_NOFILE.\n+ constexpr rlim_t gVisorMax = 1048576;\n+ if (rlim.rlim_cur > gVisorMax) {\n+ rlim.rlim_cur = gVisorMax;\n+ TEST_PCHECK(setrlimit(RLIMIT_NOFILE, &rlim) == 0);\n+ }\n+\nrlim_t max_fds = rlim.rlim_cur;\n+ LOG(INFO) << \"Using limit: \" << max_fds;\n// Create an eventfd. Since its value is initially zero, it is writable.\nFileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD());\n" } ]
Go
Apache License 2.0
google/gvisor
Use FD limit and file size limit from host FD limit and file size limit is read from the host, instead of using hard-coded defaults, given that they affect the sandbox process. Also limit the dirent cache to use no more than half of the available FDs. PiperOrigin-RevId: 244050323 Change-Id: I787ad0fdf07c49d589e51aebfeae477324fe26e6
259,854
18.04.2019 11:50:26
25,200
133700007a8495c7d8df53801b1d34345d6c5cf8
Only emit unimplemented syscall events for unsupported values. Only emit unimplemented syscall events for setting SO_OOBINLINE and SO_LINGER when attempting to set unsupported values.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/socket.go", "new_path": "pkg/abi/linux/socket.go", "diff": "@@ -247,6 +247,15 @@ type SockAddrUnix struct {\nPath [UnixPathMax]int8\n}\n+// Linger is struct linger, from include/linux/socket.h.\n+type Linger struct {\n+ OnOff int32\n+ Linger int32\n+}\n+\n+// SizeOfLinger is the binary size of a Linger struct.\n+const SizeOfLinger = 8\n+\n// TCPInfo is a collection of TCP statistics.\n//\n// From uapi/linux/tcp.h.\n@@ -322,8 +331,8 @@ type TCPInfo struct {\nSndBufLimited uint64\n}\n-// SizeOfTCPInfo is the binary size of a TCPInfo struct (104 bytes).\n-var SizeOfTCPInfo = binary.Size(TCPInfo{})\n+// SizeOfTCPInfo is the binary size of a TCPInfo struct.\n+const SizeOfTCPInfo = 104\n// Control message types, from linux/socket.h.\nconst (\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -783,10 +783,10 @@ func getSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, family\nreturn int32(v), nil\ncase linux.SO_LINGER:\n- if outLen < syscall.SizeofLinger {\n+ if outLen < linux.SizeOfLinger {\nreturn nil, syserr.ErrInvalidArgument\n}\n- return syscall.Linger{}, nil\n+ return linux.Linger{}, nil\ncase linux.SO_SNDTIMEO:\n// TODO: Linux allows shorter lengths for partial results.\n@@ -1126,6 +1126,33 @@ func setSockOptSocket(t *kernel.Task, s socket.Socket, ep commonEndpoint, name i\ns.SetRecvTimeout(v.ToNsecCapped())\nreturn nil\n+ case linux.SO_OOBINLINE:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ v := usermem.ByteOrder.Uint32(optVal)\n+\n+ if v == 0 {\n+ socket.SetSockOptEmitUnimplementedEvent(t, name)\n+ }\n+\n+ return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.OutOfBandInlineOption(v)))\n+\n+ case linux.SO_LINGER:\n+ if len(optVal) < linux.SizeOfLinger {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ var v linux.Linger\n+ binary.Unmarshal(optVal[:linux.SizeOfLinger], usermem.ByteOrder, &v)\n+\n+ if v != (linux.Linger{}) {\n+ socket.SetSockOptEmitUnimplementedEvent(t, name)\n+ }\n+\n+ return nil\n+\ndefault:\nsocket.SetSockOptEmitUnimplementedEvent(t, name)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Only emit unimplemented syscall events for unsupported values. Only emit unimplemented syscall events for setting SO_OOBINLINE and SO_LINGER when attempting to set unsupported values. PiperOrigin-RevId: 244229675 Change-Id: Icc4562af8f733dd75a90404621711f01a32a9fc1
259,881
18.04.2019 15:22:47
25,200
c931c8e0829914718a729e20d7db0c2bf4e73f0b
Format struct pollfd in poll(2)/ppoll(2) I0410 15:40:38.854295 3776 x:0] [ 1] poll_test E poll(0x2b00bfb5c020 [{FD: 0x3 anon_inode:[eventfd], Events: POLLOUT, REvents: ...}], 0x1, 0x1) I0410 15:40:38.854348 3776 x:0] [ 1] poll_test X poll(0x2b00bfb5c020 [{FD: 0x3 anon_inode:[eventfd], Events: POLLOUT|POLLERR|POLLHUP, REvents: POLLOUT}], 0x1, 0x1) = 0x1 (10.765µs)
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/BUILD", "new_path": "pkg/sentry/strace/BUILD", "diff": "@@ -11,6 +11,7 @@ go_library(\n\"futex.go\",\n\"linux64.go\",\n\"open.go\",\n+ \"poll.go\",\n\"ptrace.go\",\n\"signal.go\",\n\"socket.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/linux64.go", "new_path": "pkg/sentry/strace/linux64.go", "diff": "@@ -24,7 +24,7 @@ var linuxAMD64 = SyscallMap{\n4: makeSyscallInfo(\"stat\", Path, Stat),\n5: makeSyscallInfo(\"fstat\", FD, Stat),\n6: makeSyscallInfo(\"lstat\", Path, Stat),\n- 7: makeSyscallInfo(\"poll\", Hex, Hex, Hex),\n+ 7: makeSyscallInfo(\"poll\", PollFDs, Hex, Hex),\n8: makeSyscallInfo(\"lseek\", Hex, Hex, Hex),\n9: makeSyscallInfo(\"mmap\", Hex, Hex, Hex, Hex, FD, Hex),\n10: makeSyscallInfo(\"mprotect\", Hex, Hex, Hex),\n@@ -288,7 +288,7 @@ var linuxAMD64 = SyscallMap{\n268: makeSyscallInfo(\"fchmodat\", FD, Path, Mode),\n269: makeSyscallInfo(\"faccessat\", FD, Path, Oct, Hex),\n270: makeSyscallInfo(\"pselect6\", Hex, Hex, Hex, Hex, Hex, Hex),\n- 271: makeSyscallInfo(\"ppoll\", Hex, Hex, Timespec, SigSet, Hex),\n+ 271: makeSyscallInfo(\"ppoll\", PollFDs, Hex, Timespec, SigSet, Hex),\n272: makeSyscallInfo(\"unshare\", CloneFlags),\n273: makeSyscallInfo(\"set_robust_list\", Hex, Hex),\n274: makeSyscallInfo(\"get_robust_list\", Hex, Hex, Hex),\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/strace/poll.go", "diff": "+// Copyright 2019 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package strace\n+\n+import (\n+ \"fmt\"\n+ \"strings\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"\n+ slinux \"gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+)\n+\n+// PollEventSet is the set of poll(2) event flags.\n+var PollEventSet = abi.FlagSet{\n+ {Flag: linux.POLLIN, Name: \"POLLIN\"},\n+ {Flag: linux.POLLPRI, Name: \"POLLPRI\"},\n+ {Flag: linux.POLLOUT, Name: \"POLLOUT\"},\n+ {Flag: linux.POLLERR, Name: \"POLLERR\"},\n+ {Flag: linux.POLLHUP, Name: \"POLLHUP\"},\n+ {Flag: linux.POLLNVAL, Name: \"POLLNVAL\"},\n+ {Flag: linux.POLLRDNORM, Name: \"POLLRDNORM\"},\n+ {Flag: linux.POLLRDBAND, Name: \"POLLRDBAND\"},\n+ {Flag: linux.POLLWRNORM, Name: \"POLLWRNORM\"},\n+ {Flag: linux.POLLWRBAND, Name: \"POLLWRBAND\"},\n+ {Flag: linux.POLLMSG, Name: \"POLLMSG\"},\n+ {Flag: linux.POLLREMOVE, Name: \"POLLREMOVE\"},\n+ {Flag: linux.POLLRDHUP, Name: \"POLLRDHUP\"},\n+ {Flag: linux.POLLFREE, Name: \"POLLFREE\"},\n+ {Flag: linux.POLL_BUSY_LOOP, Name: \"POLL_BUSY_LOOP\"},\n+}\n+\n+func pollFD(t *kernel.Task, pfd *linux.PollFD, post bool) string {\n+ revents := \"...\"\n+ if post {\n+ revents = PollEventSet.Parse(uint64(pfd.REvents))\n+ }\n+ return fmt.Sprintf(\"{FD: %s, Events: %s, REvents: %s}\", fd(t, kdefs.FD(pfd.FD)), 
PollEventSet.Parse(uint64(pfd.Events)), revents)\n+}\n+\n+func pollFDs(t *kernel.Task, addr usermem.Addr, nfds uint, post bool) string {\n+ if addr == 0 {\n+ return \"null\"\n+ }\n+\n+ pfds, err := slinux.CopyInPollFDs(t, addr, nfds)\n+ if err != nil {\n+ return fmt.Sprintf(\"%#x (error decoding pollfds: %s)\", addr, err)\n+ }\n+\n+ s := make([]string, 0, len(pfds))\n+ for i := range pfds {\n+ s = append(s, pollFD(t, &pfds[i], post))\n+ }\n+\n+ return fmt.Sprintf(\"%#x [%s]\", addr, strings.Join(s, \", \"))\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/strace.go", "new_path": "pkg/sentry/strace/strace.go", "diff": "@@ -438,6 +438,8 @@ func (i *SyscallInfo) pre(t *kernel.Task, args arch.SyscallArguments, maximumBlo\noutput = append(output, capHeader(t, args[arg].Pointer()))\ncase CapData:\noutput = append(output, capData(t, args[arg-1].Pointer(), args[arg].Pointer()))\n+ case PollFDs:\n+ output = append(output, pollFDs(t, args[arg].Pointer(), uint(args[arg+1].Uint()), false))\ncase Oct:\noutput = append(output, \"0o\"+strconv.FormatUint(args[arg].Uint64(), 8))\ncase Hex:\n@@ -502,6 +504,8 @@ func (i *SyscallInfo) post(t *kernel.Task, args arch.SyscallArguments, rval uint\noutput[arg] = sigAction(t, args[arg].Pointer())\ncase PostCapData:\noutput[arg] = capData(t, args[arg-1].Pointer(), args[arg].Pointer())\n+ case PollFDs:\n+ output[arg] = pollFDs(t, args[arg].Pointer(), uint(args[arg+1].Uint()), true)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/strace/syscalls.go", "new_path": "pkg/sentry/strace/syscalls.go", "diff": "@@ -202,6 +202,10 @@ const (\n// PostCapData is the data argument to capget(2)/capset(2), formatted\n// after syscall execution. The previous argument must be CapHeader.\nPostCapData\n+\n+ // PollFDs is an array of struct pollfd. The number of entries in the\n+ // array is in the next argument.\n+ PollFDs\n)\n// defaultFormat is the syscall argument format to use if the actual format is\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_poll.go", "new_path": "pkg/sentry/syscalls/linux/sys_poll.go", "diff": "@@ -155,16 +155,26 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.\nreturn timeout, n, nil\n}\n-func doPoll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {\n+// CopyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.\n+func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) {\nif uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {\n- return timeout, 0, syserror.EINVAL\n+ return nil, syserror.EINVAL\n}\npfd := make([]linux.PollFD, nfds)\nif nfds > 0 {\n- if _, err := t.CopyIn(pfdAddr, &pfd); err != nil {\n- return timeout, 0, err\n+ if _, err := t.CopyIn(addr, &pfd); err != nil {\n+ return nil, err\n+ }\n+ }\n+\n+ return pfd, nil\n}\n+\n+func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {\n+ pfd, err := CopyInPollFDs(t, addr, nfds)\n+ if err != nil {\n+ return timeout, 0, err\n}\n// Compatibility warning: Linux adds POLLHUP and POLLERR just before\n@@ -180,7 +190,7 @@ func doPoll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Durati\n// The poll entries are copied out regardless of whether\n// any are set or not. 
This aligns with the Linux behavior.\nif nfds > 0 && err == nil {\n- if _, err := t.CopyOut(pfdAddr, pfd); err != nil {\n+ if _, err := t.CopyOut(addr, pfd); err != nil {\nreturn remainingTimeout, 0, err\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Format struct pollfd in poll(2)/ppoll(2) I0410 15:40:38.854295 3776 x:0] [ 1] poll_test E poll(0x2b00bfb5c020 [{FD: 0x3 anon_inode:[eventfd], Events: POLLOUT, REvents: ...}], 0x1, 0x1) I0410 15:40:38.854348 3776 x:0] [ 1] poll_test X poll(0x2b00bfb5c020 [{FD: 0x3 anon_inode:[eventfd], Events: POLLOUT|POLLERR|POLLHUP, REvents: POLLOUT}], 0x1, 0x1) = 0x1 (10.765µs) PiperOrigin-RevId: 244269879 Change-Id: If07ba54a486fdeaaedfc0123769b78d1da862307
259,974
18.04.2019 16:20:45
25,200
f4d434c18002c96511decf8ff1ebdbede46ca6a1
Enable vDSO support on arm64.
[ { "change_type": "MODIFY", "old_path": "vdso/BUILD", "new_path": "vdso/BUILD", "diff": "@@ -12,6 +12,11 @@ config_setting(\nconstraint_values = [\"@bazel_tools//platforms:x86_64\"],\n)\n+config_setting(\n+ name = \"aarch64\",\n+ constraint_values = [\"@bazel_tools//platforms:aarch64\"],\n+)\n+\ngenrule(\nname = \"vdso\",\nsrcs = [\n@@ -21,7 +26,8 @@ genrule(\n\"seqlock.h\",\n\"syscalls.h\",\n\"vdso.cc\",\n- \"vdso.lds\",\n+ \"vdso_amd64.lds\",\n+ \"vdso_arm64.lds\",\n\"vdso_time.h\",\n\"vdso_time.cc\",\n],\n@@ -49,7 +55,13 @@ genrule(\n\"-Wl,-Bsymbolic \" +\n\"-Wl,-z,max-page-size=4096 \" +\n\"-Wl,-z,common-page-size=4096 \" +\n- \"-Wl,-T$(location vdso.lds) \" +\n+ select(\n+ {\n+ \":x86_64\": \"-Wl,-T$(location vdso_amd64.lds) \",\n+ \":aarch64\": \"-Wl,-T$(location vdso_arm64.lds) \",\n+ },\n+ no_match_error = \"Unsupported architecture\",\n+ ) +\n\"-o $(location vdso.so) \" +\n\"$(location vdso.cc) \" +\n\"$(location vdso_time.cc) \" +\n" }, { "change_type": "MODIFY", "old_path": "vdso/barrier.h", "new_path": "vdso/barrier.h", "diff": "@@ -21,11 +21,25 @@ namespace vdso {\ninline void barrier(void) { __asm__ __volatile__(\"\" ::: \"memory\"); }\n#if __x86_64__\n+\ninline void memory_barrier(void) {\n__asm__ __volatile__(\"mfence\" ::: \"memory\");\n}\ninline void read_barrier(void) { barrier(); }\ninline void write_barrier(void) { barrier(); }\n+\n+#elif __aarch64__\n+\n+inline void memory_barrier(void) {\n+ __asm__ __volatile__(\"dmb ish\" ::: \"memory\");\n+}\n+inline void read_barrier(void) {\n+ __asm__ __volatile__(\"dmb ishld\" ::: \"memory\");\n+}\n+inline void write_barrier(void) {\n+ __asm__ __volatile__(\"dmb ishst\" ::: \"memory\");\n+}\n+\n#else\n#error \"unsupported architecture\"\n#endif\n" }, { "change_type": "MODIFY", "old_path": "vdso/cycle_clock.h", "new_path": "vdso/cycle_clock.h", "diff": "@@ -33,6 +33,15 @@ static inline uint64_t cycle_clock(void) {\nasm volatile(\"rdtsc\" : \"=a\"(lo), \"=d\"(hi));\nreturn ((uint64_t)hi << 32) | lo;\n}\n+\n+#elif __aarch64__\n+\n+static inline uint64_t cycle_clock(void) {\n+ uint64_t val;\n+ asm volatile(\"mrs %0, CNTVCT_EL0\" : \"=r\"(val)::\"memory\");\n+ return val;\n+}\n+\n#else\n#error \"unsupported architecture\"\n#endif\n" }, { "change_type": "MODIFY", "old_path": "vdso/syscalls.h", "new_path": "vdso/syscalls.h", "diff": "#include <stddef.h>\n#include <sys/types.h>\n-struct getcpu_cache;\n-\nnamespace vdso {\n+#if __x86_64__\n+\n+struct getcpu_cache;\n+\nstatic inline int sys_clock_gettime(clockid_t clock, struct timespec* ts) {\nint num = __NR_clock_gettime;\nasm volatile(\"syscall\\n\"\n@@ -49,6 +51,49 @@ static inline int sys_getcpu(unsigned* cpu, unsigned* node,\nreturn num;\n}\n+#elif __aarch64__\n+\n+static inline int sys_rt_sigreturn(void) {\n+ int num = __NR_rt_sigreturn;\n+\n+ asm volatile(\n+ \"mov x8, %0\\n\"\n+ \"svc #0 \\n\"\n+ : \"+r\"(num)\n+ :\n+ :);\n+ return num;\n+}\n+\n+static inline int sys_clock_gettime(clockid_t _clkid, struct timespec *_ts) {\n+ register struct timespec *ts asm(\"x1\") = _ts;\n+ register clockid_t clkid asm(\"x0\") = _clkid;\n+ register long ret asm(\"x0\");\n+ register long nr asm(\"x8\") = __NR_clock_gettime;\n+\n+ asm volatile(\"svc #0\\n\"\n+ : \"=r\"(ret)\n+ : \"r\"(clkid), \"r\"(ts), \"r\"(nr)\n+ : \"memory\");\n+ return ret;\n+}\n+\n+static inline int sys_clock_getres(clockid_t _clkid, struct timespec *_ts) {\n+ register struct timespec *ts asm(\"x1\") = _ts;\n+ register clockid_t clkid asm(\"x0\") = _clkid;\n+ register long ret asm(\"x0\");\n+ register long nr asm(\"x8\") = 
__NR_clock_getres;\n+\n+ asm volatile(\"svc #0\\n\"\n+ : \"=r\"(ret)\n+ : \"r\"(clkid), \"r\"(ts), \"r\"(nr)\n+ : \"memory\");\n+ return ret;\n+}\n+\n+#else\n+#error \"unsupported architecture\"\n+#endif\n} // namespace vdso\n#endif // VDSO_SYSCALLS_H_\n" }, { "change_type": "MODIFY", "old_path": "vdso/vdso.cc", "new_path": "vdso/vdso.cc", "diff": "#include \"vdso/vdso_time.h\"\nnamespace vdso {\n+namespace {\n-// __vdso_clock_gettime() implements clock_gettime()\n-extern \"C\" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {\n+int __common_clock_gettime(clockid_t clock, struct timespec* ts) {\nint ret;\nswitch (clock) {\n@@ -44,11 +44,8 @@ extern \"C\" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {\nreturn ret;\n}\n-extern \"C\" int clock_gettime(clockid_t clock, struct timespec* ts)\n- __attribute__((weak, alias(\"__vdso_clock_gettime\")));\n-// __vdso_gettimeofday() implements gettimeofday()\n-extern \"C\" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {\n+int __common_gettimeofday(struct timeval* tv, struct timezone* tz) {\nif (tv) {\nstruct timespec ts;\nint ret = ClockRealtime(&ts);\n@@ -68,6 +65,21 @@ extern \"C\" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {\nreturn 0;\n}\n+} // namespace\n+\n+#if __x86_64__\n+\n+// __vdso_clock_gettime() implements clock_gettime()\n+extern \"C\" int __vdso_clock_gettime(clockid_t clock, struct timespec* ts) {\n+ return __common_clock_gettime(clock, ts);\n+}\n+extern \"C\" int clock_gettime(clockid_t clock, struct timespec* ts)\n+ __attribute__((weak, alias(\"__vdso_clock_gettime\")));\n+\n+// __vdso_gettimeofday() implements gettimeofday()\n+extern \"C\" int __vdso_gettimeofday(struct timeval* tv, struct timezone* tz) {\n+ return __common_gettimeofday(tv, tz);\n+}\nextern \"C\" int gettimeofday(struct timeval* tv, struct timezone* tz)\n__attribute__((weak, alias(\"__vdso_gettimeofday\")));\n@@ -92,4 +104,45 @@ extern \"C\" long getcpu(unsigned* cpu, unsigned* node,\nstruct getcpu_cache* cache)\n__attribute__((weak, alias(\"__vdso_getcpu\")));\n+#elif __aarch64__\n+\n+// __kernel_clock_gettime() implements clock_gettime()\n+extern \"C\" int __kernel_clock_gettime(clockid_t clock, struct timespec* ts) {\n+ return __common_clock_gettime(clock, ts);\n+}\n+\n+// __kernel_gettimeofday() implements gettimeofday()\n+extern \"C\" int __kernel_gettimeofday(struct timeval* tv, struct timezone* tz) {\n+ return __common_gettimeofday(tv, tz);\n+}\n+\n+// __kernel_clock_getres() implements clock_getres()\n+extern \"C\" int __kernel_clock_getres(clockid_t clock, struct timespec* res) {\n+ int ret = 0;\n+\n+ switch (clock) {\n+ case CLOCK_REALTIME:\n+ case CLOCK_MONOTONIC: {\n+ res->tv_sec = 0;\n+ res->tv_nsec = 1;\n+ break;\n+ }\n+\n+ default:\n+ ret = sys_clock_getres(clock, res);\n+ break;\n+ }\n+\n+ return ret;\n+}\n+\n+// __kernel_rt_sigreturn() implements gettimeofday()\n+extern \"C\" int __kernel_rt_sigreturn(unsigned long unused) {\n+ // No optimizations yet, just make the real system call.\n+ return sys_rt_sigreturn();\n+}\n+\n+#else\n+#error \"unsupported architecture\"\n+#endif\n} // namespace vdso\n" }, { "change_type": "RENAME", "old_path": "vdso/vdso.lds", "new_path": "vdso/vdso_amd64.lds", "diff": "" }, { "change_type": "ADD", "old_path": null, "new_path": "vdso/vdso_arm64.lds", "diff": "+/*\n+ * Linker script for the VDSO.\n+ *\n+ * The VDSO is essentially a normal ELF shared library that is mapped into the\n+ * address space of the process that is going to use it. 
The address of the\n+ * VDSO is passed to the runtime linker in the AT_SYSINFO_EHDR entry of the aux\n+ * vector.\n+ *\n+ * There are, however, three ways in which the VDSO differs from a normal\n+ * shared library:\n+ *\n+ * - The runtime linker does not attempt to process any relocations for the\n+ * VDSO so it is the responsibility of whoever loads the VDSO into the\n+ * address space to do this if necessary. Because of this restriction we are\n+ * careful to ensure that the VDSO does not need to have any relocations\n+ * applied to it.\n+ *\n+ * - Although the VDSO is position independent and would normally be linked at\n+ * virtual address 0, the Linux kernel VDSO is actually linked at a non zero\n+ * virtual address and the code in the system runtime linker that handles the\n+ * VDSO expects this to be the case so we have to explicitly link this VDSO\n+ * at a non zero address. The actual address is arbitrary, but we use the\n+ * same one as the Linux kernel VDSO.\n+ *\n+ * - The VDSO will be directly mmapped by the sentry, rather than going through\n+ * a normal ELF loading process. The VDSO must be carefully constructed such\n+ * that the layout in the ELF file is identical to the layout in memory.\n+ */\n+\n+VDSO_PRELINK = 0xffffffffff700000;\n+\n+OUTPUT_FORMAT(\"elf64-littleaarch64\", \"elf64-bigaarch64\", \"elf64-littleaarch64\")\n+OUTPUT_ARCH(aarch64)\n+\n+SECTIONS {\n+ /* The parameter page is mapped just before the VDSO. */\n+ _params = VDSO_PRELINK - 0x1000;\n+\n+ . = VDSO_PRELINK + SIZEOF_HEADERS;\n+\n+ .hash : { *(.hash) } :text\n+ .gnu.hash : { *(.gnu.hash) }\n+ .dynsym : { *(.dynsym) }\n+ .dynstr : { *(.dynstr) }\n+ .gnu.version : { *(.gnu.version) }\n+ .gnu.version_d : { *(.gnu.version_d) }\n+ .gnu.version_r : { *(.gnu.version_r) }\n+\n+ .note : { *(.note.*) } :text :note\n+\n+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr\n+ .eh_frame : { KEEP (*(.eh_frame)) } :text\n+\n+ .dynamic : { *(.dynamic) } :text :dynamic\n+\n+ .rodata : { *(.rodata*) } :text\n+\n+ .altinstructions : { *(.altinstructions) }\n+ .altinstr_replacement : { *(.altinstr_replacement) }\n+\n+ /*\n+ * TODO: Remove this alignment? Then the VDSO would fit\n+ * in a single page.\n+ */\n+ . = ALIGN(0x1000);\n+ .text : { *(.text*) } :text =0xd503201f\n+\n+ /*\n+ * N.B. There is no data/bss section. This VDSO neither needs nor uses a data\n+ * section. We omit it entirely because some gcc/clang and gold/bfd version\n+ * combinations struggle to handle an empty data PHDR segment (internal\n+ * linker assertion failures result).\n+ *\n+ * If the VDSO does incorrectly include a data section, the linker will\n+ * include it in the text segment. 
check_vdso.py looks for this degenerate\n+ * case.\n+ */\n+}\n+\n+PHDRS {\n+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R | PF_X */\n+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */\n+ note PT_NOTE FLAGS(4); /* PF_R */\n+ eh_frame_hdr PT_GNU_EH_FRAME;\n+}\n+\n+/*\n+ * Define the symbols that are to be exported.\n+ */\n+VERSION {\n+ LINUX_2.6.39 {\n+ global:\n+ __kernel_clock_getres;\n+ __kernel_clock_gettime;\n+ __kernel_gettimeofday;\n+ __kernel_rt_sigreturn;\n+ local: *;\n+ };\n+}\n" }, { "change_type": "MODIFY", "old_path": "vdso/vdso_time.cc", "new_path": "vdso/vdso_time.cc", "diff": "@@ -55,12 +55,26 @@ struct params {\n//\n// So instead, we use inline assembly with a construct that seems to have wide\n// compatibility across many toolchains.\n+#if __x86_64__\n+\n+inline struct params* get_params() {\n+ struct params* p = nullptr;\n+ asm(\"leaq _params(%%rip), %0\" : \"=r\"(p) : :);\n+ return p;\n+}\n+\n+#elif __aarch64__\n+\ninline struct params* get_params() {\nstruct params* p = nullptr;\n- asm volatile(\"leaq _params(%%rip), %0\" : \"=r\"(p) : :);\n+ asm(\"adr %0, _params\" : \"=r\"(p) : :);\nreturn p;\n}\n+#else\n+#error \"unsupported architecture\"\n+#endif\n+\nnamespace vdso {\nconst uint64_t kNsecsPerSec = 1000000000UL;\n" } ]
Go
Apache License 2.0
google/gvisor
Enable vDSO support on arm64. Signed-off-by: Haibo Xu <[email protected]> Change-Id: I20103cd6d193431ab7e8120005da1f567b9bc2eb PiperOrigin-RevId: 244280119
259,899
18.04.2019 17:48:54
25,200
cec2cdc12f30e87e5b0f6750fe1c132d89fcfb6d
tcpip/transport/udp: add Forwarder type Add a UDP forwarder for intercepting and forwarding UDP sessions.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "new_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "diff": "@@ -222,6 +222,62 @@ func TestCloseReaderWithForwarder(t *testing.T) {\nsender.close()\n}\n+func TestUDPForwarder(t *testing.T) {\n+ s, terr := newLoopbackStack()\n+ if terr != nil {\n+ t.Fatalf(\"newLoopbackStack() = %v\", terr)\n+ }\n+\n+ ip1 := tcpip.Address(net.IPv4(169, 254, 10, 1).To4())\n+ addr1 := tcpip.FullAddress{NICID, ip1, 11211}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, ip1)\n+ ip2 := tcpip.Address(net.IPv4(169, 254, 10, 2).To4())\n+ addr2 := tcpip.FullAddress{NICID, ip2, 11311}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, ip2)\n+\n+ done := make(chan struct{})\n+ fwd := udp.NewForwarder(s, func(r *udp.ForwarderRequest) {\n+ defer close(done)\n+\n+ var wq waiter.Queue\n+ ep, err := r.CreateEndpoint(&wq)\n+ if err != nil {\n+ t.Fatalf(\"r.CreateEndpoint() = %v\", err)\n+ }\n+ defer ep.Close()\n+\n+ c := NewConn(&wq, ep)\n+\n+ buf := make([]byte, 256)\n+ n, e := c.Read(buf)\n+ if e != nil {\n+ t.Errorf(\"c.Read() = %v\", e)\n+ }\n+\n+ if _, e := c.Write(buf[:n]); e != nil {\n+ t.Errorf(\"c.Write() = %v\", e)\n+ }\n+ })\n+ s.SetTransportProtocolHandler(udp.ProtocolNumber, fwd.HandlePacket)\n+\n+ c2, err := NewPacketConn(s, addr2, ipv4.ProtocolNumber)\n+ if err != nil {\n+ t.Fatal(\"NewPacketConn(port 5):\", err)\n+ }\n+\n+ sent := \"abc123\"\n+ sendAddr := fullToUDPAddr(addr1)\n+ if n, err := c2.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) {\n+ t.Errorf(\"c1.WriteTo(%q, %v) = %d, %v, want = %d, %v\", sent, sendAddr, n, err, len(sent), nil)\n+ }\n+\n+ buf := make([]byte, 256)\n+ n, recvAddr, err := c2.ReadFrom(buf)\n+ if err != nil || recvAddr.String() != sendAddr.String() {\n+ t.Errorf(\"c1.ReadFrom() = %d, %v, %v, want = %d, %v, %v\", n, recvAddr, err, len(sent), sendAddr, nil)\n+ }\n+}\n+\n// TestDeadlineChange tests that changing the deadline affects currently blocked reads.\nfunc TestDeadlineChange(t *testing.T) {\ns, err := newLoopbackStack()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/BUILD", "new_path": "pkg/tcpip/transport/udp/BUILD", "diff": "@@ -20,6 +20,7 @@ go_library(\nsrcs = [\n\"endpoint.go\",\n\"endpoint_state.go\",\n+ \"forwarder.go\",\n\"protocol.go\",\n\"udp_packet_list.go\",\n],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/transport/udp/forwarder.go", "diff": "+// Copyright 2019 Google LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package udp\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter\"\n+)\n+\n+// Forwarder is a session request forwarder, which allows clients to decide\n+// what to do with a session request, for example: ignore it, or process it.\n+//\n+// The canonical way of using it is to pass the Forwarder.HandlePacket function\n+// to 
stack.SetTransportProtocolHandler.\n+type Forwarder struct {\n+ handler func(*ForwarderRequest)\n+\n+ stack *stack.Stack\n+}\n+\n+// NewForwarder allocates and initializes a new forwarder.\n+func NewForwarder(s *stack.Stack, handler func(*ForwarderRequest)) *Forwarder {\n+ return &Forwarder{\n+ stack: s,\n+ handler: handler,\n+ }\n+}\n+\n+// HandlePacket handles all packets.\n+//\n+// This function is expected to be passed as an argument to the\n+// stack.SetTransportProtocolHandler function.\n+func (f *Forwarder) HandlePacket(r *stack.Route, id stack.TransportEndpointID, netHeader buffer.View, vv buffer.VectorisedView) bool {\n+ f.handler(&ForwarderRequest{\n+ stack: f.stack,\n+ route: r,\n+ id: id,\n+ vv: vv,\n+ })\n+\n+ return true\n+}\n+\n+// ForwarderRequest represents a session request received by the forwarder and\n+// passed to the client. Clients may optionally create an endpoint to represent\n+// it via CreateEndpoint.\n+type ForwarderRequest struct {\n+ stack *stack.Stack\n+ route *stack.Route\n+ id stack.TransportEndpointID\n+ vv buffer.VectorisedView\n+}\n+\n+// ID returns the 4-tuple (src address, src port, dst address, dst port) that\n+// represents the session request.\n+func (r *ForwarderRequest) ID() stack.TransportEndpointID {\n+ return r.id\n+}\n+\n+// CreateEndpoint creates a connected UDP endpoint for the session request.\n+func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n+ ep := newEndpoint(r.stack, r.route.NetProto, queue)\n+ if err := r.stack.RegisterTransportEndpoint(r.route.NICID(), []tcpip.NetworkProtocolNumber{r.route.NetProto}, ProtocolNumber, r.id, ep, ep.reusePort); err != nil {\n+ ep.Close()\n+ return nil, err\n+ }\n+\n+ ep.id = r.id\n+ ep.route = r.route.Clone()\n+ ep.dstPort = r.id.RemotePort\n+ ep.regNICID = r.route.NICID()\n+\n+ ep.state = stateConnected\n+\n+ ep.rcvMu.Lock()\n+ ep.rcvReady = true\n+ ep.rcvMu.Unlock()\n+\n+ ep.HandlePacket(r.route, r.id, r.vv)\n+\n+ return ep, nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
tcpip/transport/udp: add Forwarder type Add a UDP forwarder for intercepting and forwarding UDP sessions. Change-Id: I2d83c900c1931adfc59a532dd4f6b33a0db406c9 PiperOrigin-RevId: 244293576
259,854
19.04.2019 16:15:37
25,200
358eb52a76ebd41baf52972f901af0ff398e131b
Add support for the MSG_TRUNC msghdr flag. The MSG_TRUNC flag is set in the msghdr when a message is truncated. Fixes google/gvisor#200
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/epsocket/epsocket.go", "new_path": "pkg/sentry/socket/epsocket/epsocket.go", "diff": "@@ -376,7 +376,7 @@ func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\nif dst.NumBytes() == 0 {\nreturn 0, nil\n}\n- n, _, _, _, err := s.nonBlockingRead(ctx, dst, false, false, false)\n+ n, _, _, _, _, err := s.nonBlockingRead(ctx, dst, false, false, false)\nif err == syserr.ErrWouldBlock {\nreturn int64(n), syserror.ErrWouldBlock\n}\n@@ -1696,7 +1696,7 @@ func (s *SocketOperations) coalescingRead(ctx context.Context, dst usermem.IOSeq\n// nonBlockingRead issues a non-blocking read.\n//\n// TODO: Support timestamps for stream sockets.\n-func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, peek, trunc, senderRequested bool) (int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, peek, trunc, senderRequested bool) (int, int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\nisPacket := s.isPacketBased()\n// Fast path for regular reads from stream (e.g., TCP) endpoints. Note\n@@ -1712,14 +1712,14 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\ns.readMu.Lock()\nn, err := s.coalescingRead(ctx, dst, trunc)\ns.readMu.Unlock()\n- return n, nil, 0, socket.ControlMessages{}, err\n+ return n, 0, nil, 0, socket.ControlMessages{}, err\n}\ns.readMu.Lock()\ndefer s.readMu.Unlock()\nif err := s.fetchReadView(); err != nil {\n- return 0, nil, 0, socket.ControlMessages{}, err\n+ return 0, 0, nil, 0, socket.ControlMessages{}, err\n}\nif !isPacket && peek && trunc {\n@@ -1727,14 +1727,14 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\n// amount that could be read.\nvar rql tcpip.ReceiveQueueSizeOption\nif err := s.Endpoint.GetSockOpt(&rql); err != nil {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.TranslateNetstackError(err)\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.TranslateNetstackError(err)\n}\navailable := len(s.readView) + int(rql)\nbufLen := int(dst.NumBytes())\nif available < bufLen {\n- return available, nil, 0, socket.ControlMessages{}, nil\n+ return available, 0, nil, 0, socket.ControlMessages{}, nil\n}\n- return bufLen, nil, 0, socket.ControlMessages{}, nil\n+ return bufLen, 0, nil, 0, socket.ControlMessages{}, nil\n}\nn, err := dst.CopyOut(ctx, s.readView)\n@@ -1751,11 +1751,11 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\nif peek {\nif l := len(s.readView); trunc && l > n {\n// isPacket must be true.\n- return l, addr, addrLen, s.controlMessages(), syserr.FromError(err)\n+ return l, linux.MSG_TRUNC, addr, addrLen, s.controlMessages(), syserr.FromError(err)\n}\nif isPacket || err != nil {\n- return int(n), addr, addrLen, s.controlMessages(), syserr.FromError(err)\n+ return n, 0, addr, addrLen, s.controlMessages(), syserr.FromError(err)\n}\n// We need to peek beyond the first message.\n@@ -1773,7 +1773,7 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSe\n// We got some data, so no need to return an error.\nerr = nil\n}\n- return int(n), nil, 0, s.controlMessages(), syserr.FromError(err)\n+ return n, 0, nil, 0, s.controlMessages(), syserr.FromError(err)\n}\nvar msgLen int\n@@ -1785,11 +1785,16 @@ func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst 
usermem.IOSe\ns.readView.TrimFront(int(n))\n}\n+ var flags int\n+ if msgLen > int(n) {\n+ flags |= linux.MSG_TRUNC\n+ }\n+\nif trunc {\n- return msgLen, addr, addrLen, s.controlMessages(), syserr.FromError(err)\n+ n = msgLen\n}\n- return int(n), addr, addrLen, s.controlMessages(), syserr.FromError(err)\n+ return n, flags, addr, addrLen, s.controlMessages(), syserr.FromError(err)\n}\nfunc (s *SocketOperations) controlMessages() socket.ControlMessages {\n@@ -1810,7 +1815,7 @@ func (s *SocketOperations) updateTimestamp() {\n// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by\n// tcpip.Endpoint.\n-func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\n+func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, msgFlags int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\ntrunc := flags&linux.MSG_TRUNC != 0\npeek := flags&linux.MSG_PEEK != 0\ndontWait := flags&linux.MSG_DONTWAIT != 0\n@@ -1819,16 +1824,16 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n// Stream sockets ignore the sender address.\nsenderRequested = false\n}\n- n, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n+ n, msgFlags, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\nif s.isPacketBased() && err == syserr.ErrClosedForReceive && flags&linux.MSG_DONTWAIT != 0 {\n// In this situation we should return EAGAIN.\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\nif err != nil && (err != syserr.ErrWouldBlock || dontWait) {\n// Read failed and we should not retry.\n- return 0, nil, 0, socket.ControlMessages{}, err\n+ return 0, 0, nil, 0, socket.ControlMessages{}, err\n}\nif err == nil && (dontWait || !waitAll || s.isPacketBased() || int64(n) >= dst.NumBytes()) {\n@@ -1847,7 +1852,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nfor {\nvar rn int\n- rn, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\n+ rn, msgFlags, senderAddr, senderAddrLen, controlMessages, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)\nn += rn\nif err != nil && err != syserr.ErrWouldBlock {\n// Always stop on errors other than would block as we generally\n@@ -1866,12 +1871,12 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\nif n > 0 {\n- return n, senderAddr, senderAddrLen, controlMessages, nil\n+ return n, msgFlags, senderAddr, senderAddrLen, controlMessages, nil\n}\nif err == syserror.ETIMEDOUT {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- return 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": 
"pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -345,14 +345,14 @@ func (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt [\n}\n// RecvMsg implements socket.Socket.RecvMsg.\n-func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\n// Whitelist flags.\n//\n// FIXME: We can't support MSG_ERRQUEUE because it uses ancillary\n// messages that netstack/tcpip/transport/unix doesn't understand. Kill the\n// Socket interface's dependence on netstack.\nif flags&^(syscall.MSG_DONTWAIT|syscall.MSG_PEEK|syscall.MSG_TRUNC) != 0 {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrInvalidArgument\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrInvalidArgument\n}\nvar senderAddr []byte\n@@ -360,6 +360,8 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nsenderAddr = make([]byte, sizeofSockaddr)\n}\n+ var msgFlags int\n+\nrecvmsgToBlocks := safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {\n// Refuse to do anything if any part of dst.Addrs was unusable.\nif uint64(dst.NumBytes()) != dsts.NumBytes() {\n@@ -391,6 +393,7 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nreturn 0, err\n}\nsenderAddr = senderAddr[:msg.Namelen]\n+ msgFlags = int(msg.Flags)\nreturn n, nil\n})\n@@ -417,7 +420,10 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\n}\n- return int(n), senderAddr, uint32(len(senderAddr)), socket.ControlMessages{}, syserr.FromError(err)\n+ // We don't allow control messages.\n+ msgFlags &^= linux.MSG_CTRUNC\n+\n+ return int(n), msgFlags, senderAddr, uint32(len(senderAddr)), socket.ControlMessages{}, syserr.FromError(err)\n}\n// SendMsg implements socket.Socket.SendMsg.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netlink/socket.go", "new_path": "pkg/sentry/socket/netlink/socket.go", "diff": "@@ -397,7 +397,7 @@ func (s *Socket) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error\n}\n// RecvMsg implements socket.Socket.RecvMsg.\n-func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\nfrom := linux.SockAddrNetlink{\nFamily: linux.AF_NETLINK,\nPortID: 0,\n@@ -412,10 +412,14 @@ func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, have\n}\nif n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n+ var mflags int\n+ if n < int64(r.MsgSize) {\n+ mflags |= linux.MSG_TRUNC\n+ }\nif trunc {\nn = int64(r.MsgSize)\n}\n- return int(n), from, fromLen, socket.ControlMessages{}, syserr.FromError(err)\n+ return int(n), mflags, from, fromLen, socket.ControlMessages{}, syserr.FromError(err)\n}\n// We'll have to block. 
Register for notification and keep trying to\n@@ -426,17 +430,21 @@ func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, have\nfor {\nif n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock {\n+ var mflags int\n+ if n < int64(r.MsgSize) {\n+ mflags |= linux.MSG_TRUNC\n+ }\nif trunc {\nn = int64(r.MsgSize)\n}\n- return int(n), from, fromLen, socket.ControlMessages{}, syserr.FromError(err)\n+ return int(n), mflags, from, fromLen, socket.ControlMessages{}, syserr.FromError(err)\n}\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\nif err == syserror.ETIMEDOUT {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- return 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/rpcinet/socket.go", "new_path": "pkg/sentry/socket/rpcinet/socket.go", "diff": "@@ -673,7 +673,7 @@ func (s *socketOperations) extractControlMessages(payload *pb.RecvmsgResponse_Re\n}\n// RecvMsg implements socket.Socket.RecvMsg.\n-func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\n+func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, int, interface{}, uint32, socket.ControlMessages, *syserr.Error) {\nreq := &pb.SyscallRequest_Recvmsg{&pb.RecvmsgRequest{\nFd: s.fd,\nLength: uint32(dst.NumBytes()),\n@@ -694,10 +694,10 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\n}\nc := s.extractControlMessages(res)\n- return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n+ return int(res.Length), 0, res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain || flags&linux.MSG_DONTWAIT != 0 {\n- return 0, nil, 0, socket.ControlMessages{}, err\n+ return 0, 0, nil, 0, socket.ControlMessages{}, err\n}\n// We'll have to block. 
Register for notifications and keep trying to\n@@ -718,23 +718,23 @@ func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n}\n}\nc := s.extractControlMessages(res)\n- return int(res.Length), res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n+ return int(res.Length), 0, res.Address.GetAddress(), res.Address.GetLength(), c, syserr.FromError(e)\n}\nif err != syserr.ErrWouldBlock && err != syserr.ErrTryAgain {\n- return 0, nil, 0, socket.ControlMessages{}, err\n+ return 0, 0, nil, 0, socket.ControlMessages{}, err\n}\nif s.isShutRdSet() {\n// Blocking would have caused us to block indefinitely so we return 0,\n// this is the same behavior as Linux.\n- return 0, nil, 0, socket.ControlMessages{}, nil\n+ return 0, 0, nil, 0, socket.ControlMessages{}, nil\n}\nif err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {\nif err == syserror.ETIMEDOUT {\n- return 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- return 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n+ return 0, 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -88,7 +88,7 @@ type Socket interface {\n// not necessarily the actual length of the address.\n//\n// If err != nil, the recv was not successful.\n- RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages ControlMessages, err *syserr.Error)\n+ RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, msgFlags int, senderAddr interface{}, senderAddrLen uint32, controlMessages ControlMessages, err *syserr.Error)\n// SendMsg implements the sendmsg(2) linux syscall. 
SendMsg does not take\n// ownership of the ControlMessage on error.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -477,7 +477,7 @@ func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS\n// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by\n// a transport.Endpoint.\n-func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\n+func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, msgFlags int, senderAddr interface{}, senderAddrLen uint32, controlMessages socket.ControlMessages, err *syserr.Error) {\ntrunc := flags&linux.MSG_TRUNC != 0\npeek := flags&linux.MSG_PEEK != 0\ndontWait := flags&linux.MSG_DONTWAIT != 0\n@@ -515,11 +515,17 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif r.From != nil {\nfrom, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n}\n+\n+ if err != nil || dontWait || !waitAll || s.isPacket || n >= dst.NumBytes() {\n+ if s.isPacket && n < int64(r.MsgSize) {\n+ msgFlags |= linux.MSG_TRUNC\n+ }\n+\nif trunc {\nn = int64(r.MsgSize)\n}\n- if err != nil || dontWait || !waitAll || s.isPacket || n >= dst.NumBytes() {\n- return int(n), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n+\n+ return int(n), msgFlags, from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n}\n// Don't overwrite any data we received.\n@@ -541,14 +547,19 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nfrom, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n}\nif trunc {\n- n = int64(r.MsgSize)\n- }\n+ // n and r.MsgSize are the same for streams.\n+ total += int64(r.MsgSize)\n+ } else {\ntotal += n\n+ }\nif err != nil || !waitAll || s.isPacket || n >= dst.NumBytes() {\nif total > 0 {\nerr = nil\n}\n- return int(total), from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n+ if s.isPacket && n < int64(r.MsgSize) {\n+ msgFlags |= linux.MSG_TRUNC\n+ }\n+ return int(total), msgFlags, from, fromLen, socket.ControlMessages{Unix: r.Control}, syserr.FromError(err)\n}\n// Don't overwrite any data we received.\n@@ -560,9 +571,9 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nerr = nil\n}\nif err == syserror.ETIMEDOUT {\n- return int(total), nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n+ return int(total), msgFlags, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain\n}\n- return int(total), nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n+ return int(total), msgFlags, nil, 0, socket.ControlMessages{}, syserr.FromError(err)\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -742,17 +742,15 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\n// Fast path when no control message nor name buffers are provided.\nif msg.ControlLen == 0 && msg.NameLen == 0 {\n- n, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)\n+ n, mflags, _, _, cms, err 
:= s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)\nif err != nil {\nreturn 0, syserror.ConvertIntr(err.ToError(), kernel.ERESTARTSYS)\n}\ncms.Unix.Release()\n- if msg.Flags != 0 {\n+ if int(msg.Flags) != mflags {\n// Copy out the flags to the caller.\n- //\n- // TODO: Plumb through actual flags.\n- if _, err := t.CopyOut(msgPtr+flagsOffset, int32(0)); err != nil {\n+ if _, err := t.CopyOut(msgPtr+flagsOffset, int32(mflags)); err != nil {\nreturn 0, err\n}\n}\n@@ -763,7 +761,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\nif msg.ControlLen > maxControlLen {\nreturn 0, syscall.ENOBUFS\n}\n- n, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen)\n+ n, mflags, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen)\nif e != nil {\nreturn 0, syserror.ConvertIntr(e.ToError(), kernel.ERESTARTSYS)\n}\n@@ -802,9 +800,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\n}\n// Copy out the flags to the caller.\n- //\n- // TODO: Plumb through actual flags.\n- if _, err := t.CopyOut(msgPtr+flagsOffset, int32(0)); err != nil {\n+ if _, err := t.CopyOut(msgPtr+flagsOffset, int32(mflags)); err != nil {\nreturn 0, err\n}\n@@ -856,7 +852,7 @@ func recvFrom(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, f\nflags |= linux.MSG_DONTWAIT\n}\n- n, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)\n+ n, _, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)\ncm.Unix.Release()\nif e != nil {\nreturn 0, syserror.ConvertIntr(e.ToError(), kernel.ERESTARTSYS)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_netlink_route.cc", "new_path": "test/syscalls/linux/socket_netlink_route.cc", "diff": "@@ -220,6 +220,86 @@ TEST(NetlinkRouteTest, GetLinkDump) {\nEXPECT_TRUE(loopbackFound);\n}\n+TEST(NetlinkRouteTest, MsgHdrMsgTrunc) {\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket());\n+\n+ struct request {\n+ struct nlmsghdr hdr;\n+ struct ifinfomsg ifm;\n+ };\n+\n+ constexpr uint32_t kSeq = 12345;\n+\n+ struct request req = {};\n+ req.hdr.nlmsg_len = sizeof(req);\n+ req.hdr.nlmsg_type = RTM_GETLINK;\n+ req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\n+ req.hdr.nlmsg_seq = kSeq;\n+ req.ifm.ifi_family = AF_UNSPEC;\n+\n+ struct iovec iov = {};\n+ iov.iov_base = &req;\n+ iov.iov_len = sizeof(req);\n+\n+ struct msghdr msg = {};\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+ // No destination required; it defaults to pid 0, the kernel.\n+\n+ ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());\n+\n+ // Small enough to ensure that the response doesn't fit.\n+ constexpr size_t kBufferSize = 10;\n+ std::vector<char> buf(kBufferSize);\n+ iov.iov_base = buf.data();\n+ iov.iov_len = buf.size();\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(fd.get(), &msg, 0),\n+ SyscallSucceedsWithValue(kBufferSize));\n+ EXPECT_EQ((msg.msg_flags & MSG_TRUNC), MSG_TRUNC);\n+}\n+\n+TEST(NetlinkRouteTest, MsgTruncMsgHdrMsgTrunc) {\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket());\n+\n+ struct request {\n+ struct nlmsghdr hdr;\n+ struct ifinfomsg ifm;\n+ };\n+\n+ constexpr uint32_t kSeq = 12345;\n+\n+ struct request req = {};\n+ req.hdr.nlmsg_len = sizeof(req);\n+ req.hdr.nlmsg_type = RTM_GETLINK;\n+ req.hdr.nlmsg_flags = NLM_F_REQUEST | 
NLM_F_DUMP;\n+ req.hdr.nlmsg_seq = kSeq;\n+ req.ifm.ifi_family = AF_UNSPEC;\n+\n+ struct iovec iov = {};\n+ iov.iov_base = &req;\n+ iov.iov_len = sizeof(req);\n+\n+ struct msghdr msg = {};\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+ // No destination required; it defaults to pid 0, the kernel.\n+\n+ ASSERT_THAT(RetryEINTR(sendmsg)(fd.get(), &msg, 0), SyscallSucceeds());\n+\n+ // Small enough to ensure that the response doesn't fit.\n+ constexpr size_t kBufferSize = 10;\n+ std::vector<char> buf(kBufferSize);\n+ iov.iov_base = buf.data();\n+ iov.iov_len = buf.size();\n+\n+ int res = 0;\n+ ASSERT_THAT(res = RetryEINTR(recvmsg)(fd.get(), &msg, MSG_TRUNC),\n+ SyscallSucceeds());\n+ EXPECT_GT(res, kBufferSize);\n+ EXPECT_EQ((msg.msg_flags & MSG_TRUNC), MSG_TRUNC);\n+}\n+\nTEST(NetlinkRouteTest, ControlMessageIgnored) {\nFileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket());\nuint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get()));\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_non_stream.cc", "new_path": "test/syscalls/linux/socket_non_stream.cc", "diff": "#include \"test/syscalls/linux/socket_non_stream.h\"\n#include <stdio.h>\n+#include <sys/socket.h>\n#include <sys/un.h>\n#include \"gtest/gtest.h\"\n@@ -89,6 +90,33 @@ TEST_P(NonStreamSocketPairTest, SingleRecv) {\nEXPECT_EQ(0, memcmp(sent_data1, received_data, sizeof(sent_data1)));\n}\n+TEST_P(NonStreamSocketPairTest, RecvmsgMsghdrFlagMsgTrunc) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[10];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+ ASSERT_THAT(\n+ RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ char received_data[sizeof(sent_data) / 2] = {};\n+\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ struct msghdr msg = {};\n+ msg.msg_flags = -1;\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+ EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));\n+\n+ // Check that msghdr flags were updated.\n+ EXPECT_EQ(msg.msg_flags, MSG_TRUNC);\n+}\n+\n// Stream sockets allow data sent with multiple sends to be peeked at in a\n// single recv. 
Datagram sockets (except for unix sockets) do not.\n//\n@@ -142,6 +170,33 @@ TEST_P(NonStreamSocketPairTest, MsgTruncTruncation) {\nsizeof(sent_data) / 2));\n}\n+TEST_P(NonStreamSocketPairTest, MsgTruncTruncationRecvmsgMsghdrFlagMsgTrunc) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[10];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+ ASSERT_THAT(\n+ RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ char received_data[sizeof(sent_data) / 2] = {};\n+\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ struct msghdr msg = {};\n+ msg.msg_flags = -1;\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_TRUNC),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+ EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));\n+\n+ // Check that msghdr flags were updated.\n+ EXPECT_EQ(msg.msg_flags, MSG_TRUNC);\n+}\n+\nTEST_P(NonStreamSocketPairTest, MsgTruncSameSize) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[512];\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_stream.cc", "new_path": "test/syscalls/linux/socket_stream.cc", "diff": "@@ -81,6 +81,33 @@ TEST_P(StreamSocketPairTest, WriteOneSideClosed) {\nSyscallFailsWithErrno(EPIPE));\n}\n+TEST_P(StreamSocketPairTest, RecvmsgMsghdrFlagsNoMsgTrunc) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[10];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+ ASSERT_THAT(\n+ RetryEINTR(send)(sockets->first_fd(), sent_data, sizeof(sent_data), 0),\n+ SyscallSucceedsWithValue(sizeof(sent_data)));\n+\n+ char received_data[sizeof(sent_data) / 2] = {};\n+\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ struct msghdr msg = {};\n+ msg.msg_flags = -1;\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+ EXPECT_EQ(0, memcmp(received_data, sent_data, sizeof(received_data)));\n+\n+ // Check that msghdr flags were cleared (MSG_TRUNC was not set).\n+ EXPECT_EQ(msg.msg_flags, 0);\n+}\n+\nTEST_P(StreamSocketPairTest, MsgTrunc) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[512];\n" } ]
Go
Apache License 2.0
google/gvisor
Add support for the MSG_TRUNC msghdr flag. The MSG_TRUNC flag is set in the msghdr when a message is truncated. Fixes google/gvisor#200 PiperOrigin-RevId: 244440486 Change-Id: I03c7d5e7f5935c0c6b8d69b012db1780ac5b8456
259,881
22.04.2019 18:17:25
25,200
f86c35a51ff92718e36ff6075339300be11e09b3
Clean up state error handling
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/state.go", "new_path": "pkg/sentry/control/state.go", "diff": "@@ -64,7 +64,7 @@ func (s *State) Save(o *SaveOpts, _ *struct{}) error {\nlog.Infof(\"Save succeeded: exiting...\")\n} else {\nlog.Warningf(\"Save failed: exiting...\")\n- s.Kernel.SetExitError(err)\n+ s.Kernel.SetSaveError(err)\n}\ns.Kernel.Kill(kernel.ExitStatus{})\n},\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -175,9 +175,9 @@ type Kernel struct {\n// netlinkPorts manages allocation of netlink socket port IDs.\nnetlinkPorts *port.Manager\n- // exitErr is the error causing the sandbox to exit, if any. It is\n- // protected by extMu.\n- exitErr error `state:\"nosave\"`\n+ // saveErr is the error causing the sandbox to exit during save, if\n+ // any. It is protected by extMu.\n+ saveErr error `state:\"nosave\"`\n// danglingEndpoints is used to save / restore tcpip.DanglingEndpoints.\ndanglingEndpoints struct{} `state:\".([]tcpip.Endpoint)\"`\n@@ -1029,20 +1029,21 @@ func (k *Kernel) NetlinkPorts() *port.Manager {\nreturn k.netlinkPorts\n}\n-// ExitError returns the sandbox error that caused the kernel to exit.\n-func (k *Kernel) ExitError() error {\n+// SaveError returns the sandbox error that caused the kernel to exit during\n+// save.\n+func (k *Kernel) SaveError() error {\nk.extMu.Lock()\ndefer k.extMu.Unlock()\n- return k.exitErr\n+ return k.saveErr\n}\n-// SetExitError sets the sandbox error that caused the kernel to exit, if one is\n-// not already set.\n-func (k *Kernel) SetExitError(err error) {\n+// SetSaveError sets the sandbox error that caused the kernel to exit during\n+// save, if one is not already set.\n+func (k *Kernel) SetSaveError(err error) {\nk.extMu.Lock()\ndefer k.extMu.Unlock()\n- if k.exitErr == nil {\n- k.exitErr = err\n+ if k.saveErr == nil {\n+ k.saveErr = err\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/state/state.go", "new_path": "pkg/state/state.go", "diff": "@@ -60,8 +60,8 @@ import (\n// ErrState is returned when an error is encountered during encode/decode.\ntype ErrState struct {\n- // Err is the underlying error.\n- Err error\n+ // err is the underlying error.\n+ err error\n// path is the visit path from root to the current object.\npath string\n@@ -72,7 +72,17 @@ type ErrState struct {\n// Error returns a sensible description of the state error.\nfunc (e *ErrState) Error() string {\n- return fmt.Sprintf(\"%v:\\nstate path: %s\\n%s\", e.Err, e.path, e.trace)\n+ return fmt.Sprintf(\"%v:\\nstate path: %s\\n%s\", e.err, e.path, e.trace)\n+}\n+\n+// UnwrapErrState returns the underlying error in ErrState.\n+//\n+// If err is not *ErrState, err is returned directly.\n+func UnwrapErrState(err error) error {\n+ if e, ok := err.(*ErrState); ok {\n+ return e.err\n+ }\n+ return err\n}\n// Save saves the given object state.\n@@ -318,9 +328,9 @@ func (sr *recoverable) safely(fn func()) (err error) {\nif r := recover(); r != nil {\nes := new(ErrState)\nif e, ok := r.(error); ok {\n- es.Err = e\n+ es.err = e\n} else {\n- es.Err = fmt.Errorf(\"%v\", r)\n+ es.err = fmt.Errorf(\"%v\", r)\n}\nes.path = sr.path()\n" } ]
Go
Apache License 2.0
google/gvisor
Clean up state error handling PiperOrigin-RevId: 244773836 Change-Id: I32223f79d2314fe1ac4ddfc63004fc22ff634adf
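The UnwrapErrState helper in this change is a small instance of the wrap-then-unwrap error pattern. A stand-alone sketch of the same idea, with type and field names that are illustrative rather than gVisor's:

```go
package example

import "fmt"

// pathError attaches the visit path to an underlying error, mirroring the
// role ErrState plays in the diff above.
type pathError struct {
	err  error
	path string
}

func (e *pathError) Error() string {
	return fmt.Sprintf("%v\nstate path: %s", e.err, e.path)
}

// unwrapPathError returns the underlying error, or err itself if it is not a
// *pathError, so callers can still compare against sentinel errors.
func unwrapPathError(err error) error {
	if e, ok := err.(*pathError); ok {
		return e.err
	}
	return err
}
```

In current Go the same effect is usually achieved by giving the wrapper an Unwrap() error method so callers can use errors.Is and errors.As.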
259,884
22.04.2019 01:07:33
14,400
e986ad2f37767bea14e86f0cb388aeba051fd9eb
Redirect www.gvisor.dev to gvisor.dev
[ { "change_type": "MODIFY", "old_path": "cmd/gvisor-website/main.go", "new_path": "cmd/gvisor-website/main.go", "diff": "@@ -23,6 +23,7 @@ import (\n\"net/http\"\n\"os\"\n\"regexp\"\n+ \"strings\"\n)\nvar redirects = map[string]string{\n@@ -56,6 +57,20 @@ func redirectWithQuery(w http.ResponseWriter, r *http.Request, target string) {\nhttp.Redirect(w, r, url, http.StatusFound)\n}\n+// hostRedirectHandler redirects the www. domain to the naked domain.\n+func hostRedirectHandler(h http.Handler) http.Handler {\n+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n+ if strings.HasPrefix(r.Host, \"www.\") {\n+ // Redirect to the naked domain.\n+ r.URL.Scheme = \"https\" // Assume https.\n+ r.URL.Host = r.Host[4:] // Remove the 'www.'\n+ http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)\n+ return\n+ }\n+ h.ServeHTTP(w, r)\n+ })\n+}\n+\n// prefixRedirectHandler returns a handler that redirects to the given formated url.\nfunc prefixRedirectHandler(prefix, baseURL string) http.Handler {\nreturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n@@ -89,11 +104,11 @@ func registerRedirects(mux *http.ServeMux) {\nfor prefix, baseURL := range prefixHelpers {\np := \"/\" + prefix + \"/\"\n- mux.Handle(p, prefixRedirectHandler(p, baseURL))\n+ mux.Handle(p, hostRedirectHandler(prefixRedirectHandler(p, baseURL)))\n}\nfor path, redirect := range redirects {\n- mux.Handle(path, redirectHandler(redirect))\n+ mux.Handle(path, hostRedirectHandler(redirectHandler(redirect)))\n}\n}\n@@ -102,7 +117,7 @@ func registerStatic(mux *http.ServeMux, staticDir string) {\nif mux == nil {\nmux = http.DefaultServeMux\n}\n- mux.Handle(\"/\", http.FileServer(http.Dir(staticDir)))\n+ mux.Handle(\"/\", hostRedirectHandler(http.FileServer(http.Dir(staticDir))))\n}\nfunc envFlagString(name, def string) string {\n" } ]
Go
Apache License 2.0
google/gvisor
Redirect www.gvisor.dev to gvisor.dev
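hostRedirectHandler above is ordinary http.Handler middleware. A self-contained sketch of the same wrapping pattern, where the port and static directory are placeholders rather than the site's real configuration:

```go
package main

import (
	"log"
	"net/http"
	"strings"
)

// stripWWW sends a permanent redirect from a "www." host to the bare domain
// and otherwise passes the request to the wrapped handler unchanged.
func stripWWW(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.HasPrefix(r.Host, "www.") {
			u := *r.URL
			u.Scheme = "https"
			u.Host = strings.TrimPrefix(r.Host, "www.")
			http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.Handle("/", http.FileServer(http.Dir("public/static")))
	log.Fatal(http.ListenAndServe(":8080", stripWWW(mux)))
}
```

Because the wrapper composes with any handler, the commit can apply it uniformly to the redirect handlers and the static file server.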
259,884
22.04.2019 22:42:46
14,400
7853b971d43f773002ea92ecc95af7becd9dac55
Fix some html errors (refs #46 #51). Fix duplicate main_navbar id on top page. Fix missing ';' in nbsp entity in footer. Fix nested <a> tags in checkpoint/restore doc. Fix nested <p> tags on top page feature blocks. Add '--check-html' option to htmlproofer build step.
[ { "change_type": "MODIFY", "old_path": "cloudbuild.yaml", "new_path": "cloudbuild.yaml", "diff": "@@ -18,7 +18,7 @@ steps:\nargs: [\"make\"]\n# Test the HTML for issues.\n- name: 'gcr.io/gvisor-website/html-proofer:3.10.2'\n- args: [\"htmlproofer\", \"--disable-external\", \"public/static\"]\n+ args: [\"htmlproofer\", \"--disable-external\", \"--check-html\", \"public/static\"]\n# Deploy to App Engine only for master branch.\n- name: 'gcr.io/cloud-builders/gcloud'\nentrypoint: 'bash'\n" }, { "change_type": "MODIFY", "old_path": "content/_index.html", "new_path": "content/_index.html", "diff": "@@ -20,32 +20,32 @@ gVisor integrates with <a href=\"https://www.docker.com/\" target=\"_blank\" rel=\"no\n{{< blocks/section color=\"dark\" >}}\n-{{% blocks/feature icon=\"fas fa-lock\" title=\"Defense in Depth\" %}}\n+{{< blocks/feature icon=\"fas fa-lock\" title=\"Defense in Depth\" >}}\nEach sandbox has its own user-space kernel, providing additional protection from host kernel vulnerabilities.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n-{{% blocks/feature icon=\"fas fa-feather-alt\" title=\"Lightweight\" %}}\n+{{< blocks/feature icon=\"fas fa-feather-alt\" title=\"Lightweight\" >}}\nRuns as a normal process and uses the host kernel for memory management and scheduling.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n-{{% blocks/feature icon=\"fab fa-linux\" title=\"Zero Configuration\" %}}\n+{{< blocks/feature icon=\"fab fa-linux\" title=\"Zero Configuration\" >}}\nCapable of running most Linux applications unmodified, with zero configuration.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n{{< /blocks/section >}}\n{{< blocks/section color=\"white\" >}}\n-{{% blocks/feature icon=\"fas fa-book\" title=\"Read the Docs\" %}}\n+{{< blocks/feature icon=\"fas fa-book\" title=\"Read the Docs\" >}}\nRead the [documentation](./docs/) to understand gVisor, its architecture and trade-offs, and how to use it.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n-{{% blocks/feature icon=\"fas fa-code-branch\" title=\"Contribute to gVisor\" %}}\n+{{< blocks/feature icon=\"fas fa-code-branch\" title=\"Contribute to gVisor\" >}}\nAnyone is welcome to be a gVisor contributor. Please check out the [community information](./docs/community) to get started.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n-{{% blocks/feature icon=\"fab fa-github\" title=\"Give Feedback\" %}}\n+{{< blocks/feature icon=\"fab fa-github\" title=\"Give Feedback\" >}}\nFile feature requests, bugs, and compatibility issues on <a href=\"https://github.com/google/gvisor/issues\" target=\"_blank\" rel=\"noopener\">GitHub</a>.\n-{{% /blocks/feature %}}\n+{{< /blocks/feature >}}\n{{< /blocks/section >}}\n" }, { "change_type": "MODIFY", "old_path": "content/docs/user_guide/checkpoint_restore.md", "new_path": "content/docs/user_guide/checkpoint_restore.md", "diff": "@@ -83,25 +83,19 @@ docker start --checkpoint --checkpoint-dir=<directory> <container>\n### Issues Preventing Compatibility with Docker\n-#### [Moby #37360][leave-running]\n-\n-Docker version 18.03.0-ce and earlier hangs when checkpointing and does not\n-create the checkpoint. To successfully use this feature, install a custom\n-version of docker-ce from the moby repository. This issue is caused by an\n-improper implementation of the `--leave-running` flag. 
This issue is fixed in\n-newer releases.\n-\n-#### Docker does not support restoration into new containers.\n-\n-Docker currently expects the container which created the checkpoint to be the\n-same container used to restore which is not possible in runsc. When Docker\n-supports container migration and therefore restoration into new containers, this\n-will be the flow.\n-\n-#### [Moby #37344][checkpoint-dir]\n-\n-Docker does not currently support the `--checkpoint-dir` flag but this will be\n-required when restoring from a checkpoint made in another container.\n+- **[Moby #37360][leave-running]:** Docker version 18.03.0-ce and earlier hangs\n+ when checkpointing and does not create the checkpoint. To successfully use\n+ this feature, install a custom version of docker-ce from the moby repository.\n+ This issue is caused by an improper implementation of the `--leave-running`\n+ flag. This issue is fixed in newer releases.\n+- **Docker does not support restoration into new containers:** Docker currently\n+ expects the container which created the checkpoint to be the same container\n+ used to restore which is not possible in runsc. When Docker supports container\n+ migration and therefore restoration into new containers, this will be the\n+ flow.\n+- **[Moby #37344][checkpoint-dir]:** Docker does not currently support the\n+ `--checkpoint-dir` flag but this will be required when restoring from a\n+ checkpoint made in another container.\n[leave-running]: https://github.com/moby/moby/pull/37360\n[checkpoint-dir]: https://github.com/moby/moby/issues/37344\n" }, { "change_type": "MODIFY", "old_path": "layouts/partials/footer.html", "new_path": "layouts/partials/footer.html", "diff": "<ul class=\"list-inline mb-0\">\n{{ template \"footer-links-block\" . }}\n{{ end }}\n- <li class=\"list-inline-item mx-2\" >&nbsp</li>\n+ <li class=\"list-inline-item mx-2\" >&nbsp;</li>\n<li class=\"list-inline-item mx-2\" >\n<img src=\"/img/powered-gvisor.png\" alt=\"Powered by gVisor\">\n</li>\n" }, { "change_type": "MODIFY", "old_path": "layouts/shortcodes/blocks/cover.html", "new_path": "layouts/shortcodes/blocks/cover.html", "diff": "<div class=\"row\">\n<div class=\"cover-content col-12\">\n<nav class=\"navbar navbar-expand navbar-dark flex-column flex-md-row\">\n- <div class=\"td-navbar-nav-scroll ml-md-auto\" id=\"main_navbar\">\n+ <div class=\"td-navbar-nav-scroll ml-md-auto\" id=\"cover_navbar\">\n<ul class=\"navbar-nav mt-2 mt-lg-0\">\n{{ $p := . }}\n{{ range .Site.Menus.main }}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix some html errors (refs #46 #51) - Fix duplicate main_navbar id on top page. - Fix missing ';' in nbsp entity in footer. - Fix nested <a> tags in checkpoint/restore doc. - Fix nested <p> tags on top page feature blocks. - Add '--check-html' option to htmlproofer build step.
259,891
23.04.2019 11:32:34
25,200
df21460cfdf589299e98171407741e3c253debe4
Fix container_test flakes. Create, Start, and Destroy were racing to create and destroy the metadata directory of containers. This is a re-upload of https://gvisor-review.googlesource.com/c/gvisor/+/16260, but with the correct account.
[ { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -99,7 +99,9 @@ type Container struct {\n// BundleDir is the directory containing the container bundle.\nBundleDir string `json:\"bundleDir\"`\n- // Root is the directory containing the container metadata file.\n+ // Root is the directory containing the container metadata file. If this\n+ // container is the root container, Root and RootContainerDir will be the\n+ // same.\nRoot string `json:\"root\"`\n// CreatedAt is the time the container was created.\n@@ -128,6 +130,12 @@ type Container struct {\n// Sandbox is the sandbox this container is running in. It's set when the\n// container is created and reset when the sandbox is destroyed.\nSandbox *sandbox.Sandbox `json:\"sandbox\"`\n+\n+ // RootContainerDir is the root directory containing the metadata file of the\n+ // sandbox root container. It's used to lock in order to serialize creating\n+ // and deleting this Container's metadata directory. If this container is the\n+ // root container, this is the same as Root.\n+ RootContainerDir string\n}\n// Load loads a container with the given id from a metadata file. id may be an\n@@ -243,6 +251,12 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\nreturn nil, err\n}\n+ unlockRoot, err := maybeLockRootContainer(spec, conf.RootDir)\n+ if err != nil {\n+ return nil, err\n+ }\n+ defer unlockRoot()\n+\n// Lock the container metadata file to prevent concurrent creations of\n// containers with the same id.\ncontainerRoot := filepath.Join(conf.RootDir, id)\n@@ -269,6 +283,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\nStatus: Creating,\nCreatedAt: time.Now(),\nOwner: os.Getenv(\"USER\"),\n+ RootContainerDir: conf.RootDir,\n}\n// The Cleanup object cleans up partially created containers when an error occurs.\n// Any errors occuring during cleanup itself are ignored.\n@@ -279,7 +294,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n// started in an existing sandbox, we must do so. 
The metadata will\n// indicate the ID of the sandbox, which is the same as the ID of the\n// init container in the sandbox.\n- if specutils.ShouldCreateSandbox(spec) {\n+ if isRoot(spec) {\nlog.Debugf(\"Creating new sandbox for container %q\", id)\n// Create and join cgroup before processes are created to ensure they are\n@@ -354,6 +369,13 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n// Start starts running the containerized process inside the sandbox.\nfunc (c *Container) Start(conf *boot.Config) error {\nlog.Debugf(\"Start container %q\", c.ID)\n+\n+ unlockRoot, err := maybeLockRootContainer(c.Spec, c.RootContainerDir)\n+ if err != nil {\n+ return err\n+ }\n+ defer unlockRoot()\n+\nunlock, err := c.lock()\nif err != nil {\nreturn err\n@@ -371,7 +393,7 @@ func (c *Container) Start(conf *boot.Config) error {\n}\n}\n- if specutils.ShouldCreateSandbox(c.Spec) {\n+ if isRoot(c.Spec) {\nif err := c.Sandbox.StartRoot(c.Spec, conf); err != nil {\nreturn err\n}\n@@ -418,6 +440,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str\nreturn err\n}\ndefer unlock()\n+\nif err := c.requireStatus(\"restore\", Created); err != nil {\nreturn err\n}\n@@ -644,6 +667,12 @@ func (c *Container) Destroy() error {\n// of errors return their concatenation.\nvar errs []string\n+ unlock, err := maybeLockRootContainer(c.Spec, c.RootContainerDir)\n+ if err != nil {\n+ return err\n+ }\n+ defer unlock()\n+\nif err := c.stop(); err != nil {\nerr = fmt.Errorf(\"stopping container: %v\", err)\nlog.Warningf(\"%v\", err)\n@@ -960,6 +989,33 @@ func lockContainerMetadata(containerRootDir string) (func() error, error) {\nreturn l.Unlock, nil\n}\n+// maybeLockRootContainer locks the sandbox root container. It is used to\n+// prevent races to create and delete child container sandboxes.\n+func maybeLockRootContainer(spec *specs.Spec, rootDir string) (func() error, error) {\n+ if isRoot(spec) {\n+ return func() error { return nil }, nil\n+ }\n+\n+ sbid, ok := specutils.SandboxID(spec)\n+ if !ok {\n+ return nil, fmt.Errorf(\"no sandbox ID found when locking root container\")\n+ }\n+ sb, err := Load(rootDir, sbid)\n+ if err != nil {\n+ return nil, err\n+ }\n+\n+ unlock, err := sb.lock()\n+ if err != nil {\n+ return nil, err\n+ }\n+ return unlock, nil\n+}\n+\n+func isRoot(spec *specs.Spec) bool {\n+ return specutils.ShouldCreateSandbox(spec)\n+}\n+\n// runInCgroup executes fn inside the specified cgroup. If cg is nil, execute\n// it in the current context.\nfunc runInCgroup(cg *cgroup.Cgroup, fn func() error) error {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix container_test flakes. Create, Start, and Destroy were racing to create and destroy the metadata directory of containers. This is a re-upload of https://gvisor-review.googlesource.com/c/gvisor/+/16260, but with the correct account. Change-Id: I16b7a9d0971f0df873e7f4145e6ac8f72730a4f1 PiperOrigin-RevId: 244892991
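The fix serializes container create/destroy by taking a lock tied to the root container's metadata. The underlying technique, reduced to a generic sketch with flock(2) (the lock path and function name are illustrative, not runsc's actual layout):

```go
package example

import (
	"os"
	"syscall"
)

// withFileLock takes an exclusive advisory lock on lockPath, runs fn, then
// releases the lock; concurrent callers on the same path are serialized.
func withFileLock(lockPath string, fn func() error) error {
	f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
		return err
	}
	// Closing the file would also drop the lock, but unlock explicitly for clarity.
	defer syscall.Flock(int(f.Fd()), syscall.LOCK_UN)

	return fn()
}
```

flock locks are advisory, so this only serializes callers that all go through the same lock path, which is why the change routes child-container operations through the root container's lock.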
259,992
23.04.2019 16:10:05
25,200
908edee04f92055a8c53a63d1b8d57ffe56aa682
Replace os.File with fd.FD in fsgofer. os.NewFile() accounts for 38% of CPU time in localFile.Walk(). This change switches to use fd.FD, which is much cheaper to create. Now, fd.New() in localFile.Walk() accounts for only 4%.
[ { "change_type": "MODIFY", "old_path": "pkg/fd/fd.go", "new_path": "pkg/fd/fd.go", "diff": "@@ -167,6 +167,24 @@ func NewFromFile(file *os.File) (*FD, error) {\nreturn New(fd), nil\n}\n+// Open is equivallent to open(2).\n+func Open(path string, openmode int, perm uint32) (*FD, error) {\n+ f, err := syscall.Open(path, openmode|syscall.O_LARGEFILE, perm)\n+ if err != nil {\n+ return nil, err\n+ }\n+ return New(f), nil\n+}\n+\n+// OpenAt is equivallent to openat(2).\n+func OpenAt(dir *FD, path string, flags int, mode uint32) (*FD, error) {\n+ f, err := syscall.Openat(dir.FD(), path, flags, mode)\n+ if err != nil {\n+ return nil, err\n+ }\n+ return New(f), nil\n+}\n+\n// Close closes the file descriptor contained in the FD.\n//\n// Close is safe to call multiple times, but will return an error after the\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -27,6 +27,7 @@ import (\n\"os\"\n\"path\"\n\"path/filepath\"\n+ \"runtime\"\n\"sync\"\n\"syscall\"\n@@ -122,13 +123,13 @@ func (a *attachPoint) Attach() (p9.File, error) {\nif err != nil {\nreturn nil, fmt.Errorf(\"stat file %q, err: %v\", a.prefix, err)\n}\n- mode := os.O_RDWR\n+ mode := syscall.O_RDWR\nif a.conf.ROMount || stat.Mode&syscall.S_IFDIR != 0 {\n- mode = os.O_RDONLY\n+ mode = syscall.O_RDONLY\n}\n// Open the root directory.\n- f, err := os.OpenFile(a.prefix, mode|openFlags, 0)\n+ f, err := fd.Open(a.prefix, openFlags|mode, 0)\nif err != nil {\nreturn nil, fmt.Errorf(\"unable to open file %q, err: %v\", a.prefix, err)\n}\n@@ -201,8 +202,9 @@ type localFile struct {\nhostPath string\n// file is opened when localFile is created and it's never nil. It may be\n- // reopened...\n- file *os.File\n+ // reopened if the Open() mode is wider than the mode the file was originally\n+ // opened with.\n+ file *fd.FD\n// mode is the mode in which the file was opened. Set to invalidMode\n// if localFile isn't opened.\n@@ -215,14 +217,10 @@ type localFile struct {\nreadDirMu sync.Mutex\n}\n-func openAnyFileFromParent(parent *localFile, name string) (*os.File, string, error) {\n+func openAnyFileFromParent(parent *localFile, name string) (*fd.FD, string, error) {\npath := path.Join(parent.hostPath, name)\n- f, err := openAnyFile(path, func(mode int) (*os.File, error) {\n- fd, err := syscall.Openat(parent.fd(), name, openFlags|mode, 0)\n- if err != nil {\n- return nil, err\n- }\n- return os.NewFile(uintptr(fd), path), nil\n+ f, err := openAnyFile(path, func(mode int) (*fd.FD, error) {\n+ return fd.OpenAt(parent.file, name, openFlags|mode, 0)\n})\nreturn f, path, err\n}\n@@ -230,7 +228,7 @@ func openAnyFileFromParent(parent *localFile, name string) (*os.File, string, er\n// openAnyFile attempts to open the file in O_RDONLY and if it fails fallsback\n// to O_PATH. 'path' is used for logging messages only. 'fn' is what does the\n// actual file open and is customizable by the caller.\n-func openAnyFile(path string, fn func(mode int) (*os.File, error)) (*os.File, error) {\n+func openAnyFile(path string, fn func(mode int) (*fd.FD, error)) (*fd.FD, error) {\n// Attempt to open file in the following mode in order:\n// 1. RDONLY | NONBLOCK: for all files, works for directories and ro mounts too.\n// Use non-blocking to prevent getting stuck inside open(2) for FIFOs. 
This option\n@@ -239,7 +237,7 @@ func openAnyFile(path string, fn func(mode int) (*os.File, error)) (*os.File, er\nmodes := []int{syscall.O_RDONLY | syscall.O_NONBLOCK, unix.O_PATH}\nvar err error\n- var file *os.File\n+ var file *fd.FD\nfor i, mode := range modes {\nfile, err = fn(mode)\nif err == nil {\n@@ -279,7 +277,7 @@ func getSupportedFileType(stat syscall.Stat_t) (fileType, error) {\nreturn ft, nil\n}\n-func newLocalFile(a *attachPoint, file *os.File, path string, stat syscall.Stat_t) (*localFile, error) {\n+func newLocalFile(a *attachPoint, file *fd.FD, path string, stat syscall.Stat_t) (*localFile, error) {\nft, err := getSupportedFileType(stat)\nif err != nil {\nreturn nil, err\n@@ -297,18 +295,22 @@ func newLocalFile(a *attachPoint, file *os.File, path string, stat syscall.Stat_\n// newFDMaybe creates a fd.FD from a file, dup'ing the FD and setting it as\n// non-blocking. If anything fails, returns nil. It's better to have a file\n// without host FD, than to fail the operation.\n-func newFDMaybe(file *os.File) *fd.FD {\n- fd, err := fd.NewFromFile(file)\n+func newFDMaybe(file *fd.FD) *fd.FD {\n+ dupFD, err := syscall.Dup(file.FD())\n+ // Technically, the runtime may call the finalizer on file as soon as\n+ // FD() returns.\n+ runtime.KeepAlive(file)\nif err != nil {\nreturn nil\n}\n+ dup := fd.New(dupFD)\n// fd is blocking; non-blocking is required.\n- if err := syscall.SetNonblock(fd.FD(), true); err != nil {\n- fd.Close()\n+ if err := syscall.SetNonblock(dup.FD(), true); err != nil {\n+ dup.Close()\nreturn nil\n}\n- return fd\n+ return dup\n}\nfunc stat(fd int) (syscall.Stat_t, error) {\n@@ -323,35 +325,30 @@ func fchown(fd int, uid p9.UID, gid p9.GID) error {\nreturn syscall.Fchownat(fd, \"\", int(uid), int(gid), linux.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW)\n}\n-func (l *localFile) fd() int {\n- return int(l.file.Fd())\n-}\n-\n// Open implements p9.File.\nfunc (l *localFile) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\nif l.isOpen() {\n- panic(fmt.Sprintf(\"attempting to open already opened file: %q\", l.file.Name()))\n+ panic(fmt.Sprintf(\"attempting to open already opened file: %q\", l.hostPath))\n}\n// Check if control file can be used or if a new open must be created.\n- var newFile *os.File\n+ var newFile *fd.FD\nif mode == p9.ReadOnly {\n- log.Debugf(\"Open reusing control file, mode: %v, %q\", mode, l.file.Name())\n+ log.Debugf(\"Open reusing control file, mode: %v, %q\", mode, l.hostPath)\nnewFile = l.file\n} else {\n// Ideally reopen would call name_to_handle_at (with empty name) and\n// open_by_handle_at to reopen the file without using 'hostPath'. 
However,\n// name_to_handle_at and open_by_handle_at aren't supported by overlay2.\n- log.Debugf(\"Open reopening file, mode: %v, %q\", mode, l.file.Name())\n+ log.Debugf(\"Open reopening file, mode: %v, %q\", mode, l.hostPath)\nvar err error\n-\n- newFile, err = os.OpenFile(l.hostPath, openFlags|mode.OSFlags(), 0)\n+ newFile, err = fd.Open(l.hostPath, openFlags|mode.OSFlags(), 0)\nif err != nil {\nreturn nil, p9.QID{}, 0, extractErrno(err)\n}\n}\n- stat, err := stat(int(newFile.Fd()))\n+ stat, err := stat(newFile.FD())\nif err != nil {\nif newFile != l.file {\nnewFile.Close()\n@@ -368,7 +365,7 @@ func (l *localFile) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// Close old file in case a new one was created.\nif newFile != l.file {\nif err := l.file.Close(); err != nil {\n- log.Warningf(\"Error closing file %q: %v\", l.file.Name(), err)\n+ log.Warningf(\"Error closing file %q: %v\", l.hostPath, err)\n}\nl.file = newFile\n}\n@@ -396,33 +393,31 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\nflags |= mode.OSFlags()\n}\n- fd, err := syscall.Openat(l.fd(), name, flags, uint32(perm.Permissions()))\n+ child, err := fd.OpenAt(l.file, name, flags, uint32(perm.Permissions()))\nif err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\ncu := specutils.MakeCleanup(func() {\n- syscall.Close(fd)\n+ child.Close()\n// Best effort attempt to remove the file in case of failure.\n- if err := syscall.Unlinkat(l.fd(), name); err != nil {\n+ if err := syscall.Unlinkat(l.file.FD(), name); err != nil {\nlog.Warningf(\"error unlinking file %q after failure: %v\", path.Join(l.hostPath, name), err)\n}\n})\ndefer cu.Clean()\n- if err := fchown(fd, uid, gid); err != nil {\n+ if err := fchown(child.FD(), uid, gid); err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n- stat, err := stat(fd)\n+ stat, err := stat(child.FD())\nif err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n- cPath := path.Join(l.hostPath, name)\n- f := os.NewFile(uintptr(fd), cPath)\nc := &localFile{\nattachPoint: l.attachPoint,\n- hostPath: cPath,\n- file: f,\n+ hostPath: path.Join(l.hostPath, name),\n+ file: child,\nmode: mode,\n}\n@@ -440,12 +435,12 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\nreturn p9.QID{}, syscall.EBADF\n}\n- if err := syscall.Mkdirat(l.fd(), name, uint32(perm.Permissions())); err != nil {\n+ if err := syscall.Mkdirat(l.file.FD(), name, uint32(perm.Permissions())); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\ncu := specutils.MakeCleanup(func() {\n// Best effort attempt to remove the dir in case of failure.\n- if err := unix.Unlinkat(l.fd(), name, unix.AT_REMOVEDIR); err != nil {\n+ if err := unix.Unlinkat(l.file.FD(), name, unix.AT_REMOVEDIR); err != nil {\nlog.Warningf(\"error unlinking dir %q after failure: %v\", path.Join(l.hostPath, name), err)\n}\n})\n@@ -453,16 +448,16 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\n// Open directory to change ownership and stat it.\nflags := syscall.O_DIRECTORY | syscall.O_RDONLY | openFlags\n- fd, err := syscall.Openat(l.fd(), name, flags, 0)\n+ f, err := fd.OpenAt(l.file, name, flags, 0)\nif err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- defer syscall.Close(fd)\n+ defer f.Close()\n- if err := fchown(fd, uid, gid); err != nil {\n+ if err := fchown(f.FD(), uid, gid); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- stat, err := stat(fd)\n+ stat, err := stat(f.FD())\nif err != nil {\nreturn 
p9.QID{}, extractErrno(err)\n}\n@@ -475,25 +470,25 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\nfunc (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\n// Duplicate current file if 'names' is empty.\nif len(names) == 0 {\n- var newFile *os.File\n+ var newFile *fd.FD\nif l.isOpen() {\n// File mode may have changed when it was opened, so open a new one.\nvar err error\n- newFile, err = openAnyFile(l.hostPath, func(mode int) (*os.File, error) {\n- return os.OpenFile(l.hostPath, openFlags|mode, 0)\n+ newFile, err = openAnyFile(l.hostPath, func(mode int) (*fd.FD, error) {\n+ return fd.Open(l.hostPath, openFlags|mode, 0)\n})\nif err != nil {\nreturn nil, nil, extractErrno(err)\n}\n} else {\n- newFd, err := syscall.Dup(l.fd())\n+ newFd, err := syscall.Dup(l.file.FD())\nif err != nil {\nreturn nil, nil, extractErrno(err)\n}\n- newFile = os.NewFile(uintptr(newFd), l.hostPath)\n+ newFile = fd.New(newFd)\n}\n- stat, err := stat(int(newFile.Fd()))\n+ stat, err := stat(int(newFile.FD()))\nif err != nil {\nnewFile.Close()\nreturn nil, nil, extractErrno(err)\n@@ -515,7 +510,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nif err != nil {\nreturn nil, nil, extractErrno(err)\n}\n- stat, err := stat(int(f.Fd()))\n+ stat, err := stat(f.FD())\nif err != nil {\nf.Close()\nreturn nil, nil, extractErrno(err)\n@@ -535,7 +530,7 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\n// StatFS implements p9.File.\nfunc (l *localFile) StatFS() (p9.FSStat, error) {\nvar s syscall.Statfs_t\n- if err := syscall.Fstatfs(l.fd(), &s); err != nil {\n+ if err := syscall.Fstatfs(l.file.FD(), &s); err != nil {\nreturn p9.FSStat{}, extractErrno(err)\n}\n@@ -557,7 +552,7 @@ func (l *localFile) FSync() error {\nif !l.isOpen() {\nreturn syscall.EBADF\n}\n- if err := l.file.Sync(); err != nil {\n+ if err := syscall.Fsync(l.file.FD()); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -565,7 +560,7 @@ func (l *localFile) FSync() error {\n// GetAttr implements p9.File.\nfunc (l *localFile) GetAttr(_ p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) {\n- stat, err := stat(l.fd())\n+ stat, err := stat(l.file.FD())\nif err != nil {\nreturn p9.QID{}, p9.AttrMask{}, p9.Attr{}, extractErrno(err)\n}\n@@ -633,20 +628,20 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n// Handle all the sanity checks up front so that the client gets a\n// consistent result that is not attribute dependent.\nif !valid.IsSubsetOf(allowed) {\n- log.Warningf(\"SetAttr() failed for %q, mask: %v\", l.file.Name(), valid)\n+ log.Warningf(\"SetAttr() failed for %q, mask: %v\", l.hostPath, valid)\nreturn syscall.EPERM\n}\n// Check if it's possible to use cached file, or if another one needs to be\n// opened for write.\n- fd := l.fd()\n+ f := l.file\nif l.ft == regular && l.mode != p9.WriteOnly && l.mode != p9.ReadWrite {\n- f, err := os.OpenFile(l.hostPath, openFlags|os.O_WRONLY, 0)\n+ var err error\n+ f, err = fd.Open(l.hostPath, openFlags|syscall.O_WRONLY, 0)\nif err != nil {\nreturn extractErrno(err)\n}\ndefer f.Close()\n- fd = int(f.Fd())\n}\n// The semantics are to either return an error if no changes were made,\n@@ -661,14 +656,14 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n// over another.\nvar err error\nif valid.Permissions {\n- if cerr := syscall.Fchmod(fd, uint32(attr.Permissions)); cerr != nil {\n+ if cerr := syscall.Fchmod(f.FD(), uint32(attr.Permissions)); cerr != nil 
{\nlog.Debugf(\"SetAttr fchmod failed %q, err: %v\", l.hostPath, cerr)\nerr = extractErrno(cerr)\n}\n}\nif valid.Size {\n- if terr := syscall.Ftruncate(fd, int64(attr.Size)); terr != nil {\n+ if terr := syscall.Ftruncate(f.FD(), int64(attr.Size)); terr != nil {\nlog.Debugf(\"SetAttr ftruncate failed %q, err: %v\", l.hostPath, terr)\nerr = extractErrno(terr)\n}\n@@ -700,20 +695,20 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\n// utimensat operates different that other syscalls. To operate on a\n// symlink it *requires* AT_SYMLINK_NOFOLLOW with dirFD and a non-empty\n// name.\n- f, err := os.OpenFile(path.Dir(l.hostPath), openFlags|unix.O_PATH, 0)\n+ parent, err := syscall.Open(path.Dir(l.hostPath), openFlags|unix.O_PATH, 0)\nif err != nil {\nreturn extractErrno(err)\n}\n- defer f.Close()\n+ defer syscall.Close(parent)\n- if terr := utimensat(int(f.Fd()), path.Base(l.hostPath), utimes, linux.AT_SYMLINK_NOFOLLOW); terr != nil {\n+ if terr := utimensat(parent, path.Base(l.hostPath), utimes, linux.AT_SYMLINK_NOFOLLOW); terr != nil {\nlog.Debugf(\"SetAttr utimens failed %q, err: %v\", l.hostPath, terr)\nerr = extractErrno(terr)\n}\n} else {\n// Directories and regular files can operate directly on the fd\n// using empty name.\n- if terr := utimensat(fd, \"\", utimes, 0); terr != nil {\n+ if terr := utimensat(f.FD(), \"\", utimes, 0); terr != nil {\nlog.Debugf(\"SetAttr utimens failed %q, err: %v\", l.hostPath, terr)\nerr = extractErrno(terr)\n}\n@@ -729,7 +724,7 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\nif valid.GID {\ngid = int(attr.GID)\n}\n- if oerr := syscall.Fchownat(fd, \"\", uid, gid, linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW); oerr != nil {\n+ if oerr := syscall.Fchownat(f.FD(), \"\", uid, gid, linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW); oerr != nil {\nlog.Debugf(\"SetAttr fchownat failed %q, err: %v\", l.hostPath, oerr)\nerr = extractErrno(oerr)\n}\n@@ -754,7 +749,7 @@ func (l *localFile) RenameAt(oldName string, directory p9.File, newName string)\n}\nnewParent := directory.(*localFile)\n- if err := renameat(l.fd(), oldName, newParent.fd(), newName); err != nil {\n+ if err := renameat(l.file.FD(), oldName, newParent.file.FD(), newName); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -804,28 +799,28 @@ func (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.\nreturn p9.QID{}, syscall.EBADF\n}\n- if err := unix.Symlinkat(target, l.fd(), newName); err != nil {\n+ if err := unix.Symlinkat(target, l.file.FD(), newName); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\ncu := specutils.MakeCleanup(func() {\n// Best effort attempt to remove the symlink in case of failure.\n- if err := syscall.Unlinkat(l.fd(), newName); err != nil {\n+ if err := syscall.Unlinkat(l.file.FD(), newName); err != nil {\nlog.Warningf(\"error unlinking file %q after failure: %v\", path.Join(l.hostPath, newName), err)\n}\n})\ndefer cu.Clean()\n// Open symlink to change ownership and stat it.\n- fd, err := syscall.Openat(l.fd(), newName, unix.O_PATH|openFlags, 0)\n+ f, err := fd.OpenAt(l.file, newName, unix.O_PATH|openFlags, 0)\nif err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- defer syscall.Close(fd)\n+ defer f.Close()\n- if err := fchown(fd, uid, gid); err != nil {\n+ if err := fchown(f.FD(), uid, gid); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- stat, err := stat(fd)\n+ stat, err := stat(f.FD())\nif err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n@@ -845,7 +840,7 @@ func (l 
*localFile) Link(target p9.File, newName string) error {\n}\ntargetFile := target.(*localFile)\n- if err := unix.Linkat(targetFile.fd(), \"\", l.fd(), newName, linux.AT_EMPTY_PATH); err != nil {\n+ if err := unix.Linkat(targetFile.file.FD(), \"\", l.file.FD(), newName, linux.AT_EMPTY_PATH); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -868,7 +863,7 @@ func (l *localFile) UnlinkAt(name string, flags uint32) error {\nreturn syscall.EBADF\n}\n- if err := unix.Unlinkat(l.fd(), name, int(flags)); err != nil {\n+ if err := unix.Unlinkat(l.file.FD(), name, int(flags)); err != nil {\nreturn extractErrno(err)\n}\nreturn nil\n@@ -887,31 +882,68 @@ func (l *localFile) Readdir(offset uint64, count uint32) ([]p9.Dirent, error) {\n// reading all directory contents. Take a lock because this operation is\n// stateful.\nl.readDirMu.Lock()\n- if _, err := l.file.Seek(0, 0); err != nil {\n- l.readDirMu.Unlock()\n+ defer l.readDirMu.Unlock()\n+\n+ if _, err := syscall.Seek(l.file.FD(), 0, 0); err != nil {\nreturn nil, extractErrno(err)\n}\n- names, err := l.file.Readdirnames(-1)\n+\n+ return l.readDirent(l.file.FD(), offset, count)\n+}\n+\n+func (l *localFile) readDirent(f int, offset uint64, count uint32) ([]p9.Dirent, error) {\n+ // Limit 'count' to cap the slice size that is returned.\n+ const maxCount = 100000\n+ if count > maxCount {\n+ count = maxCount\n+ }\n+\n+ dirents := make([]p9.Dirent, 0, count)\n+\n+ // Pre-allocate buffers that will be reused to get partial results.\n+ direntsBuf := make([]byte, 8192)\n+ names := make([]string, 0, 100)\n+\n+ skip := offset // Tracks the number of entries to skip.\n+ end := offset + uint64(count)\n+ for offset < end {\n+ dirSize, err := syscall.ReadDirent(f, direntsBuf)\nif err != nil {\n- l.readDirMu.Unlock()\n- return nil, extractErrno(err)\n+ return dirents, err\n+ }\n+ if dirSize <= 0 {\n+ return dirents, nil // EOF\n}\n- l.readDirMu.Unlock()\n- var dirents []p9.Dirent\n- for i := int(offset); i >= 0 && i < len(names); i++ {\n- stat, err := statAt(l.fd(), names[i])\n+ names := names[:0]\n+ _, _, names = syscall.ParseDirent(direntsBuf[:dirSize], -1, names)\n+\n+ // Skip over entries that the caller is not interested in.\n+ if skip > 0 {\n+ if skip > uint64(len(names)) {\n+ skip -= uint64(len(names))\n+ names = names[:0]\n+ } else {\n+ names = names[skip:]\n+ skip = 0\n+ }\n+ }\n+ for _, name := range names {\n+ stat, err := statAt(l.file.FD(), name)\nif err != nil {\n+ log.Warningf(\"Readdir is skipping file with failed stat %q, err: %v\", l.hostPath, err)\ncontinue\n}\nqid := l.attachPoint.makeQID(stat)\n+ offset++\ndirents = append(dirents, p9.Dirent{\nQID: qid,\nType: qid.Type,\n- Name: names[i],\n- Offset: uint64(i + 1),\n+ Name: name,\n+ Offset: offset,\n})\n}\n+ }\nreturn dirents, nil\n}\n@@ -921,7 +953,7 @@ func (l *localFile) Readlink() (string, error) {\nconst limit = 1024 * 1024\nfor len := 128; len < limit; len *= 2 {\nb := make([]byte, len)\n- n, err := unix.Readlinkat(l.fd(), \"\", b)\n+ n, err := unix.Readlinkat(l.file.FD(), \"\", b)\nif err != nil {\nreturn \"\", extractErrno(err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/network.go", "new_path": "runsc/sandbox/network.go", "diff": "@@ -257,6 +257,8 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\nreturn fmt.Errorf(\"unable to enable the PACKET_VNET_HDR option: %v\", err)\n}\nlink.GSOMaxSize = ifaceLink.Attrs().GSOMaxSize\n+ } else {\n+ log.Infof(\"GSO not available in host.\")\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Replace os.File with fd.FD in fsgofer. os.NewFile() accounts for 38% of CPU time in localFile.Walk(). This change switches to use fd.FD, which is much cheaper to create. Now, fd.New() in localFile.Walk() accounts for only 4%. PiperOrigin-RevId: 244944983 Change-Id: Ic892df96cf2633e78ad379227a213cb93ee0ca46
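A rough way to see the cost difference described above is a micro-benchmark that opens and wraps a descriptor both ways. The wrapper type below is a simplified stand-in for fd.FD (no finalizer or extra bookkeeping), both benchmarks include the open/close so only the delta between them is meaningful, and absolute numbers will vary by machine:

```go
package example

import (
	"os"
	"syscall"
	"testing"
)

// rawFD is a minimal stand-in for fd.FD: it only records the descriptor and
// requires an explicit Close.
type rawFD struct{ fd int }

func (r *rawFD) Close() error { return syscall.Close(r.fd) }

func BenchmarkOSNewFile(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fd, err := syscall.Open("/dev/null", syscall.O_RDONLY, 0)
		if err != nil {
			b.Fatal(err)
		}
		f := os.NewFile(uintptr(fd), "/dev/null")
		f.Close()
	}
}

func BenchmarkRawFD(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fd, err := syscall.Open("/dev/null", syscall.O_RDONLY, 0)
		if err != nil {
			b.Fatal(err)
		}
		f := &rawFD{fd: fd}
		f.Close()
	}
}
```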
259,992
23.04.2019 16:24:51
25,200
db334f7154a1d4beedd8bd418a88c3f043ebe173
Remove reflection from 9P serving path. p9.messageByType was taking 7% of p9.recv before, spending time with reflection and map lookup. Now it's reduced to 1%.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/messages.go", "new_path": "pkg/p9/messages.go", "diff": "@@ -16,7 +16,7 @@ package p9\nimport (\n\"fmt\"\n- \"reflect\"\n+ \"math\"\n\"gvisor.googlesource.com/gvisor/pkg/fd\"\n)\n@@ -2140,7 +2140,7 @@ func (r *Rlconnect) String() string {\n}\n// messageRegistry indexes all messages by type.\n-var messageRegistry = make(map[MsgType]func() message)\n+var messageRegistry = make([]func() message, math.MaxUint8)\n// messageByType creates a new message by type.\n//\n@@ -2149,8 +2149,8 @@ var messageRegistry = make(map[MsgType]func() message)\n// This takes, and ignores, a message tag so that it may be used directly as a\n// lookupTagAndType function for recv (by design).\nfunc messageByType(_ Tag, t MsgType) (message, error) {\n- fn, ok := messageRegistry[t]\n- if !ok {\n+ fn := messageRegistry[t]\n+ if fn == nil {\nreturn nil, &ErrInvalidMsgType{t}\n}\nreturn fn(), nil\n@@ -2158,18 +2158,15 @@ func messageByType(_ Tag, t MsgType) (message, error) {\n// register registers the given message type.\n//\n-// This uses reflection and records only the type. This may cause panic on\n-// failure and should only be used from init.\n-func register(m message) {\n- t := m.Type()\n- if fn, ok := messageRegistry[t]; ok {\n- panic(fmt.Sprintf(\"duplicate message type %d: first is %#v, second is %#v\", t, fn(), m))\n+// This may cause panic on failure and should only be used from init.\n+func register(t MsgType, fn func() message) {\n+ if int(t) >= len(messageRegistry) {\n+ panic(fmt.Sprintf(\"message type %d is too large. It must be smaller than %d\", t, len(messageRegistry)))\n}\n-\n- to := reflect.ValueOf(m).Elem().Type()\n- messageRegistry[t] = func() message {\n- return reflect.New(to).Interface().(message)\n+ if messageRegistry[t] != nil {\n+ panic(fmt.Sprintf(\"duplicate message type %d: first is %T, second is %T\", t, messageRegistry[t](), fn()))\n}\n+ messageRegistry[t] = fn\n}\nfunc calculateSize(m message) uint32 {\n@@ -2192,80 +2189,82 @@ var largestFixedSize uint32\n// calculateLargestFixedSize is called from within init.\nfunc calculateLargestFixedSize() {\nfor _, fn := range messageRegistry {\n+ if fn != nil {\nif size := calculateSize(fn()); size > largestFixedSize {\nlargestFixedSize = size\n}\n}\n}\n+}\nfunc init() {\n- register(&Rlerror{})\n- register(&Tstatfs{})\n- register(&Rstatfs{})\n- register(&Tlopen{})\n- register(&Rlopen{})\n- register(&Tlcreate{})\n- register(&Rlcreate{})\n- register(&Tsymlink{})\n- register(&Rsymlink{})\n- register(&Tmknod{})\n- register(&Rmknod{})\n- register(&Trename{})\n- register(&Rrename{})\n- register(&Treadlink{})\n- register(&Rreadlink{})\n- register(&Tgetattr{})\n- register(&Rgetattr{})\n- register(&Tsetattr{})\n- register(&Rsetattr{})\n- register(&Txattrwalk{})\n- register(&Rxattrwalk{})\n- register(&Txattrcreate{})\n- register(&Rxattrcreate{})\n- register(&Treaddir{})\n- register(&Rreaddir{})\n- register(&Tfsync{})\n- register(&Rfsync{})\n- register(&Tlink{})\n- register(&Rlink{})\n- register(&Tmkdir{})\n- register(&Rmkdir{})\n- register(&Trenameat{})\n- register(&Rrenameat{})\n- register(&Tunlinkat{})\n- register(&Runlinkat{})\n- register(&Tversion{})\n- register(&Rversion{})\n- register(&Tauth{})\n- register(&Rauth{})\n- register(&Tattach{})\n- register(&Rattach{})\n- register(&Tflush{})\n- register(&Rflush{})\n- register(&Twalk{})\n- register(&Rwalk{})\n- register(&Tread{})\n- register(&Rread{})\n- register(&Twrite{})\n- register(&Rwrite{})\n- register(&Tclunk{})\n- register(&Rclunk{})\n- 
register(&Tremove{})\n- register(&Rremove{})\n- register(&Tflushf{})\n- register(&Rflushf{})\n- register(&Twalkgetattr{})\n- register(&Rwalkgetattr{})\n- register(&Tucreate{})\n- register(&Rucreate{})\n- register(&Tumkdir{})\n- register(&Rumkdir{})\n- register(&Tumknod{})\n- register(&Rumknod{})\n- register(&Tusymlink{})\n- register(&Rusymlink{})\n- register(&Tlconnect{})\n- register(&Rlconnect{})\n+ register(MsgRlerror, func() message { return &Rlerror{} })\n+ register(MsgTstatfs, func() message { return &Tstatfs{} })\n+ register(MsgRstatfs, func() message { return &Rstatfs{} })\n+ register(MsgTlopen, func() message { return &Tlopen{} })\n+ register(MsgRlopen, func() message { return &Rlopen{} })\n+ register(MsgTlcreate, func() message { return &Tlcreate{} })\n+ register(MsgRlcreate, func() message { return &Rlcreate{} })\n+ register(MsgTsymlink, func() message { return &Tsymlink{} })\n+ register(MsgRsymlink, func() message { return &Rsymlink{} })\n+ register(MsgTmknod, func() message { return &Tmknod{} })\n+ register(MsgRmknod, func() message { return &Rmknod{} })\n+ register(MsgTrename, func() message { return &Trename{} })\n+ register(MsgRrename, func() message { return &Rrename{} })\n+ register(MsgTreadlink, func() message { return &Treadlink{} })\n+ register(MsgRreadlink, func() message { return &Rreadlink{} })\n+ register(MsgTgetattr, func() message { return &Tgetattr{} })\n+ register(MsgRgetattr, func() message { return &Rgetattr{} })\n+ register(MsgTsetattr, func() message { return &Tsetattr{} })\n+ register(MsgRsetattr, func() message { return &Rsetattr{} })\n+ register(MsgTxattrwalk, func() message { return &Txattrwalk{} })\n+ register(MsgRxattrwalk, func() message { return &Rxattrwalk{} })\n+ register(MsgTxattrcreate, func() message { return &Txattrcreate{} })\n+ register(MsgRxattrcreate, func() message { return &Rxattrcreate{} })\n+ register(MsgTreaddir, func() message { return &Treaddir{} })\n+ register(MsgRreaddir, func() message { return &Rreaddir{} })\n+ register(MsgTfsync, func() message { return &Tfsync{} })\n+ register(MsgRfsync, func() message { return &Rfsync{} })\n+ register(MsgTlink, func() message { return &Tlink{} })\n+ register(MsgRlink, func() message { return &Rlink{} })\n+ register(MsgTmkdir, func() message { return &Tmkdir{} })\n+ register(MsgRmkdir, func() message { return &Rmkdir{} })\n+ register(MsgTrenameat, func() message { return &Trenameat{} })\n+ register(MsgRrenameat, func() message { return &Rrenameat{} })\n+ register(MsgTunlinkat, func() message { return &Tunlinkat{} })\n+ register(MsgRunlinkat, func() message { return &Runlinkat{} })\n+ register(MsgTversion, func() message { return &Tversion{} })\n+ register(MsgRversion, func() message { return &Rversion{} })\n+ register(MsgTauth, func() message { return &Tauth{} })\n+ register(MsgRauth, func() message { return &Rauth{} })\n+ register(MsgTattach, func() message { return &Tattach{} })\n+ register(MsgRattach, func() message { return &Rattach{} })\n+ register(MsgTflush, func() message { return &Tflush{} })\n+ register(MsgRflush, func() message { return &Rflush{} })\n+ register(MsgTwalk, func() message { return &Twalk{} })\n+ register(MsgRwalk, func() message { return &Rwalk{} })\n+ register(MsgTread, func() message { return &Tread{} })\n+ register(MsgRread, func() message { return &Rread{} })\n+ register(MsgTwrite, func() message { return &Twrite{} })\n+ register(MsgRwrite, func() message { return &Rwrite{} })\n+ register(MsgTclunk, func() message { return &Tclunk{} })\n+ register(MsgRclunk, func() 
message { return &Rclunk{} })\n+ register(MsgTremove, func() message { return &Tremove{} })\n+ register(MsgRremove, func() message { return &Rremove{} })\n+ register(MsgTflushf, func() message { return &Tflushf{} })\n+ register(MsgRflushf, func() message { return &Rflushf{} })\n+ register(MsgTwalkgetattr, func() message { return &Twalkgetattr{} })\n+ register(MsgRwalkgetattr, func() message { return &Rwalkgetattr{} })\n+ register(MsgTucreate, func() message { return &Tucreate{} })\n+ register(MsgRucreate, func() message { return &Rucreate{} })\n+ register(MsgTumkdir, func() message { return &Tumkdir{} })\n+ register(MsgRumkdir, func() message { return &Rumkdir{} })\n+ register(MsgTumknod, func() message { return &Tumknod{} })\n+ register(MsgRumknod, func() message { return &Rumknod{} })\n+ register(MsgTusymlink, func() message { return &Tusymlink{} })\n+ register(MsgRusymlink, func() message { return &Rusymlink{} })\n+ register(MsgTlconnect, func() message { return &Tlconnect{} })\n+ register(MsgRlconnect, func() message { return &Rlconnect{} })\ncalculateLargestFixedSize()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/messages_test.go", "new_path": "pkg/p9/messages_test.go", "diff": "@@ -400,6 +400,7 @@ func TestEncodeDecode(t *testing.T) {\nfunc TestMessageStrings(t *testing.T) {\nfor typ, fn := range messageRegistry {\n+ if fn != nil {\nname := fmt.Sprintf(\"%+v\", typ)\nt.Run(name, func(t *testing.T) {\ndefer func() { // Ensure no panic.\n@@ -409,11 +410,12 @@ func TestMessageStrings(t *testing.T) {\n}()\nm := fn()\n_ = fmt.Sprintf(\"%v\", m)\n- err := ErrInvalidMsgType{typ}\n+ err := ErrInvalidMsgType{MsgType(typ)}\n_ = err.Error()\n})\n}\n}\n+}\nfunc TestRegisterDuplicate(t *testing.T) {\ndefer func() {\n@@ -424,5 +426,5 @@ func TestRegisterDuplicate(t *testing.T) {\n}()\n// Register a duplicate.\n- register(&Rlerror{})\n+ register(MsgRlerror, func() message { return &Rlerror{} })\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/transport_test.go", "new_path": "pkg/p9/transport_test.go", "diff": "@@ -180,5 +180,5 @@ func TestSendClosed(t *testing.T) {\n}\nfunc init() {\n- register(&badDecode{})\n+ register(MsgTypeBadDecode, func() message { return &badDecode{} })\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove reflection from 9P serving path. p9.messageByType was taking 7% of p9.recv before, spending time with reflection and map lookup. Now it's reduced to 1%. PiperOrigin-RevId: 244947313 Change-Id: I42813f920557b7656f8b29157eb32acd79e11fa5
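The essence of the change is replacing a map keyed by message type plus reflect.New with a fixed-size table of constructor functions. A standalone sketch of that registry shape (the ping message type here is invented for illustration):

```go
package example

import "fmt"

type message interface{ Type() uint8 }

type ping struct{}

func (ping) Type() uint8 { return 1 }

// registry is indexed directly by the message type, so lookup is an array
// load instead of a map lookup followed by reflection.
var registry [256]func() message

func register(t uint8, fn func() message) {
	if registry[t] != nil {
		panic(fmt.Sprintf("duplicate message type %d", t))
	}
	registry[t] = fn
}

func messageByType(t uint8) (message, error) {
	if fn := registry[t]; fn != nil {
		return fn(), nil
	}
	return nil, fmt.Errorf("invalid message type %d", t)
}

func init() { register(1, func() message { return ping{} }) }
```

The table also turns the "invalid type" check into a nil test, which is why the lookup largely drops out of the profile.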
259,992
23.04.2019 17:45:34
25,200
7c9c5fd36dbff21cf255aaf08278729b02613222
Add Linux version to requirements section
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -45,6 +45,7 @@ architectures may become available in the future.\nMake sure the following dependencies are installed:\n+* Linux 4.14.77+ ([older linux][old-linux])\n* [git][git]\n* [Bazel][bazel] 0.18+\n* [Python][python]\n@@ -125,6 +126,7 @@ See [Contributing.md](CONTRIBUTING.md).\n[gvisor-users-list]: https://groups.google.com/forum/#!forum/gvisor-users\n[gvisor-dev-list]: https://groups.google.com/forum/#!forum/gvisor-dev\n[oci]: https://www.opencontainers.org\n+[old-linux]: https://gvisor.dev/docs/user_guide/networking/#gso\n[python]: https://python.org\n[rbe]: https://blog.bazel.build/2018/10/05/remote-build-execution.html\n[sandbox]: https://en.wikipedia.org/wiki/Sandbox_(computer_security)\n" } ]
Go
Apache License 2.0
google/gvisor
Add Linux version to requirements section PiperOrigin-RevId: 244959388 Change-Id: Ifb08678d975cf9f694a21012f9a1e9f45b1f197c
259,884
23.04.2019 22:12:56
14,400
66cc254a8710cdaeb6e56bb45aec1e51f696660a
Fix feature block rendering (fixes #56). Render feature blocks as markdown again. Feature block inner content is contained in a <div> rather than a <p> to avoid nested <p> tags.
[ { "change_type": "MODIFY", "old_path": "content/_index.html", "new_path": "content/_index.html", "diff": "@@ -20,32 +20,32 @@ gVisor integrates with <a href=\"https://www.docker.com/\" target=\"_blank\" rel=\"no\n{{< blocks/section color=\"dark\" >}}\n-{{< blocks/feature icon=\"fas fa-lock\" title=\"Defense in Depth\" >}}\n+{{% blocks/feature icon=\"fas fa-lock\" title=\"Defense in Depth\" %}}\nEach sandbox has its own user-space kernel, providing additional protection from host kernel vulnerabilities.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n-{{< blocks/feature icon=\"fas fa-feather-alt\" title=\"Lightweight\" >}}\n+{{% blocks/feature icon=\"fas fa-feather-alt\" title=\"Lightweight\" %}}\nRuns as a normal process and uses the host kernel for memory management and scheduling.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n-{{< blocks/feature icon=\"fab fa-linux\" title=\"Zero Configuration\" >}}\n+{{% blocks/feature icon=\"fab fa-linux\" title=\"Zero Configuration\" %}}\nCapable of running most Linux applications unmodified, with zero configuration.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n{{< /blocks/section >}}\n{{< blocks/section color=\"white\" >}}\n-{{< blocks/feature icon=\"fas fa-book\" title=\"Read the Docs\" >}}\n+{{% blocks/feature icon=\"fas fa-book\" title=\"Read the Docs\" %}}\nRead the [documentation](./docs/) to understand gVisor, its architecture and trade-offs, and how to use it.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n-{{< blocks/feature icon=\"fas fa-code-branch\" title=\"Contribute to gVisor\" >}}\n+{{% blocks/feature icon=\"fas fa-code-branch\" title=\"Contribute to gVisor\" %}}\nAnyone is welcome to be a gVisor contributor. Please check out the [community information](./docs/community) to get started.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n-{{< blocks/feature icon=\"fab fa-github\" title=\"Give Feedback\" >}}\n+{{% blocks/feature icon=\"fab fa-github\" title=\"Give Feedback\" %}}\nFile feature requests, bugs, and compatibility issues on <a href=\"https://github.com/google/gvisor/issues\" target=\"_blank\" rel=\"noopener\">GitHub</a>.\n-{{< /blocks/feature >}}\n+{{% /blocks/feature %}}\n{{< /blocks/section >}}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix feature block rendering (fixes #56) - Render feature blocks as markdown again. - Feature block inner content is contained in a <div> rather than a <p> to avoid nested <p> tags.
259,854
24.04.2019 14:50:30
25,200
962567aafd2ead6846db5add07bb7a02ed562ff4
Add Unix socket tests for the MSG_CTRUNC msghdr flag. TCP tests and the implementation will come in followup CLs. Updates google/gvisor#206 Updates google/gvisor#207
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix.cc", "new_path": "test/syscalls/linux/socket_unix.cc", "diff": "#include <stdio.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n+#include <sys/types.h>\n#include <sys/un.h>\n+\n#include <vector>\n#include \"gtest/gtest.h\"\n@@ -181,6 +183,162 @@ TEST_P(UnixSocketPairTest, BasicFDPassNoSpace) {\nEXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n}\n+// BasicFDPassNoSpaceMsgCtrunc sends an FD, but does not provide any space to\n+// receive it. It then verifies that the MSG_CTRUNC flag is set in the msghdr.\n+TEST_P(UnixSocketPairTest, BasicFDPassNoSpaceMsgCtrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+\n+ ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),\n+ sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ std::vector<char> control(CMSG_SPACE(0));\n+ msg.msg_control = &control[0];\n+ msg.msg_controllen = control.size();\n+\n+ char received_data[sizeof(sent_data)];\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_controllen, 0);\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+}\n+\n+// BasicFDPassNullControlMsgCtrunc sends an FD and sets contradictory values for\n+// msg_controllen and msg_control. msg_controllen is set to the correct size to\n+// accomidate the FD, but msg_control is set to NULL. In this case, msg_control\n+// should override msg_controllen.\n+TEST_P(UnixSocketPairTest, BasicFDPassNullControlMsgCtrunc) {\n+ // FIXME: Fix handling of NULL msg_control.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+\n+ ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),\n+ sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ msg.msg_controllen = CMSG_SPACE(1);\n+\n+ char received_data[sizeof(sent_data)];\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_controllen, 0);\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+}\n+\n+// BasicFDPassNotEnoughSpaceMsgCtrunc sends an FD, but does not provide enough\n+// space to receive it. 
It then verifies that the MSG_CTRUNC flag is set in the\n+// msghdr.\n+TEST_P(UnixSocketPairTest, BasicFDPassNotEnoughSpaceMsgCtrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+\n+ ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),\n+ sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ std::vector<char> control(CMSG_SPACE(0) + 1);\n+ msg.msg_control = &control[0];\n+ msg.msg_controllen = control.size();\n+\n+ char received_data[sizeof(sent_data)];\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_controllen, 0);\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+}\n+\n+// BasicThreeFDPassTruncationMsgCtrunc sends three FDs, but only provides enough\n+// space to receive two of them. It then verifies that the MSG_CTRUNC flag is\n+// set in the msghdr.\n+TEST_P(UnixSocketPairTest, BasicThreeFDPassTruncationMsgCtrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair1 =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+ auto pair2 =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+ auto pair3 =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+ int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()};\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ std::vector<char> control(CMSG_SPACE(2 * sizeof(int)));\n+ msg.msg_control = &control[0];\n+ msg.msg_controllen = control.size();\n+\n+ char received_data[sizeof(sent_data)];\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(2 * sizeof(int)));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);\n+}\n+\n// BasicFDPassUnalignedRecv starts off by sending a single FD just like\n// BasicFDPass. The difference is that when calling recvmsg, the length of the\n// receive data is only aligned on a 4 byte boundry instead of the normal 8.\n@@ -206,6 +364,90 @@ TEST_P(UnixSocketPairTest, BasicFDPassUnalignedRecv) {\nASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd()));\n}\n+// BasicFDPassUnalignedRecvNoMsgTrunc sends one FD and only provides enough\n+// space to receive just it. (Normally the minimum amount of space one would\n+// provide would be enough space for two FDs.) 
It then verifies that the\n+// MSG_CTRUNC flag is not set in the msghdr.\n+TEST_P(UnixSocketPairTest, BasicFDPassUnalignedRecvNoMsgTrunc) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+\n+ ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(),\n+ sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ char control[CMSG_SPACE(sizeof(int)) - sizeof(int)];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_flags, 0);\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);\n+}\n+\n+// BasicTwoFDPassUnalignedRecvTruncationMsgTrunc sends two FDs, but only\n+// provides enough space to receive one of them. It then verifies that the\n+// MSG_CTRUNC flag is set in the msghdr.\n+TEST_P(UnixSocketPairTest, BasicTwoFDPassUnalignedRecvTruncationMsgTrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ auto pair =\n+ ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create());\n+ int sent_fds[] = {pair->first_fd(), pair->second_fd()};\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data)));\n+\n+ struct msghdr msg = {};\n+ // CMSG_SPACE rounds up to two FDs, we only want one.\n+ char control[CMSG_SPACE(sizeof(int)) - sizeof(int)];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS);\n+}\n+\nTEST_P(UnixSocketPairTest, ConcurrentBasicFDPass) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n@@ -670,6 +912,202 @@ TEST_P(UnixSocketPairTest, WriteBeforeSoPassCredRecvEndAfterSendEnd) {\nEXPECT_EQ(want_creds.gid, received_creds.gid);\n}\n+TEST_P(UnixSocketPairTest, CredPassTruncated) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ struct ucred sent_creds;\n+\n+ ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));\n+\n+ 
SetSoPassCred(sockets->second_fd());\n+\n+ struct msghdr msg = {};\n+ char control[CMSG_SPACE(0) + sizeof(pid_t)];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n+\n+ EXPECT_EQ(msg.msg_controllen, sizeof(control));\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, sizeof(control));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);\n+\n+ pid_t pid = 0;\n+ memcpy(&pid, CMSG_DATA(cmsg), sizeof(pid));\n+ EXPECT_EQ(pid, sent_creds.pid);\n+}\n+\n+// CredPassNoMsgCtrunc passes a full set of credentials. It then verifies that\n+// receiving the full set does not result in MSG_CTRUNC being set in the msghdr.\n+TEST_P(UnixSocketPairTest, CredPassNoMsgCtrunc) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ struct ucred sent_creds;\n+\n+ ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));\n+\n+ SetSoPassCred(sockets->second_fd());\n+\n+ struct msghdr msg = {};\n+ char control[CMSG_SPACE(sizeof(struct ucred))];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n+\n+ // The control message should not be truncated.\n+ EXPECT_EQ(msg.msg_flags, 0);\n+ EXPECT_EQ(msg.msg_controllen, sizeof(control));\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct ucred)));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);\n+}\n+\n+// CredPassNoSpaceMsgCtrunc passes a full set of credentials. 
It then receives\n+// the data without providing space for any credentials and verifies that\n+// MSG_CTRUNC is set in the msghdr.\n+TEST_P(UnixSocketPairTest, CredPassNoSpaceMsgCtrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ struct ucred sent_creds;\n+\n+ ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));\n+\n+ SetSoPassCred(sockets->second_fd());\n+\n+ struct msghdr msg = {};\n+ char control[CMSG_SPACE(0)];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n+\n+ // The control message should be truncated.\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+ EXPECT_EQ(msg.msg_controllen, sizeof(control));\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, sizeof(control));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);\n+}\n+\n+// CredPassTruncatedMsgCtrunc passes a full set of credentials. It then receives\n+// the data while providing enough space for only the first field of the\n+// credentials and verifies that MSG_CTRUNC is set in the msghdr.\n+TEST_P(UnixSocketPairTest, CredPassTruncatedMsgCtrunc) {\n+ // FIXME: Support MSG_CTRUNC.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ char sent_data[20];\n+ RandomizeBuffer(sent_data, sizeof(sent_data));\n+\n+ struct ucred sent_creds;\n+\n+ ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds());\n+ ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds());\n+\n+ ASSERT_NO_FATAL_FAILURE(\n+ SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data)));\n+\n+ SetSoPassCred(sockets->second_fd());\n+\n+ struct msghdr msg = {};\n+ char control[CMSG_SPACE(0) + sizeof(pid_t)];\n+ msg.msg_control = control;\n+ msg.msg_controllen = sizeof(control);\n+\n+ char received_data[sizeof(sent_data)] = {};\n+ struct iovec iov;\n+ iov.iov_base = received_data;\n+ iov.iov_len = sizeof(received_data);\n+ msg.msg_iov = &iov;\n+ msg.msg_iovlen = 1;\n+\n+ ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0),\n+ SyscallSucceedsWithValue(sizeof(received_data)));\n+\n+ EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data)));\n+\n+ // The control message should be truncated.\n+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC);\n+ EXPECT_EQ(msg.msg_controllen, sizeof(control));\n+\n+ struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);\n+ ASSERT_NE(cmsg, nullptr);\n+ EXPECT_EQ(cmsg->cmsg_len, sizeof(control));\n+ EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET);\n+ EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS);\n+}\n+\nTEST_P(UnixSocketPairTest, SoPassCred) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" } ]
Go
Apache License 2.0
google/gvisor
Add Unix socket tests for the MSG_CTRUNC msghdr flag. TCP tests and the implementation will come in followup CLs. Updates google/gvisor#206 Updates google/gvisor#207 PiperOrigin-RevId: 245121470 Change-Id: Ib50b62724d3ba0cbfb1374e1f908798431ee2b21
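The commit above only adds tests; the behavior those tests pin down is the standard recvmsg(2) contract on Linux: when the caller's control buffer cannot hold the queued SCM_RIGHTS payload, the kernel drops the excess and sets MSG_CTRUNC in msg_flags. The sketch below is illustrative and not taken from the commit; it uses golang.org/x/sys/unix, and the helper name and buffer sizes are assumptions.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// recvWithCmsgCheck reads from fd with a control buffer that is deliberately
// too small to carry a passed file descriptor, then inspects msg_flags.
func recvWithCmsgCheck(fd int) error {
	buf := make([]byte, 32)
	oob := make([]byte, unix.CmsgSpace(0)) // room for a header, but no FD

	n, oobn, flags, _, err := unix.Recvmsg(fd, buf, oob, 0)
	if err != nil {
		return err
	}
	fmt.Printf("read %d data bytes, %d control bytes\n", n, oobn)
	if flags&unix.MSG_CTRUNC != 0 {
		// The kernel dropped part of the SCM_RIGHTS payload because the
		// control buffer could not hold it.
		return fmt.Errorf("control message truncated")
	}
	return nil
}

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Send some data plus stdout's descriptor as SCM_RIGHTS.
	if err := unix.Sendmsg(fds[0], []byte("hello"), unix.UnixRights(1), nil, 0); err != nil {
		log.Fatal(err)
	}
	if err := recvWithCmsgCheck(fds[1]); err != nil {
		log.Println(err) // expected: control message truncated
	}
}
```

Because the control buffer is sized at CmsgSpace(0), there is no room for even one descriptor, so a conforming kernel reports the truncation via the flag instead of failing the read, which is exactly what the new tests assert.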
259,884
25.04.2019 01:15:17
25,200
03be9ae88ca857da4f2243d17b06bd5bc38f88db
Update required bazel version to 0.23.0 in README Bazel 0.23.0 is required due to the use of cc_flags_supplier.bzl in the vdso package. cc_flags_supplier.bzl was added in 0.23.0.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -47,7 +47,7 @@ Make sure the following dependencies are installed:\n* Linux 4.14.77+ ([older linux][old-linux])\n* [git][git]\n-* [Bazel][bazel] 0.18+\n+* [Bazel][bazel] 0.23.0+\n* [Python][python]\n* [Docker version 17.09.0 or greater][docker]\n* Gold linker (e.g. `binutils-gold` package on Ubuntu)\n" } ]
Go
Apache License 2.0
google/gvisor
Update required bazel version to 0.23.0 in README Bazel 0.23.0 is required due to the use of cc_flags_supplier.bzl in the vdso package. cc_flags_supplier.bzl was added in 0.23.0. PiperOrigin-RevId: 245192715 Change-Id: I4258c064e5cc3bac2a587c887e0d8f87b6678ec7
259,881
25.04.2019 17:45:56
25,200
f17cfa4d53742923b5c91b149b82a05bcda3ea20
Perform explicit CPUID and FP state compatibility checks on restore
[ { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid.go", "new_path": "pkg/cpuid/cpuid.go", "diff": "@@ -446,6 +446,20 @@ const (\nextendedFeatures // Returns some extended feature bits in edx and ecx.\n)\n+// These are the extended floating point state features. They are used to\n+// enumerate floating point features in XCR0, XSTATE_BV, etc.\n+const (\n+ XSAVEFeatureX87 = 1 << 0\n+ XSAVEFeatureSSE = 1 << 1\n+ XSAVEFeatureAVX = 1 << 2\n+ XSAVEFeatureBNDREGS = 1 << 3\n+ XSAVEFeatureBNDCSR = 1 << 4\n+ XSAVEFeatureAVX512op = 1 << 5\n+ XSAVEFeatureAVX512zmm0 = 1 << 6\n+ XSAVEFeatureAVX512zmm16 = 1 << 7\n+ XSAVEFeaturePKRU = 1 << 9\n+)\n+\nvar cpuFreqMHz float64\n// x86FeaturesFromString includes features from x86FeatureStrings and\n@@ -561,6 +575,26 @@ func (fs *FeatureSet) Intel() bool {\nreturn fs.VendorID == \"GenuineIntel\"\n}\n+// ErrIncompatible is returned by FeatureSet.HostCompatible if fs is not a\n+// subset of the host feature set.\n+type ErrIncompatible struct {\n+ message string\n+}\n+\n+// Error implements error.\n+func (e ErrIncompatible) Error() string {\n+ return e.message\n+}\n+\n+// CheckHostCompatible returns nil if fs is a subset of the host feature set.\n+func (fs *FeatureSet) CheckHostCompatible() error {\n+ hfs := HostFeatureSet()\n+ if diff := fs.Subtract(hfs); diff != nil {\n+ return ErrIncompatible{fmt.Sprintf(\"CPU feature set %v incompatible with host feature set %v (missing: %v)\", fs.FlagsString(false), hfs.FlagsString(false), diff)}\n+ }\n+ return nil\n+}\n+\n// Helper to convert 3 regs into 12-byte vendor ID.\nfunc vendorIDFromRegs(bx, cx, dx uint32) string {\nbytes := make([]byte, 0, 12)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/arch/arch_state_x86.go", "new_path": "pkg/sentry/arch/arch_state_x86.go", "diff": "package arch\nimport (\n- \"sync\"\n+ \"fmt\"\n\"syscall\"\n- \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n-// warnOnce is used to warn about truncated state only once.\n-var warnOnce sync.Once\n+// ErrFloatingPoint indicates a failed restore due to unusable floating point\n+// state.\n+type ErrFloatingPoint struct {\n+ // supported is the supported floating point state.\n+ supported uint64\n+\n+ // saved is the saved floating point state.\n+ saved uint64\n+}\n+\n+// Error returns a sensible description of the restore error.\n+func (e ErrFloatingPoint) Error() string {\n+ return fmt.Sprintf(\"floating point state contains unsupported features; supported: %#x saved: %#x\", e.supported, e.saved)\n+}\n+\n+// XSTATE_BV does not exist if FXSAVE is used, but FXSAVE implicitly saves x87\n+// and SSE state, so this is the equivalent XSTATE_BV value.\n+const fxsaveBV uint64 = cpuid.XSAVEFeatureX87 | cpuid.XSAVEFeatureSSE\n// afterLoad is invoked by stateify.\nfunc (s *State) afterLoad() {\n@@ -33,7 +50,8 @@ func (s *State) afterLoad() {\n// state that may be saved by the new CPU. Even if extraneous new state\n// is saved, the state we care about is guaranteed to be a subset of\n// new state. Later optimizations can use less space when using a\n- // smaller state component bitmap. Intel SDM section 13 has more info.\n+ // smaller state component bitmap. 
Intel SDM Volume 1 Chapter 13 has\n+ // more info.\ns.x86FPState = newX86FPState()\n// x86FPState always contains all the FP state supported by the host.\n@@ -41,15 +59,30 @@ func (s *State) afterLoad() {\n// which we cannot restore.\n//\n// The x86 FP state areas are backwards compatible, so we can simply\n- // truncate the additional floating point state. Applications should\n- // not depend on the truncated state because it should relate only to\n- // features that were not exposed in the app FeatureSet.\n+ // truncate the additional floating point state.\n+ //\n+ // Applications should not depend on the truncated state because it\n+ // should relate only to features that were not exposed in the app\n+ // FeatureSet. However, because we do not *prevent* them from using\n+ // this state, we must verify here that there is no in-use state\n+ // (according to XSTATE_BV) which we do not support.\nif len(s.x86FPState) < len(old) {\n- warnOnce.Do(func() {\n- // This will occur on every instance of state, don't\n- // bother warning more than once.\n- log.Infof(\"dropping %d bytes of floating point state; the application should not depend on this state\", len(old)-len(s.x86FPState))\n- })\n+ // What do we support?\n+ supportedBV := fxsaveBV\n+ if fs := cpuid.HostFeatureSet(); fs.UseXsave() {\n+ supportedBV = fs.ValidXCR0Mask()\n+ }\n+\n+ // What was in use?\n+ savedBV := fxsaveBV\n+ if len(old) >= xstateBVOffset+8 {\n+ savedBV = usermem.ByteOrder.Uint64(old[xstateBVOffset:])\n+ }\n+\n+ // Supported features must be a superset of saved features.\n+ if savedBV&^supportedBV != 0 {\n+ panic(ErrFloatingPoint{supported: supportedBV, saved: savedBV})\n+ }\n}\n// Copy to the new, aligned location.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -337,6 +337,17 @@ func (k *Kernel) SaveTo(w io.Writer) error {\nreturn fmt.Errorf(\"failed to invalidate unsavable mappings: %v\", err)\n}\n+ // Save the CPUID FeatureSet before the rest of the kernel so we can\n+ // verify its compatibility on restore before attempting to restore the\n+ // entire kernel, which may fail on an incompatible machine.\n+ //\n+ // N.B. This will also be saved along with the full kernel save below.\n+ cpuidStart := time.Now()\n+ if err := state.Save(w, k.FeatureSet(), nil); err != nil {\n+ return err\n+ }\n+ log.Infof(\"CPUID save took [%s].\", time.Since(cpuidStart))\n+\n// Save the kernel state.\nkernelStart := time.Now()\nvar stats state.Stats\n@@ -469,6 +480,25 @@ func (k *Kernel) LoadFrom(r io.Reader, net inet.Stack) error {\ninitAppCores := k.applicationCores\n+ // Load the pre-saved CPUID FeatureSet.\n+ //\n+ // N.B. This was also saved along with the full kernel below, so we\n+ // don't need to explicitly install it in the Kernel.\n+ cpuidStart := time.Now()\n+ var features cpuid.FeatureSet\n+ if err := state.Load(r, &features, nil); err != nil {\n+ return err\n+ }\n+ log.Infof(\"CPUID load took [%s].\", time.Since(cpuidStart))\n+\n+ // Verify that the FeatureSet is usable on this host. We do this before\n+ // Kernel load so that the explicit CPUID mismatch error has priority\n+ // over floating point state restore errors that may occur on load on\n+ // an incompatible machine.\n+ if err := features.CheckHostCompatible(); err != nil {\n+ return err\n+ }\n+\n// Load the kernel state.\nkernelStart := time.Now()\nvar stats state.Stats\n" } ]
Go
Apache License 2.0
google/gvisor
Perform explicit CPUID and FP state compatibility checks on restore PiperOrigin-RevId: 245341004 Change-Id: Ic4d581039d034a8ae944b43e45e84eb2c3973657
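The essence of the new restore checks is a superset test over feature bitmasks: restore is refused if the saved state advertises any bit the host cannot provide. The standalone illustration below is not gVisor's internal code; the constants and names are invented stand-ins for XSTATE components.

```go
package main

import "fmt"

// Invented feature bits standing in for XSTATE components (x87, SSE, AVX, ...).
const (
	x87    = 1 << 0
	sse    = 1 << 1
	avx    = 1 << 2
	avx512 = 1 << 5
)

// compatible reports whether every component recorded in savedBV is also
// present in supportedBV; savedBV &^ supportedBV isolates the offending bits.
func compatible(supportedBV, savedBV uint64) bool {
	return savedBV&^supportedBV == 0
}

func main() {
	host := uint64(x87 | sse | avx)
	fmt.Println(compatible(host, x87|sse))        // true: strict subset, restore can proceed
	fmt.Println(compatible(host, x87|sse|avx512)) // false: AVX-512 state was saved, host lacks it
}
```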
259,891
26.04.2019 11:06:59
25,200
f4d34b420bd30b9c3725f9247c9145808aab0ffb
Change name of sticky test arg.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/sticky.cc", "new_path": "test/syscalls/linux/sticky.cc", "diff": "#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n-DEFINE_int32(scratch_uid1, 65534, \"first scratch UID\");\n+DEFINE_int32(scratch_uid, 65534, \"first scratch UID\");\nDEFINE_int32(scratch_gid, 65534, \"first scratch GID\");\nnamespace gvisor {\n@@ -54,7 +54,7 @@ TEST(StickyTest, StickyBitPermDenied) {\n// Change EUID and EGID.\nEXPECT_THAT(syscall(SYS_setresgid, -1, FLAGS_scratch_gid, -1),\nSyscallSucceeds());\n- EXPECT_THAT(syscall(SYS_setresuid, -1, FLAGS_scratch_uid1, -1),\n+ EXPECT_THAT(syscall(SYS_setresuid, -1, FLAGS_scratch_uid, -1),\nSyscallSucceeds());\nEXPECT_THAT(rmdir(path.c_str()), SyscallFailsWithErrno(EPERM));\n@@ -103,7 +103,7 @@ TEST(StickyTest, StickyBitCapFOWNER) {\n// Change EUID and EGID.\nEXPECT_THAT(syscall(SYS_setresgid, -1, FLAGS_scratch_gid, -1),\nSyscallSucceeds());\n- EXPECT_THAT(syscall(SYS_setresuid, -1, FLAGS_scratch_uid1, -1),\n+ EXPECT_THAT(syscall(SYS_setresuid, -1, FLAGS_scratch_uid, -1),\nSyscallSucceeds());\nEXPECT_NO_ERRNO(SetCapability(CAP_FOWNER, true));\n" } ]
Go
Apache License 2.0
google/gvisor
Change name of sticky test arg. PiperOrigin-RevId: 245451875 Change-Id: Icee2c4ed74564e77454c60d60f456454443ccadf
259,891
26.04.2019 11:08:37
25,200
5f13338d30fb59241cf7f1aa6374c54c69677314
Fix reference counting bug in /proc/PID/fdinfo/.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/proc/fds.go", "new_path": "pkg/sentry/fs/proc/fds.go", "diff": "@@ -236,24 +236,6 @@ func (f *fdDirFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySer\n})\n}\n-// fdInfoInode is a single file in /proc/TID/fdinfo/.\n-//\n-// +stateify savable\n-type fdInfoInode struct {\n- staticFileInodeOps\n-\n- file *fs.File\n- flags fs.FileFlags\n- fdFlags kernel.FDFlags\n-}\n-\n-var _ fs.InodeOperations = (*fdInfoInode)(nil)\n-\n-// Release implements fs.InodeOperations.Release.\n-func (f *fdInfoInode) Release(ctx context.Context) {\n- f.file.DecRef()\n-}\n-\n// fdInfoDir implements /proc/TID/fdinfo. It embeds an fdDir, but overrides\n// Lookup and Readdir.\n//\n@@ -283,6 +265,7 @@ func (fdid *fdInfoDir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs\n// locks, and other data. For now we only have flags.\n// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt\nflags := file.Flags().ToLinux() | fdFlags.ToLinuxFileFlags()\n+ file.DecRef()\ncontents := []byte(fmt.Sprintf(\"flags:\\t0%o\\n\", flags))\nreturn newStaticProcInode(ctx, dir.MountSource, contents)\n})\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/pipe.cc", "new_path": "test/syscalls/linux/pipe.cc", "diff": "@@ -455,9 +455,11 @@ TEST_F(PipeTest, LargeFile) {\nEXPECT_EQ(rflags, 0);\n}\n-// Test that accessing /proc/<PID>/fd/<FD> correctly decrements the refcount of\n-// that file descriptor.\n+// Test that accesses of /proc/<PID>/fd/<FD> and /proc/<PID>/fdinfo/<FD>\n+// correctly decrement the refcount of that file descriptor.\nTEST_F(PipeTest, ProcFDReleasesFile) {\n+ std::vector<std::string> paths = {\"/proc/self/fd/\", \"/proc/self/fdinfo/\"};\n+ for (const std::string& path : paths) {\nint fds[2];\nASSERT_THAT(pipe(fds), SyscallSucceeds());\nFileDescriptor rfd(fds[0]);\n@@ -466,14 +468,14 @@ TEST_F(PipeTest, ProcFDReleasesFile) {\n// Stat the pipe FD, which shouldn't alter the refcount of the write end of\n// the pipe.\nstruct stat wst;\n- ASSERT_THAT(lstat(absl::StrCat(\"/proc/self/fd/\", wfd.get()).c_str(), &wst),\n+ ASSERT_THAT(lstat(absl::StrCat(path.c_str(), wfd.get()).c_str(), &wst),\nSyscallSucceeds());\n-\n// Close the write end of the pipe and ensure that read indicates EOF.\nwfd.reset();\nchar buf;\nASSERT_THAT(read(rfd.get(), &buf, 1), SyscallSucceedsWithValue(0));\n}\n+}\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Fix reference counting bug in /proc/PID/fdinfo/. PiperOrigin-RevId: 245452217 Change-Id: I7164d8f57fe34c17e601079eb9410a6d95af1869
259,858
26.04.2019 13:51:48
25,200
5749f64314d38516badec156ab048d3523294a81
kvm: remove non-sane sanity check Apparently some platforms don't have pSize < vSize. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/physical_map.go", "new_path": "pkg/sentry/platform/kvm/physical_map.go", "diff": "@@ -50,8 +50,9 @@ type physicalRegion struct {\nvar physicalRegions []physicalRegion\n// fillAddressSpace fills the host address space with PROT_NONE mappings until\n-// the number of available bits until we have a host address space size that is\n-// equal to the physical address space.\n+// we have a host address space size that is less than or equal to the physical\n+// address space. This allows us to have an injective host virtual to guest\n+// physical mapping.\n//\n// The excluded regions are returned.\nfunc fillAddressSpace() (excludedRegions []region) {\n@@ -67,11 +68,6 @@ func fillAddressSpace() (excludedRegions []region) {\npSize := uintptr(1) << ring0.PhysicalAddressBits()\npSize -= reservedMemory\n- // Sanity check.\n- if vSize < pSize {\n- panic(fmt.Sprintf(\"vSize (%x) < pSize (%x)\", vSize, pSize))\n- }\n-\n// Add specifically excluded regions; see excludeVirtualRegion.\napplyVirtualRegions(func(vr virtualRegion) {\nif excludeVirtualRegion(vr) {\n@@ -81,6 +77,11 @@ func fillAddressSpace() (excludedRegions []region) {\n}\n})\n+ // Do we need any more work?\n+ if vSize < pSize {\n+ return excludedRegions\n+ }\n+\n// Calculate the required space and fill it.\n//\n// Note carefully that we add faultBlockSize to required up front, and\n" } ]
Go
Apache License 2.0
google/gvisor
kvm: remove non-sane sanity check Apparently some platforms don't have pSize < vSize. Fixes #208 PiperOrigin-RevId: 245480998 Change-Id: I2a98229912f4ccbfcd8e79dfa355104f14275a9c
259,891
26.04.2019 16:50:35
25,200
43dff57b878edb5502daf486cbc13b058780dd56
Make raw sockets a toggleable feature disabled by default.
[ { "change_type": "MODIFY", "old_path": "pkg/syserr/netstack.go", "new_path": "pkg/syserr/netstack.go", "diff": "@@ -45,6 +45,7 @@ var (\nErrNoSuchFile = New(tcpip.ErrNoSuchFile.String(), linux.ENOENT)\nErrInvalidOptionValue = New(tcpip.ErrInvalidOptionValue.String(), linux.EINVAL)\nErrBroadcastDisabled = New(tcpip.ErrBroadcastDisabled.String(), linux.EACCES)\n+ ErrNotPermittedNet = New(tcpip.ErrNotPermitted.String(), linux.EPERM)\n)\nvar netstackErrorTranslations = map[*tcpip.Error]*Error{\n@@ -84,6 +85,7 @@ var netstackErrorTranslations = map[*tcpip.Error]*Error{\ntcpip.ErrMessageTooLong: ErrMessageTooLong,\ntcpip.ErrNoBufferSpace: ErrNoBufferSpace,\ntcpip.ErrBroadcastDisabled: ErrBroadcastDisabled,\n+ tcpip.ErrNotPermitted: ErrNotPermittedNet,\n}\n// TranslateNetstackError converts an error from the tcpip package to a sentry\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -291,6 +291,10 @@ type Stack struct {\nlinkAddrCache *linkAddrCache\n+ // raw indicates whether raw sockets may be created. It is set during\n+ // Stack creation and is immutable.\n+ raw bool\n+\nmu sync.RWMutex\nnics map[tcpip.NICID]*NIC\nforwarding bool\n@@ -327,6 +331,9 @@ type Options struct {\n// should be handled by the stack internally (true) or outside the\n// stack (false).\nHandleLocal bool\n+\n+ // Raw indicates whether raw sockets may be created.\n+ Raw bool\n}\n// New allocates a new networking stack with only the requested networking and\n@@ -352,6 +359,7 @@ func New(network []string, transport []string, opts Options) *Stack {\nclock: clock,\nstats: opts.Stats.FillIn(),\nhandleLocal: opts.HandleLocal,\n+ raw: opts.Raw,\n}\n// Add specified network protocols.\n@@ -512,6 +520,10 @@ func (s *Stack) NewEndpoint(transport tcpip.TransportProtocolNumber, network tcp\n// protocol. Raw endpoints receive all traffic for a given protocol regardless\n// of address.\nfunc (s *Stack) NewRawEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n+ if !s.raw {\n+ return nil, tcpip.ErrNotPermitted\n+ }\n+\nt, ok := s.transportProtocols[transport]\nif !ok {\nreturn nil, tcpip.ErrUnknownProtocol\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -102,6 +102,7 @@ var (\nErrMessageTooLong = &Error{msg: \"message too long\"}\nErrNoBufferSpace = &Error{msg: \"no buffer space available\"}\nErrBroadcastDisabled = &Error{msg: \"broadcast socket option disabled\"}\n+ ErrNotPermitted = &Error{msg: \"operation not permitted\"}\n)\n// Errors related to Subnet\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "new_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "diff": "@@ -341,6 +341,7 @@ func loadError(s string) *tcpip.Error {\ntcpip.ErrMessageTooLong,\ntcpip.ErrNoBufferSpace,\ntcpip.ErrBroadcastDisabled,\n+ tcpip.ErrNotPermitted,\n}\nmessageToError = make(map[string]*tcpip.Error)\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/config.go", "new_path": "runsc/boot/config.go", "diff": "@@ -175,6 +175,11 @@ type Config struct {\n// Network indicates what type of network to use.\nNetwork NetworkType\n+ // EnableRaw indicates whether raw sockets should be enabled. 
Raw\n+ // sockets are disabled by stripping CAP_NET_RAW from the list of\n+ // capabilities.\n+ EnableRaw bool\n+\n// GSO indicates that generic segmentation offload is enabled.\nGSO bool\n@@ -235,6 +240,7 @@ func (c *Config) ToFlags() []string {\n\"--watchdog-action=\" + c.WatchdogAction.String(),\n\"--panic-signal=\" + strconv.Itoa(c.PanicSignal),\n\"--profile=\" + strconv.FormatBool(c.ProfileEnable),\n+ \"--net-raw=\" + strconv.FormatBool(c.EnableRaw),\n}\nif c.TestOnlyAllowRunAsCurrentUserWithoutChroot {\n// Only include if set since it is never to be used by users.\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -227,7 +227,7 @@ func New(args Args) (*Loader, error) {\n}\n// Create capabilities.\n- caps, err := specutils.Capabilities(args.Spec.Process.Capabilities)\n+ caps, err := specutils.Capabilities(args.Conf.EnableRaw, args.Spec.Process.Capabilities)\nif err != nil {\nreturn nil, fmt.Errorf(\"converting capabilities: %v\", err)\n}\n@@ -554,7 +554,7 @@ func (l *Loader) createContainer(cid string) error {\n// this method returns.\nfunc (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config, cid string, files []*os.File) error {\n// Create capabilities.\n- caps, err := specutils.Capabilities(spec.Process.Capabilities)\n+ caps, err := specutils.Capabilities(conf.EnableRaw, spec.Process.Capabilities)\nif err != nil {\nreturn fmt.Errorf(\"creating capabilities: %v\", err)\n}\n@@ -800,6 +800,9 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\nClock: clock,\nStats: epsocket.Metrics,\nHandleLocal: true,\n+ // Enable raw sockets for users with sufficient\n+ // privileges.\n+ Raw: true,\n})}\nif err := s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(true)); err != nil {\nreturn nil, fmt.Errorf(\"failed to enable SACK: %v\", err)\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/exec.go", "new_path": "runsc/cmd/exec.go", "diff": "@@ -132,7 +132,11 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n}\nif e.Capabilities == nil {\n- e.Capabilities, err = specutils.Capabilities(c.Spec.Process.Capabilities)\n+ // enableRaw is set to true to prevent the filtering out of\n+ // CAP_NET_RAW. This is the opposite of Create() because exec\n+ // requires the capability to be set explicitly, while 'docker\n+ // run' sets it by default.\n+ e.Capabilities, err = specutils.Capabilities(true /* enableRaw */, c.Spec.Process.Capabilities)\nif err != nil {\nFatalf(\"creating capabilities: %v\", err)\n}\n@@ -351,7 +355,11 @@ func argsFromProcess(p *specs.Process) (*control.ExecArgs, error) {\nvar caps *auth.TaskCapabilities\nif p.Capabilities != nil {\nvar err error\n- caps, err = specutils.Capabilities(p.Capabilities)\n+ // enableRaw is set to true to prevent the filtering out of\n+ // CAP_NET_RAW. This is the opposite of Create() because exec\n+ // requires the capability to be set explicitly, while 'docker\n+ // run' sets it by default.\n+ caps, err = specutils.Capabilities(true /* enableRaw */, p.Capabilities)\nif err != nil {\nreturn nil, fmt.Errorf(\"error creating capabilities: %v\", err)\n}\n@@ -413,7 +421,11 @@ func capabilities(cs []string) (*auth.TaskCapabilities, error) {\nspecCaps.Inheritable = append(specCaps.Inheritable, cap)\nspecCaps.Permitted = append(specCaps.Permitted, cap)\n}\n- return specutils.Capabilities(&specCaps)\n+ // enableRaw is set to true to prevent the filtering out of\n+ // CAP_NET_RAW. 
This is the opposite of Create() because exec requires\n+ // the capability to be set explicitly, while 'docker run' sets it by\n+ // default.\n+ return specutils.Capabilities(true /* enableRaw */, &specCaps)\n}\n// stringSlice allows a flag to be used multiple times, where each occurrence\n" }, { "change_type": "MODIFY", "old_path": "runsc/main.go", "new_path": "runsc/main.go", "diff": "@@ -68,6 +68,7 @@ var (\nwatchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\npanicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\nprofile = flag.Bool(\"profile\", false, \"prepares the sandbox to use Golang profiler. Note that enabling profiler loosens the seccomp protection added to the sandbox (DO NOT USE IN PRODUCTION).\")\n+ netRaw = flag.Bool(\"net-raw\", false, \"enable raw sockets. When false, raw sockets are disabled by removing CAP_NET_RAW from containers (`runsc exec` will still be able to utilize raw sockets). Raw sockets allow malicious containers to craft packets and potentially attack the network.\")\ntestOnlyAllowRunAsCurrentUserWithoutChroot = flag.Bool(\"TESTONLY-unsafe-nonroot\", false, \"TEST ONLY; do not ever use! This skips many security measures that isolate the host from the sandbox.\")\n)\n@@ -159,6 +160,7 @@ func main() {\nWatchdogAction: wa,\nPanicSignal: *panicSignal,\nProfileEnable: *profile,\n+ EnableRaw: *netRaw,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: *testOnlyAllowRunAsCurrentUserWithoutChroot,\n}\nif len(*straceSyscalls) != 0 {\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -198,20 +198,26 @@ func ReadMounts(f *os.File) ([]specs.Mount, error) {\n// Capabilities takes in spec and returns a TaskCapabilities corresponding to\n// the spec.\n-func Capabilities(specCaps *specs.LinuxCapabilities) (*auth.TaskCapabilities, error) {\n+func Capabilities(enableRaw bool, specCaps *specs.LinuxCapabilities) (*auth.TaskCapabilities, error) {\n+ // Strip CAP_NET_RAW from all capability sets if necessary.\n+ skipSet := map[linux.Capability]struct{}{}\n+ if !enableRaw {\n+ skipSet[linux.CAP_NET_RAW] = struct{}{}\n+ }\n+\nvar caps auth.TaskCapabilities\nif specCaps != nil {\nvar err error\n- if caps.BoundingCaps, err = capsFromNames(specCaps.Bounding); err != nil {\n+ if caps.BoundingCaps, err = capsFromNames(specCaps.Bounding, skipSet); err != nil {\nreturn nil, err\n}\n- if caps.EffectiveCaps, err = capsFromNames(specCaps.Effective); err != nil {\n+ if caps.EffectiveCaps, err = capsFromNames(specCaps.Effective, skipSet); err != nil {\nreturn nil, err\n}\n- if caps.InheritableCaps, err = capsFromNames(specCaps.Inheritable); err != nil {\n+ if caps.InheritableCaps, err = capsFromNames(specCaps.Inheritable, skipSet); err != nil {\nreturn nil, err\n}\n- if caps.PermittedCaps, err = capsFromNames(specCaps.Permitted); err != nil {\n+ if caps.PermittedCaps, err = capsFromNames(specCaps.Permitted, skipSet); err != nil {\nreturn nil, err\n}\n// TODO: Support ambient capabilities.\n@@ -275,13 +281,17 @@ var capFromName = map[string]linux.Capability{\n\"CAP_AUDIT_READ\": linux.CAP_AUDIT_READ,\n}\n-func capsFromNames(names []string) (auth.CapabilitySet, error) {\n+func capsFromNames(names []string, skipSet map[linux.Capability]struct{}) (auth.CapabilitySet, error) {\nvar caps []linux.Capability\nfor _, n := range names {\nc, ok 
:= capFromName[n]\nif !ok {\nreturn 0, fmt.Errorf(\"unknown capability %q\", n)\n}\n+ // Should we skip this capabilty?\n+ if _, ok := skipSet[c]; ok {\n+ continue\n+ }\ncaps = append(caps, c)\n}\nreturn auth.CapabilitySetOfMany(caps), nil\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/integration/BUILD", "new_path": "runsc/test/integration/BUILD", "diff": "@@ -15,7 +15,10 @@ go_test(\n\"manual\",\n\"local\",\n],\n- deps = [\"//runsc/test/testutil\"],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//runsc/test/testutil\",\n+ ],\n)\ngo_library(\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/integration/exec_test.go", "new_path": "runsc/test/integration/exec_test.go", "diff": "package integration\nimport (\n+ \"fmt\"\n+ \"strconv\"\n\"syscall\"\n\"testing\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n@@ -46,11 +49,28 @@ func TestExecCapabilities(t *testing.T) {\n}\ndefer d.CleanUp()\n- want, err := d.WaitForOutput(\"CapEff:\\t[0-9a-f]+\\n\", 5*time.Second)\n+ matches, err := d.WaitForOutputSubmatch(\"CapEff:\\t([0-9a-f]+)\\n\", 5*time.Second)\nif err != nil {\n- t.Fatalf(\"WaitForOutput() timeout: %v\", err)\n+ t.Fatalf(\"WaitForOutputSubmatch() timeout: %v\", err)\n}\n- t.Log(\"Root capabilities:\", want)\n+ if len(matches) != 2 {\n+ t.Fatalf(\"There should be a match for the whole line and the capability bitmask\")\n+ }\n+ capString := matches[1]\n+ t.Log(\"Root capabilities:\", capString)\n+\n+ // CAP_NET_RAW was in the capability set for the container, but was\n+ // removed. However, `exec` does not remove it. Verify that it's not\n+ // set in the container, then re-add it for comparison.\n+ caps, err := strconv.ParseUint(capString, 16, 64)\n+ if err != nil {\n+ t.Fatalf(\"failed to convert capabilities %q: %v\", capString, err)\n+ }\n+ if caps&(1<<uint64(linux.CAP_NET_RAW)) != 0 {\n+ t.Fatalf(\"CAP_NET_RAW should be filtered, but is set in the container: %x\", caps)\n+ }\n+ caps |= 1 << uint64(linux.CAP_NET_RAW)\n+ want := fmt.Sprintf(\"CapEff:\\t%016x\\n\", caps)\n// Now check that exec'd process capabilities match the root.\ngot, err := d.Exec(\"grep\", \"CapEff:\", \"/proc/self/status\")\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/docker.go", "new_path": "runsc/test/testutil/docker.go", "diff": "@@ -334,19 +334,32 @@ func (d *Docker) Wait(timeout time.Duration) (syscall.WaitStatus, error) {\n// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n// for the given pattern.\nfunc (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, error) {\n+ matches, err := d.WaitForOutputSubmatch(pattern, timeout)\n+ if err != nil {\n+ return \"\", err\n+ }\n+ if len(matches) == 0 {\n+ return \"\", nil\n+ }\n+ return matches[0], nil\n+}\n+\n+// WaitForOutputSubmatch calls 'docker logs' to retrieve containers output and\n+// searches for the given pattern. 
It returns any regexp submatches as well.\n+func (d *Docker) WaitForOutputSubmatch(pattern string, timeout time.Duration) ([]string, error) {\nre := regexp.MustCompile(pattern)\nvar out string\nfor exp := time.Now().Add(timeout); time.Now().Before(exp); {\nvar err error\nout, err = d.Logs()\nif err != nil {\n- return \"\", err\n+ return nil, err\n}\n- if match := re.FindString(out); match != \"\" {\n+ if matches := re.FindStringSubmatch(out); matches != nil {\n// Success!\n- return match, nil\n+ return matches, nil\n}\ntime.Sleep(100 * time.Millisecond)\n}\n- return \"\", fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n+ return nil, fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make raw sockets a toggleable feature disabled by default. PiperOrigin-RevId: 245511019 Change-Id: Ia9562a301b46458988a6a1f0bbd5f07cbfcb0615
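The mechanism is a filtering step: when the new --net-raw flag is left at its default (false), CAP_NET_RAW is dropped while translating the OCI capability names, so the netstack's raw-endpoint support stays unreachable for the container. The sketch below shows that filtering idea in isolation; it is not runsc's actual helper, and the function and variable names are invented.

```go
package main

import "fmt"

// filterCaps drops CAP_NET_RAW from an OCI capability list unless raw
// sockets were explicitly enabled (the --net-raw case).
func filterCaps(names []string, enableRaw bool) []string {
	skip := map[string]bool{}
	if !enableRaw {
		skip["CAP_NET_RAW"] = true
	}
	out := make([]string, 0, len(names))
	for _, n := range names {
		if skip[n] {
			continue
		}
		out = append(out, n)
	}
	return out
}

func main() {
	spec := []string{"CAP_CHOWN", "CAP_NET_RAW", "CAP_NET_BIND_SERVICE"}
	fmt.Println(filterCaps(spec, false)) // [CAP_CHOWN CAP_NET_BIND_SERVICE]
	fmt.Println(filterCaps(spec, true))  // unchanged, as `runsc exec` expects
}
```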
259,899
26.04.2019 22:45:45
25,200
66bca6fc221393c9553cbaa0486e07c8124e2477
tcpip/adapters/gonet: add CloseRead & CloseWrite methods to Conn Add the CloseRead & CloseWrite methods that perform shutdown on the corresponding Read & Write sides of a connection.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet.go", "new_path": "pkg/tcpip/adapters/gonet/gonet.go", "diff": "@@ -435,6 +435,28 @@ func (c *Conn) Close() error {\nreturn nil\n}\n+// CloseRead shuts down the reading side of the TCP connection. Most callers\n+// should just use Close.\n+//\n+// A TCP Half-Close is performed the same as CloseRead for *net.TCPConn.\n+func (c *Conn) CloseRead() error {\n+ if terr := c.ep.Shutdown(tcpip.ShutdownRead); terr != nil {\n+ return c.newOpError(\"close\", errors.New(terr.String()))\n+ }\n+ return nil\n+}\n+\n+// CloseWrite shuts down the writing side of the TCP connection. Most callers\n+// should just use Close.\n+//\n+// A TCP Half-Close is performed the same as CloseWrite for *net.TCPConn.\n+func (c *Conn) CloseWrite() error {\n+ if terr := c.ep.Shutdown(tcpip.ShutdownWrite); terr != nil {\n+ return c.newOpError(\"close\", errors.New(terr.String()))\n+ }\n+ return nil\n+}\n+\n// LocalAddr implements net.Conn.LocalAddr.\nfunc (c *Conn) LocalAddr() net.Addr {\na, err := c.ep.GetLocalAddress()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "new_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "diff": "@@ -16,6 +16,7 @@ package gonet\nimport (\n\"fmt\"\n+ \"io\"\n\"net\"\n\"reflect\"\n\"strings\"\n@@ -222,6 +223,115 @@ func TestCloseReaderWithForwarder(t *testing.T) {\nsender.close()\n}\n+func TestCloseRead(t *testing.T) {\n+ s, terr := newLoopbackStack()\n+ if terr != nil {\n+ t.Fatalf(\"newLoopbackStack() = %v\", terr)\n+ }\n+\n+ addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)\n+\n+ fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {\n+ var wq waiter.Queue\n+ ep, err := r.CreateEndpoint(&wq)\n+ if err != nil {\n+ t.Fatalf(\"r.CreateEndpoint() = %v\", err)\n+ }\n+ defer ep.Close()\n+ r.Complete(false)\n+\n+ c := NewConn(&wq, ep)\n+\n+ buf := make([]byte, 256)\n+ n, e := c.Read(buf)\n+ if e != nil || string(buf[:n]) != \"abc123\" {\n+ t.Fatalf(\"c.Read() = (%d, %v), want (6, nil)\", n, e)\n+ }\n+\n+ if n, e = c.Write([]byte(\"abc123\")); e != nil {\n+ t.Errorf(\"c.Write() = (%d, %v), want (6, nil)\", n, e)\n+ }\n+ })\n+\n+ s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)\n+\n+ tc, terr := connect(s, addr)\n+ if terr != nil {\n+ t.Fatalf(\"connect() = %v\", terr)\n+ }\n+ c := NewConn(tc.wq, tc.ep)\n+\n+ if err := c.CloseRead(); err != nil {\n+ t.Errorf(\"c.CloseRead() = %v\", err)\n+ }\n+\n+ buf := make([]byte, 256)\n+ if n, err := c.Read(buf); err != io.EOF {\n+ t.Errorf(\"c.Read() = (%d, %v), want (0, io.EOF)\", n, err)\n+ }\n+\n+ if n, err := c.Write([]byte(\"abc123\")); n != 6 || err != nil {\n+ t.Errorf(\"c.Write() = (%d, %v), want (6, nil)\", n, err)\n+ }\n+}\n+\n+func TestCloseWrite(t *testing.T) {\n+ s, terr := newLoopbackStack()\n+ if terr != nil {\n+ t.Fatalf(\"newLoopbackStack() = %v\", terr)\n+ }\n+\n+ addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)\n+\n+ fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {\n+ var wq waiter.Queue\n+ ep, err := r.CreateEndpoint(&wq)\n+ if err != nil {\n+ t.Fatalf(\"r.CreateEndpoint() = %v\", err)\n+ }\n+ defer ep.Close()\n+ r.Complete(false)\n+\n+ c := NewConn(&wq, ep)\n+\n+ n, e := c.Read(make([]byte, 256))\n+ if n != 0 || e != io.EOF {\n+ t.Errorf(\"c.Read() = (%d, %v), want (0, io.EOF)\", n, e)\n+ 
}\n+\n+ if n, e = c.Write([]byte(\"abc123\")); n != 6 || e != nil {\n+ t.Errorf(\"c.Write() = (%d, %v), want (6, nil)\", n, e)\n+ }\n+ })\n+\n+ s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)\n+\n+ tc, terr := connect(s, addr)\n+ if terr != nil {\n+ t.Fatalf(\"connect() = %v\", terr)\n+ }\n+ c := NewConn(tc.wq, tc.ep)\n+\n+ if err := c.CloseWrite(); err != nil {\n+ t.Errorf(\"c.CloseWrite() = %v\", err)\n+ }\n+\n+ buf := make([]byte, 256)\n+ n, err := c.Read(buf)\n+ if err != nil || string(buf[:n]) != \"abc123\" {\n+ t.Fatalf(\"c.Read() = (%d, %v), want (6, nil)\", n, err)\n+ }\n+\n+ n, err = c.Write([]byte(\"abc123\"))\n+ got, ok := err.(*net.OpError)\n+ want := \"endpoint is closed for send\"\n+ if n != 0 || !ok || got.Op != \"write\" || got.Err == nil || !strings.HasSuffix(got.Err.Error(), want) {\n+ t.Errorf(\"c.Write() = (%d, %v), want (0, OpError(Op: write, Err: %s))\", n, err, want)\n+ }\n+}\n+\nfunc TestUDPForwarder(t *testing.T) {\ns, terr := newLoopbackStack()\nif terr != nil {\n" } ]
Go
Apache License 2.0
google/gvisor
tcpip/adapters/gonet: add CloseRead & CloseWrite methods to Conn Add the CloseRead & CloseWrite methods that performs shutdown on the corresponding Read & Write sides of a connection. Change-Id: I3996a2abdc7cd68a2becba44dc4bd9f0919d2ce1 PiperOrigin-RevId: 245537950
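A sketch of how a caller might use the new half-close methods. It assumes an already-established *gonet.Conn obtained elsewhere (for example via gonet.DialTCP or a forwarder callback), uses the import path from the tree at that time, and the helper name is invented.

```go
// Package gonetexample is an illustrative fragment, not code from the commit.
package gonetexample

import (
	"io/ioutil"

	"gvisor.googlesource.com/gvisor/pkg/tcpip/adapters/gonet"
)

// requestAndDrain writes a request, half-closes the write side so the peer
// sees EOF (mirroring (*net.TCPConn).CloseWrite), and then reads the reply
// until the peer finishes sending.
func requestAndDrain(c *gonet.Conn, req []byte) ([]byte, error) {
	if _, err := c.Write(req); err != nil {
		return nil, err
	}
	if err := c.CloseWrite(); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(c)
}
```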
259,992
29.04.2019 15:32:45
25,200
ddab854b9a895603664fa4abfa525f6a29047083
Reduce memory allocations on serving path Cache last used messages and reuse them for subsequent requests. If more messages are needed, they are created outside the cache on demand.
[ { "change_type": "MODIFY", "old_path": "pkg/p9/buffer.go", "new_path": "pkg/p9/buffer.go", "diff": "@@ -20,7 +20,8 @@ import (\n// encoder is used for messages and 9P primitives.\ntype encoder interface {\n- // Decode decodes from the given buffer.\n+ // Decode decodes from the given buffer. Decode may be called more than once\n+ // to reuse the instance. It must clear any previous state.\n//\n// This may not fail, exhaustion will be recorded in the buffer.\nDecode(b *buffer)\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/client.go", "new_path": "pkg/p9/client.go", "diff": "@@ -110,16 +110,16 @@ type Client struct {\n// You should not use the same socket for multiple clients.\nfunc NewClient(socket *unet.Socket, messageSize uint32, version string) (*Client, error) {\n// Need at least one byte of payload.\n- if messageSize <= largestFixedSize {\n+ if messageSize <= msgRegistry.largestFixedSize {\nreturn nil, &ErrMessageTooLarge{\nsize: messageSize,\n- msize: largestFixedSize,\n+ msize: msgRegistry.largestFixedSize,\n}\n}\n// Compute a payload size and round to 512 (normal block size)\n// if it's larger than a single block.\n- payloadSize := messageSize - largestFixedSize\n+ payloadSize := messageSize - msgRegistry.largestFixedSize\nif payloadSize > 512 && payloadSize%512 != 0 {\npayloadSize -= (payloadSize % 512)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/messages.go", "new_path": "pkg/p9/messages.go", "diff": "@@ -193,6 +193,7 @@ func (t *Twalk) Decode(b *buffer) {\nt.FID = b.ReadFID()\nt.NewFID = b.ReadFID()\nn := b.Read16()\n+ t.Names = t.Names[:0]\nfor i := 0; i < int(n); i++ {\nt.Names = append(t.Names, b.ReadString())\n}\n@@ -227,6 +228,7 @@ type Rwalk struct {\n// Decode implements encoder.Decode.\nfunc (r *Rwalk) Decode(b *buffer) {\nn := b.Read16()\n+ r.QIDs = r.QIDs[:0]\nfor i := 0; i < int(n); i++ {\nvar q QID\nq.Decode(b)\n@@ -1608,6 +1610,7 @@ type Rreaddir struct {\nfunc (r *Rreaddir) Decode(b *buffer) {\nr.Count = b.Read32()\nentriesBuf := buffer{data: r.payload}\n+ r.Entries = r.Entries[:0]\nfor {\nvar d Dirent\nd.Decode(&entriesBuf)\n@@ -1827,6 +1830,7 @@ func (t *Twalkgetattr) Decode(b *buffer) {\nt.FID = b.ReadFID()\nt.NewFID = b.ReadFID()\nn := b.Read16()\n+ t.Names = t.Names[:0]\nfor i := 0; i < int(n); i++ {\nt.Names = append(t.Names, b.ReadString())\n}\n@@ -1869,6 +1873,7 @@ func (r *Rwalkgetattr) Decode(b *buffer) {\nr.Valid.Decode(b)\nr.Attr.Decode(b)\nn := b.Read16()\n+ r.QIDs = r.QIDs[:0]\nfor i := 0; i < int(n); i++ {\nvar q QID\nq.Decode(b)\n@@ -2139,34 +2144,80 @@ func (r *Rlconnect) String() string {\nreturn fmt.Sprintf(\"Rlconnect{File: %v}\", r.File)\n}\n-// messageRegistry indexes all messages by type.\n-var messageRegistry = make([]func() message, math.MaxUint8)\n+const maxCacheSize = 3\n-// messageByType creates a new message by type.\n+// msgFactory is used to reduce allocations by caching messages for reuse.\n+type msgFactory struct {\n+ create func() message\n+ cache chan message\n+}\n+\n+// msgRegistry indexes all message factories by type.\n+var msgRegistry registry\n+\n+type registry struct {\n+ factories [math.MaxUint8]msgFactory\n+\n+ // largestFixedSize is computed so that given some message size M, you can\n+ // compute the maximum payload size (e.g. for Twrite, Rread) with\n+ // M-largestFixedSize. 
You could do this individual on a per-message basis,\n+ // but it's easier to compute a single maximum safe payload.\n+ largestFixedSize uint32\n+}\n+\n+// get returns a new message by type.\n//\n// An error is returned in the case of an unknown message.\n//\n// This takes, and ignores, a message tag so that it may be used directly as a\n// lookupTagAndType function for recv (by design).\n-func messageByType(_ Tag, t MsgType) (message, error) {\n- fn := messageRegistry[t]\n- if fn == nil {\n+func (r *registry) get(_ Tag, t MsgType) (message, error) {\n+ entry := &r.factories[t]\n+ if entry.create == nil {\nreturn nil, &ErrInvalidMsgType{t}\n}\n- return fn(), nil\n+\n+ select {\n+ case msg := <-entry.cache:\n+ return msg, nil\n+ default:\n+ return entry.create(), nil\n+ }\n+}\n+\n+func (r *registry) put(msg message) {\n+ if p, ok := msg.(payloader); ok {\n+ p.SetPayload(nil)\n+ }\n+ if f, ok := msg.(filer); ok {\n+ f.SetFilePayload(nil)\n+ }\n+\n+ entry := &r.factories[msg.Type()]\n+ select {\n+ case entry.cache <- msg:\n+ default:\n+ }\n}\n// register registers the given message type.\n//\n// This may cause panic on failure and should only be used from init.\n-func register(t MsgType, fn func() message) {\n- if int(t) >= len(messageRegistry) {\n- panic(fmt.Sprintf(\"message type %d is too large. It must be smaller than %d\", t, len(messageRegistry)))\n+func (r *registry) register(t MsgType, fn func() message) {\n+ if int(t) >= len(r.factories) {\n+ panic(fmt.Sprintf(\"message type %d is too large. It must be smaller than %d\", t, len(r.factories)))\n+ }\n+ if r.factories[t].create != nil {\n+ panic(fmt.Sprintf(\"duplicate message type %d: first is %T, second is %T\", t, r.factories[t].create(), fn()))\n}\n- if messageRegistry[t] != nil {\n- panic(fmt.Sprintf(\"duplicate message type %d: first is %T, second is %T\", t, messageRegistry[t](), fn()))\n+ r.factories[t] = msgFactory{\n+ create: fn,\n+ cache: make(chan message, maxCacheSize),\n+ }\n+\n+ if size := calculateSize(fn()); size > r.largestFixedSize {\n+ r.largestFixedSize = size\n}\n- messageRegistry[t] = fn\n}\nfunc calculateSize(m message) uint32 {\n@@ -2178,93 +2229,72 @@ func calculateSize(m message) uint32 {\nreturn uint32(len(dataBuf.data))\n}\n-// largestFixedSize is computed within calculateLargestSize.\n-//\n-// This is computed so that given some message size M, you can compute\n-// the maximum payload size (e.g. 
for Twrite, Rread) with M-largestFixedSize.\n-// You could do this individual on a per-message basis, but it's easier to\n-// compute a single maximum safe payload.\n-var largestFixedSize uint32\n-\n-// calculateLargestFixedSize is called from within init.\n-func calculateLargestFixedSize() {\n- for _, fn := range messageRegistry {\n- if fn != nil {\n- if size := calculateSize(fn()); size > largestFixedSize {\n- largestFixedSize = size\n- }\n- }\n- }\n-}\n-\nfunc init() {\n- register(MsgRlerror, func() message { return &Rlerror{} })\n- register(MsgTstatfs, func() message { return &Tstatfs{} })\n- register(MsgRstatfs, func() message { return &Rstatfs{} })\n- register(MsgTlopen, func() message { return &Tlopen{} })\n- register(MsgRlopen, func() message { return &Rlopen{} })\n- register(MsgTlcreate, func() message { return &Tlcreate{} })\n- register(MsgRlcreate, func() message { return &Rlcreate{} })\n- register(MsgTsymlink, func() message { return &Tsymlink{} })\n- register(MsgRsymlink, func() message { return &Rsymlink{} })\n- register(MsgTmknod, func() message { return &Tmknod{} })\n- register(MsgRmknod, func() message { return &Rmknod{} })\n- register(MsgTrename, func() message { return &Trename{} })\n- register(MsgRrename, func() message { return &Rrename{} })\n- register(MsgTreadlink, func() message { return &Treadlink{} })\n- register(MsgRreadlink, func() message { return &Rreadlink{} })\n- register(MsgTgetattr, func() message { return &Tgetattr{} })\n- register(MsgRgetattr, func() message { return &Rgetattr{} })\n- register(MsgTsetattr, func() message { return &Tsetattr{} })\n- register(MsgRsetattr, func() message { return &Rsetattr{} })\n- register(MsgTxattrwalk, func() message { return &Txattrwalk{} })\n- register(MsgRxattrwalk, func() message { return &Rxattrwalk{} })\n- register(MsgTxattrcreate, func() message { return &Txattrcreate{} })\n- register(MsgRxattrcreate, func() message { return &Rxattrcreate{} })\n- register(MsgTreaddir, func() message { return &Treaddir{} })\n- register(MsgRreaddir, func() message { return &Rreaddir{} })\n- register(MsgTfsync, func() message { return &Tfsync{} })\n- register(MsgRfsync, func() message { return &Rfsync{} })\n- register(MsgTlink, func() message { return &Tlink{} })\n- register(MsgRlink, func() message { return &Rlink{} })\n- register(MsgTmkdir, func() message { return &Tmkdir{} })\n- register(MsgRmkdir, func() message { return &Rmkdir{} })\n- register(MsgTrenameat, func() message { return &Trenameat{} })\n- register(MsgRrenameat, func() message { return &Rrenameat{} })\n- register(MsgTunlinkat, func() message { return &Tunlinkat{} })\n- register(MsgRunlinkat, func() message { return &Runlinkat{} })\n- register(MsgTversion, func() message { return &Tversion{} })\n- register(MsgRversion, func() message { return &Rversion{} })\n- register(MsgTauth, func() message { return &Tauth{} })\n- register(MsgRauth, func() message { return &Rauth{} })\n- register(MsgTattach, func() message { return &Tattach{} })\n- register(MsgRattach, func() message { return &Rattach{} })\n- register(MsgTflush, func() message { return &Tflush{} })\n- register(MsgRflush, func() message { return &Rflush{} })\n- register(MsgTwalk, func() message { return &Twalk{} })\n- register(MsgRwalk, func() message { return &Rwalk{} })\n- register(MsgTread, func() message { return &Tread{} })\n- register(MsgRread, func() message { return &Rread{} })\n- register(MsgTwrite, func() message { return &Twrite{} })\n- register(MsgRwrite, func() message { return &Rwrite{} })\n- 
register(MsgTclunk, func() message { return &Tclunk{} })\n- register(MsgRclunk, func() message { return &Rclunk{} })\n- register(MsgTremove, func() message { return &Tremove{} })\n- register(MsgRremove, func() message { return &Rremove{} })\n- register(MsgTflushf, func() message { return &Tflushf{} })\n- register(MsgRflushf, func() message { return &Rflushf{} })\n- register(MsgTwalkgetattr, func() message { return &Twalkgetattr{} })\n- register(MsgRwalkgetattr, func() message { return &Rwalkgetattr{} })\n- register(MsgTucreate, func() message { return &Tucreate{} })\n- register(MsgRucreate, func() message { return &Rucreate{} })\n- register(MsgTumkdir, func() message { return &Tumkdir{} })\n- register(MsgRumkdir, func() message { return &Rumkdir{} })\n- register(MsgTumknod, func() message { return &Tumknod{} })\n- register(MsgRumknod, func() message { return &Rumknod{} })\n- register(MsgTusymlink, func() message { return &Tusymlink{} })\n- register(MsgRusymlink, func() message { return &Rusymlink{} })\n- register(MsgTlconnect, func() message { return &Tlconnect{} })\n- register(MsgRlconnect, func() message { return &Rlconnect{} })\n-\n- calculateLargestFixedSize()\n+ msgRegistry.register(MsgRlerror, func() message { return &Rlerror{} })\n+ msgRegistry.register(MsgTstatfs, func() message { return &Tstatfs{} })\n+ msgRegistry.register(MsgRstatfs, func() message { return &Rstatfs{} })\n+ msgRegistry.register(MsgTlopen, func() message { return &Tlopen{} })\n+ msgRegistry.register(MsgRlopen, func() message { return &Rlopen{} })\n+ msgRegistry.register(MsgTlcreate, func() message { return &Tlcreate{} })\n+ msgRegistry.register(MsgRlcreate, func() message { return &Rlcreate{} })\n+ msgRegistry.register(MsgTsymlink, func() message { return &Tsymlink{} })\n+ msgRegistry.register(MsgRsymlink, func() message { return &Rsymlink{} })\n+ msgRegistry.register(MsgTmknod, func() message { return &Tmknod{} })\n+ msgRegistry.register(MsgRmknod, func() message { return &Rmknod{} })\n+ msgRegistry.register(MsgTrename, func() message { return &Trename{} })\n+ msgRegistry.register(MsgRrename, func() message { return &Rrename{} })\n+ msgRegistry.register(MsgTreadlink, func() message { return &Treadlink{} })\n+ msgRegistry.register(MsgRreadlink, func() message { return &Rreadlink{} })\n+ msgRegistry.register(MsgTgetattr, func() message { return &Tgetattr{} })\n+ msgRegistry.register(MsgRgetattr, func() message { return &Rgetattr{} })\n+ msgRegistry.register(MsgTsetattr, func() message { return &Tsetattr{} })\n+ msgRegistry.register(MsgRsetattr, func() message { return &Rsetattr{} })\n+ msgRegistry.register(MsgTxattrwalk, func() message { return &Txattrwalk{} })\n+ msgRegistry.register(MsgRxattrwalk, func() message { return &Rxattrwalk{} })\n+ msgRegistry.register(MsgTxattrcreate, func() message { return &Txattrcreate{} })\n+ msgRegistry.register(MsgRxattrcreate, func() message { return &Rxattrcreate{} })\n+ msgRegistry.register(MsgTreaddir, func() message { return &Treaddir{} })\n+ msgRegistry.register(MsgRreaddir, func() message { return &Rreaddir{} })\n+ msgRegistry.register(MsgTfsync, func() message { return &Tfsync{} })\n+ msgRegistry.register(MsgRfsync, func() message { return &Rfsync{} })\n+ msgRegistry.register(MsgTlink, func() message { return &Tlink{} })\n+ msgRegistry.register(MsgRlink, func() message { return &Rlink{} })\n+ msgRegistry.register(MsgTmkdir, func() message { return &Tmkdir{} })\n+ msgRegistry.register(MsgRmkdir, func() message { return &Rmkdir{} })\n+ msgRegistry.register(MsgTrenameat, 
func() message { return &Trenameat{} })\n+ msgRegistry.register(MsgRrenameat, func() message { return &Rrenameat{} })\n+ msgRegistry.register(MsgTunlinkat, func() message { return &Tunlinkat{} })\n+ msgRegistry.register(MsgRunlinkat, func() message { return &Runlinkat{} })\n+ msgRegistry.register(MsgTversion, func() message { return &Tversion{} })\n+ msgRegistry.register(MsgRversion, func() message { return &Rversion{} })\n+ msgRegistry.register(MsgTauth, func() message { return &Tauth{} })\n+ msgRegistry.register(MsgRauth, func() message { return &Rauth{} })\n+ msgRegistry.register(MsgTattach, func() message { return &Tattach{} })\n+ msgRegistry.register(MsgRattach, func() message { return &Rattach{} })\n+ msgRegistry.register(MsgTflush, func() message { return &Tflush{} })\n+ msgRegistry.register(MsgRflush, func() message { return &Rflush{} })\n+ msgRegistry.register(MsgTwalk, func() message { return &Twalk{} })\n+ msgRegistry.register(MsgRwalk, func() message { return &Rwalk{} })\n+ msgRegistry.register(MsgTread, func() message { return &Tread{} })\n+ msgRegistry.register(MsgRread, func() message { return &Rread{} })\n+ msgRegistry.register(MsgTwrite, func() message { return &Twrite{} })\n+ msgRegistry.register(MsgRwrite, func() message { return &Rwrite{} })\n+ msgRegistry.register(MsgTclunk, func() message { return &Tclunk{} })\n+ msgRegistry.register(MsgRclunk, func() message { return &Rclunk{} })\n+ msgRegistry.register(MsgTremove, func() message { return &Tremove{} })\n+ msgRegistry.register(MsgRremove, func() message { return &Rremove{} })\n+ msgRegistry.register(MsgTflushf, func() message { return &Tflushf{} })\n+ msgRegistry.register(MsgRflushf, func() message { return &Rflushf{} })\n+ msgRegistry.register(MsgTwalkgetattr, func() message { return &Twalkgetattr{} })\n+ msgRegistry.register(MsgRwalkgetattr, func() message { return &Rwalkgetattr{} })\n+ msgRegistry.register(MsgTucreate, func() message { return &Tucreate{} })\n+ msgRegistry.register(MsgRucreate, func() message { return &Rucreate{} })\n+ msgRegistry.register(MsgTumkdir, func() message { return &Tumkdir{} })\n+ msgRegistry.register(MsgRumkdir, func() message { return &Rumkdir{} })\n+ msgRegistry.register(MsgTumknod, func() message { return &Tumknod{} })\n+ msgRegistry.register(MsgRumknod, func() message { return &Rumknod{} })\n+ msgRegistry.register(MsgTusymlink, func() message { return &Tusymlink{} })\n+ msgRegistry.register(MsgRusymlink, func() message { return &Rusymlink{} })\n+ msgRegistry.register(MsgTlconnect, func() message { return &Tlconnect{} })\n+ msgRegistry.register(MsgRlconnect, func() message { return &Rlconnect{} })\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/messages_test.go", "new_path": "pkg/p9/messages_test.go", "diff": "@@ -399,8 +399,9 @@ func TestEncodeDecode(t *testing.T) {\n}\nfunc TestMessageStrings(t *testing.T) {\n- for typ, fn := range messageRegistry {\n- if fn != nil {\n+ for typ := range msgRegistry.factories {\n+ entry := &msgRegistry.factories[typ]\n+ if entry.create != nil {\nname := fmt.Sprintf(\"%+v\", typ)\nt.Run(name, func(t *testing.T) {\ndefer func() { // Ensure no panic.\n@@ -408,7 +409,7 @@ func TestMessageStrings(t *testing.T) {\nt.Errorf(\"printing %s failed: %v\", name, r)\n}\n}()\n- m := fn()\n+ m := entry.create()\n_ = fmt.Sprintf(\"%v\", m)\nerr := ErrInvalidMsgType{MsgType(typ)}\n_ = err.Error()\n@@ -426,5 +427,42 @@ func TestRegisterDuplicate(t *testing.T) {\n}()\n// Register a duplicate.\n- register(MsgRlerror, func() message { return &Rlerror{} })\n+ 
msgRegistry.register(MsgRlerror, func() message { return &Rlerror{} })\n+}\n+\n+func TestMsgCache(t *testing.T) {\n+ // Cache starts empty.\n+ if got, want := len(msgRegistry.factories[MsgRlerror].cache), 0; got != want {\n+ t.Errorf(\"Wrong cache size, got: %d, want: %d\", got, want)\n+ }\n+\n+ // Message can be created with an empty cache.\n+ msg, err := msgRegistry.get(0, MsgRlerror)\n+ if err != nil {\n+ t.Errorf(\"msgRegistry.get(): %v\", err)\n+ }\n+ if got, want := len(msgRegistry.factories[MsgRlerror].cache), 0; got != want {\n+ t.Errorf(\"Wrong cache size, got: %d, want: %d\", got, want)\n+ }\n+\n+ // Check that message is added to the cache when returned.\n+ msgRegistry.put(msg)\n+ if got, want := len(msgRegistry.factories[MsgRlerror].cache), 1; got != want {\n+ t.Errorf(\"Wrong cache size, got: %d, want: %d\", got, want)\n+ }\n+\n+ // Check that returned message is reused.\n+ if got, err := msgRegistry.get(0, MsgRlerror); err != nil {\n+ t.Errorf(\"msgRegistry.get(): %v\", err)\n+ } else if msg != got {\n+ t.Errorf(\"Message not reused, got: %d, want: %d\", got, msg)\n+ }\n+\n+ // Check that cache doesn't grow beyond max size.\n+ for i := 0; i < maxCacheSize+1; i++ {\n+ msgRegistry.put(&Rlerror{})\n+ }\n+ if got, want := len(msgRegistry.factories[MsgRlerror].cache), maxCacheSize; got != want {\n+ t.Errorf(\"Wrong cache size, got: %d, want: %d\", got, want)\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/server.go", "new_path": "pkg/p9/server.go", "diff": "@@ -395,7 +395,7 @@ func (cs *connState) handleRequest() {\n}\n// Receive a message.\n- tag, m, err := recv(cs.conn, messageSize, messageByType)\n+ tag, m, err := recv(cs.conn, messageSize, msgRegistry.get)\nif errSocket, ok := err.(ErrSocket); ok {\n// Connection problem; stop serving.\ncs.recvDone <- errSocket.error\n@@ -458,6 +458,8 @@ func (cs *connState) handleRequest() {\n// Produce an ENOSYS error.\nr = newErr(syscall.ENOSYS)\n}\n+ msgRegistry.put(m)\n+ m = nil // 'm' should not be touched after this point.\n}\nfunc (cs *connState) handleRequests() {\n" }, { "change_type": "MODIFY", "old_path": "pkg/p9/transport_test.go", "new_path": "pkg/p9/transport_test.go", "diff": "@@ -41,7 +41,7 @@ func TestSendRecv(t *testing.T) {\nt.Fatalf(\"send got err %v expected nil\", err)\n}\n- tag, m, err := recv(server, maximumLength, messageByType)\n+ tag, m, err := recv(server, maximumLength, msgRegistry.get)\nif err != nil {\nt.Fatalf(\"recv got err %v expected nil\", err)\n}\n@@ -73,7 +73,7 @@ func TestRecvOverrun(t *testing.T) {\nt.Fatalf(\"send got err %v expected nil\", err)\n}\n- if _, _, err := recv(server, maximumLength, messageByType); err == nil {\n+ if _, _, err := recv(server, maximumLength, msgRegistry.get); err == nil {\nt.Fatalf(\"recv got err %v expected ErrSocket{ErrNoValidMessage}\", err)\n}\n}\n@@ -98,7 +98,7 @@ func TestRecvInvalidType(t *testing.T) {\nt.Fatalf(\"send got err %v expected nil\", err)\n}\n- _, _, err = recv(server, maximumLength, messageByType)\n+ _, _, err = recv(server, maximumLength, msgRegistry.get)\nif _, ok := err.(*ErrInvalidMsgType); !ok {\nt.Fatalf(\"recv got err %v expected ErrInvalidMsgType\", err)\n}\n@@ -129,7 +129,7 @@ func TestSendRecvWithFile(t *testing.T) {\n}\n// Enable withFile.\n- tag, m, err := recv(server, maximumLength, messageByType)\n+ tag, m, err := recv(server, maximumLength, msgRegistry.get)\nif err != nil {\nt.Fatalf(\"recv got err %v expected nil\", err)\n}\n@@ -153,7 +153,7 @@ func TestRecvClosed(t *testing.T) {\ndefer server.Close()\nclient.Close()\n- _, _, 
err = recv(server, maximumLength, messageByType)\n+ _, _, err = recv(server, maximumLength, msgRegistry.get)\nif err == nil {\nt.Fatalf(\"got err nil expected non-nil\")\n}\n@@ -180,5 +180,5 @@ func TestSendClosed(t *testing.T) {\n}\nfunc init() {\n- register(MsgTypeBadDecode, func() message { return &badDecode{} })\n+ msgRegistry.register(MsgTypeBadDecode, func() message { return &badDecode{} })\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Reduce memory allocations on serving path Cache last used messages and reuse them for subsequent requests. If more messages are needed, they are created outside the cache on demand. PiperOrigin-RevId: 245836910 Change-Id: Icf099ddff95df420db8e09f5cdd41dcdce406c61
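The record above describes a per-message-type free list for the p9 serving path: reuse returned messages, allocate fresh ones when the cache is empty, drop returns once it is full. The sketch below is a generic, simplified version of that idea; `Message`, `Cache`, and the `maxCacheSize` bound are placeholders, not the real p9 types (the real registry keys one cache per message type).

```go
// A minimal sketch of the bounded free-list idea described above.
package msgcache

import "sync"

const maxCacheSize = 64 // assumed bound for illustration

// Message stands in for a reusable protocol message.
type Message struct {
	buf []byte
}

// Cache is a fixed-capacity pool of Messages.
type Cache struct {
	mu   sync.Mutex
	free []*Message
}

// Get reuses a cached Message when possible and allocates otherwise.
func (c *Cache) Get() *Message {
	c.mu.Lock()
	defer c.mu.Unlock()
	if n := len(c.free); n > 0 {
		m := c.free[n-1]
		c.free = c.free[:n-1]
		return m
	}
	return &Message{}
}

// Put returns a Message for reuse, dropping it once the cache is full.
func (c *Cache) Put(m *Message) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.free) < maxCacheSize {
		c.free = append(c.free, m)
	}
}
```

sync.Pool would also provide reuse, but an explicit bounded cache keeps the reuse behavior predictable per message type, which is the point of the change.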
259,992
29.04.2019 18:40:51
25,200
2843f2a956f5ef23e621f571f5c3e6a1e4a8223a
Skip flaky ClockGettime.CputimeId Test times out when it runs on a single core. Skip until the bug in the Go runtime is fixed.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/clock_gettime.cc", "new_path": "test/syscalls/linux/clock_gettime.cc", "diff": "@@ -55,6 +55,10 @@ void spin_ns(int64_t ns) {\n// Test that CLOCK_PROCESS_CPUTIME_ID is a superset of CLOCK_THREAD_CPUTIME_ID.\nTEST(ClockGettime, CputimeId) {\n+ // TODO(b/128871825,golang.org/issue/10958): Test times out when there is a\n+ // single core because one goroutine starves the others.\n+ SKIP_IF(std::thread::hardware_concurrency() == 1);\n+\nconstexpr int kNumThreads = 13; // arbitrary\nabsl::Duration spin_time = absl::Seconds(1);\n" } ]
Go
Apache License 2.0
google/gvisor
Skip flaky ClockGettime.CputimeId Test times out when it runs on a single core. Skip until the bug in the Go runtime is fixed. PiperOrigin-RevId: 245866466 Change-Id: Ic3e72131c27136d58b71f6b11acc78abf55895d4
259,854
29.04.2019 21:20:05
25,200
81ecd8b6eab7457b331762626f8c210fec3504e6
Implement the MSG_CTRUNC msghdr flag for Unix sockets. Updates google/gvisor#206
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/control.go", "new_path": "pkg/sentry/fs/host/control.go", "diff": "@@ -32,17 +32,20 @@ func newSCMRights(fds []int) control.SCMRights {\n}\n// Files implements control.SCMRights.Files.\n-func (c *scmRights) Files(ctx context.Context, max int) control.RightsFiles {\n+func (c *scmRights) Files(ctx context.Context, max int) (control.RightsFiles, bool) {\nn := max\n+ var trunc bool\nif l := len(c.fds); n > l {\nn = l\n+ } else if n < l {\n+ trunc = true\n}\nrf := control.RightsFiles(fdsToFiles(ctx, c.fds[:n]))\n// Only consume converted FDs (fdsToFiles may convert fewer than n FDs).\nc.fds = c.fds[len(rf):]\n- return rf\n+ return rf, trunc\n}\n// Clone implements transport.RightsControlMessage.Clone.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket.go", "new_path": "pkg/sentry/fs/host/socket.go", "diff": "@@ -282,11 +282,11 @@ func (c *ConnectedEndpoint) EventUpdate() {\n}\n// Recv implements transport.Receiver.Recv.\n-func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, transport.ControlMessages, tcpip.FullAddress, bool, *syserr.Error) {\n+func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, transport.ControlMessages, bool, tcpip.FullAddress, bool, *syserr.Error) {\nc.mu.RLock()\ndefer c.mu.RUnlock()\nif c.readClosed {\n- return 0, 0, transport.ControlMessages{}, tcpip.FullAddress{}, false, syserr.ErrClosedForReceive\n+ return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.ErrClosedForReceive\n}\nvar cm unet.ControlMessage\n@@ -296,7 +296,7 @@ func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, p\n// N.B. Unix sockets don't have a receive buffer, the send buffer\n// serves both purposes.\n- rl, ml, cl, err := fdReadVec(c.file.FD(), data, []byte(cm), peek, c.sndbuf)\n+ rl, ml, cl, cTrunc, err := fdReadVec(c.file.FD(), data, []byte(cm), peek, c.sndbuf)\nif rl > 0 && err != nil {\n// We got some data, so all we need to do on error is return\n// the data that we got. 
Short reads are fine, no need to\n@@ -304,7 +304,7 @@ func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, p\nerr = nil\n}\nif err != nil {\n- return 0, 0, transport.ControlMessages{}, tcpip.FullAddress{}, false, syserr.FromError(err)\n+ return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.FromError(err)\n}\n// There is no need for the callee to call RecvNotify because fdReadVec uses\n@@ -317,18 +317,18 @@ func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, p\n// Avoid extra allocations in the case where there isn't any control data.\nif len(cm) == 0 {\n- return rl, ml, transport.ControlMessages{}, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n+ return rl, ml, transport.ControlMessages{}, cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n}\nfds, err := cm.ExtractFDs()\nif err != nil {\n- return 0, 0, transport.ControlMessages{}, tcpip.FullAddress{}, false, syserr.FromError(err)\n+ return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.FromError(err)\n}\nif len(fds) == 0 {\n- return rl, ml, transport.ControlMessages{}, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n+ return rl, ml, transport.ControlMessages{}, cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n}\n- return rl, ml, control.New(nil, nil, newSCMRights(fds)), tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n+ return rl, ml, control.New(nil, nil, newSCMRights(fds)), cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n}\n// close releases all resources related to the endpoint.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket_test.go", "new_path": "pkg/sentry/fs/host/socket_test.go", "diff": "@@ -207,7 +207,7 @@ func TestSend(t *testing.T) {\nfunc TestRecv(t *testing.T) {\ne := ConnectedEndpoint{readClosed: true}\n- if _, _, _, _, _, err := e.Recv(nil, false, 0, false); err != syserr.ErrClosedForReceive {\n+ if _, _, _, _, _, _, err := e.Recv(nil, false, 0, false); err != syserr.ErrClosedForReceive {\nt.Errorf(\"Got %#v.Recv() = %v, want = %v\", e, err, syserr.ErrClosedForReceive)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/socket_unsafe.go", "new_path": "pkg/sentry/fs/host/socket_unsafe.go", "diff": "@@ -23,7 +23,7 @@ import (\n//\n// If the total length of bufs is > maxlen, fdReadVec will do a partial read\n// and err will indicate why the message was truncated.\n-func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int) (readLen uintptr, msgLen uintptr, controlLen uint64, err error) {\n+func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int) (readLen uintptr, msgLen uintptr, controlLen uint64, controlTrunc bool, err error) {\nflags := uintptr(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC)\nif peek {\nflags |= syscall.MSG_PEEK\n@@ -34,7 +34,7 @@ func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int) (re\nlength, iovecs, intermediate, err := buildIovec(bufs, maxlen, true)\nif err != nil && len(iovecs) == 0 {\n// No partial write to do, return error immediately.\n- return 0, 0, 0, err\n+ return 0, 0, 0, false, err\n}\nvar msg syscall.Msghdr\n@@ -51,7 +51,7 @@ func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int) (re\nn, _, e := syscall.RawSyscall(syscall.SYS_RECVMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), flags)\nif e != 0 {\n// N.B. 
prioritize the syscall error over the buildIovec error.\n- return 0, 0, 0, e\n+ return 0, 0, 0, false, e\n}\n// Copy data back to bufs.\n@@ -59,11 +59,13 @@ func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int) (re\ncopyToMulti(bufs, intermediate)\n}\n+ controlTrunc = msg.Flags&syscall.MSG_CTRUNC == syscall.MSG_CTRUNC\n+\nif n > length {\n- return length, n, msg.Controllen, err\n+ return length, n, msg.Controllen, controlTrunc, err\n}\n- return n, n, msg.Controllen, err\n+ return n, n, msg.Controllen, controlTrunc, err\n}\n// fdWriteVec sends from bufs to fd.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/control/control.go", "new_path": "pkg/sentry/socket/control/control.go", "diff": "@@ -45,7 +45,10 @@ type SCMRights interface {\ntransport.RightsControlMessage\n// Files returns up to max RightsFiles.\n- Files(ctx context.Context, max int) RightsFiles\n+ //\n+ // Returned files are consumed and ownership is transferred to the caller.\n+ // Subsequent calls to Files will return the next files.\n+ Files(ctx context.Context, max int) (rf RightsFiles, truncated bool)\n}\n// RightsFiles represents a SCM_RIGHTS socket control message. A reference is\n@@ -71,14 +74,17 @@ func NewSCMRights(t *kernel.Task, fds []int32) (SCMRights, error) {\n}\n// Files implements SCMRights.Files.\n-func (fs *RightsFiles) Files(ctx context.Context, max int) RightsFiles {\n+func (fs *RightsFiles) Files(ctx context.Context, max int) (RightsFiles, bool) {\nn := max\n+ var trunc bool\nif l := len(*fs); n > l {\nn = l\n+ } else if n < l {\n+ trunc = true\n}\nrf := (*fs)[:n]\n*fs = (*fs)[n:]\n- return rf\n+ return rf, trunc\n}\n// Clone implements transport.RightsControlMessage.Clone.\n@@ -99,8 +105,8 @@ func (fs *RightsFiles) Release() {\n}\n// rightsFDs gets up to the specified maximum number of FDs.\n-func rightsFDs(t *kernel.Task, rights SCMRights, cloexec bool, max int) []int32 {\n- files := rights.Files(t, max)\n+func rightsFDs(t *kernel.Task, rights SCMRights, cloexec bool, max int) ([]int32, bool) {\n+ files, trunc := rights.Files(t, max)\nfds := make([]int32, 0, len(files))\nfor i := 0; i < max && len(files) > 0; i++ {\nfd, err := t.FDMap().NewFDFrom(0, files[0], kernel.FDFlags{cloexec}, t.ThreadGroup().Limits())\n@@ -114,19 +120,23 @@ func rightsFDs(t *kernel.Task, rights SCMRights, cloexec bool, max int) []int32\nfds = append(fds, int32(fd))\n}\n- return fds\n+ return fds, trunc\n}\n// PackRights packs as many FDs as will fit into the unused capacity of buf.\n-func PackRights(t *kernel.Task, rights SCMRights, cloexec bool, buf []byte) []byte {\n+func PackRights(t *kernel.Task, rights SCMRights, cloexec bool, buf []byte, flags int) ([]byte, int) {\nmaxFDs := (cap(buf) - len(buf) - linux.SizeOfControlMessageHeader) / 4\n// Linux does not return any FDs if none fit.\nif maxFDs <= 0 {\n- return buf\n+ flags |= linux.MSG_CTRUNC\n+ return buf, flags\n+ }\n+ fds, trunc := rightsFDs(t, rights, cloexec, maxFDs)\n+ if trunc {\n+ flags |= linux.MSG_CTRUNC\n}\n- fds := rightsFDs(t, rights, cloexec, maxFDs)\nalign := t.Arch().Width()\n- return putCmsg(buf, linux.SCM_RIGHTS, align, fds)\n+ return putCmsg(buf, flags, linux.SCM_RIGHTS, align, fds)\n}\n// scmCredentials represents an SCM_CREDENTIALS socket control message.\n@@ -176,7 +186,7 @@ func putUint32(buf []byte, n uint32) []byte {\n// putCmsg writes a control message header and as much data as will fit into\n// the unused capacity of a buffer.\n-func putCmsg(buf []byte, msgType uint32, align uint, data []int32) []byte {\n+func 
putCmsg(buf []byte, flags int, msgType uint32, align uint, data []int32) ([]byte, int) {\nspace := AlignDown(cap(buf)-len(buf), 4)\n// We can't write to space that doesn't exist, so if we are going to align\n@@ -193,7 +203,8 @@ func putCmsg(buf []byte, msgType uint32, align uint, data []int32) []byte {\n// a partial int32, so the length of the message will be\n// min(aligned length, header + datas).\nif space < linux.SizeOfControlMessageHeader {\n- return buf\n+ flags |= linux.MSG_CTRUNC\n+ return buf, flags\n}\nlength := 4*len(data) + linux.SizeOfControlMessageHeader\n@@ -205,11 +216,12 @@ func putCmsg(buf []byte, msgType uint32, align uint, data []int32) []byte {\nbuf = putUint32(buf, msgType)\nfor _, d := range data {\nif len(buf)+4 > cap(buf) {\n+ flags |= linux.MSG_CTRUNC\nbreak\n}\nbuf = putUint32(buf, uint32(d))\n}\n- return alignSlice(buf, align)\n+ return alignSlice(buf, align), flags\n}\nfunc putCmsgStruct(buf []byte, msgType uint32, align uint, data interface{}) []byte {\n@@ -253,7 +265,7 @@ func (c *scmCredentials) Credentials(t *kernel.Task) (kernel.ThreadID, auth.UID,\n// PackCredentials packs the credentials in the control message (or default\n// credentials if none) into a buffer.\n-func PackCredentials(t *kernel.Task, creds SCMCredentials, buf []byte) []byte {\n+func PackCredentials(t *kernel.Task, creds SCMCredentials, buf []byte, flags int) ([]byte, int) {\nalign := t.Arch().Width()\n// Default credentials if none are available.\n@@ -265,7 +277,7 @@ func PackCredentials(t *kernel.Task, creds SCMCredentials, buf []byte) []byte {\npid, uid, gid = creds.Credentials(t)\n}\nc := []int32{int32(pid), int32(uid), int32(gid)}\n- return putCmsg(buf, linux.SCM_CREDENTIALS, align, c)\n+ return putCmsg(buf, flags, linux.SCM_CREDENTIALS, align, c)\n}\n// AlignUp rounds a length up to an alignment. align must be a power of 2.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/socket.go", "new_path": "pkg/sentry/socket/socket.go", "diff": "@@ -87,6 +87,11 @@ type Socket interface {\n// senderAddrLen is the address length to be returned to the application,\n// not necessarily the actual length of the address.\n//\n+ // flags control how RecvMsg should be completed. msgFlags indicate how\n+ // the RecvMsg call was completed. Note that control message truncation\n+ // may still be required even if the MSG_CTRUNC bit is not set in\n+ // msgFlags. 
In that case, the caller should set MSG_CTRUNC appropriately.\n+ //\n// If err != nil, the recv was not successful.\nRecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, msgFlags int, senderAddr interface{}, senderAddrLen uint32, controlMessages ControlMessages, err *syserr.Error)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/io.go", "new_path": "pkg/sentry/socket/unix/io.go", "diff": "@@ -72,13 +72,18 @@ type EndpointReader struct {\n// Control contains the received control messages.\nControl transport.ControlMessages\n+\n+ // ControlTrunc indicates that SCM_RIGHTS FDs were discarded based on\n+ // the value of NumRights.\n+ ControlTrunc bool\n}\n// ReadToBlocks implements safemem.Reader.ReadToBlocks.\nfunc (r *EndpointReader) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {\nreturn safemem.FromVecReaderFunc{func(bufs [][]byte) (int64, error) {\n- n, ms, c, err := r.Endpoint.RecvMsg(bufs, r.Creds, r.NumRights, r.Peek, r.From)\n+ n, ms, c, ct, err := r.Endpoint.RecvMsg(bufs, r.Creds, r.NumRights, r.Peek, r.From)\nr.Control = c\n+ r.ControlTrunc = ct\nr.MsgSize = ms\nif err != nil {\nreturn int64(n), err.ToError()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/unix.go", "new_path": "pkg/sentry/socket/unix/transport/unix.go", "diff": "@@ -130,7 +130,11 @@ type Endpoint interface {\n//\n// msgLen is the length of the read message consumed for datagram Endpoints.\n// msgLen is always the same as recvLen for stream Endpoints.\n- RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (recvLen, msgLen uintptr, cm ControlMessages, err *syserr.Error)\n+ //\n+ // CMTruncated indicates that the numRights hint was used to receive fewer\n+ // than the total available SCM_RIGHTS FDs. Additional truncation may be\n+ // required by the caller.\n+ RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (recvLen, msgLen uintptr, cm ControlMessages, CMTruncated bool, err *syserr.Error)\n// SendMsg writes data and a control message to the endpoint's peer.\n// This method does not block if the data cannot be written.\n@@ -288,7 +292,7 @@ type Receiver interface {\n// See Endpoint.RecvMsg for documentation on shared arguments.\n//\n// notify indicates if RecvNotify should be called.\n- Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (recvLen, msgLen uintptr, cm ControlMessages, source tcpip.FullAddress, notify bool, err *syserr.Error)\n+ Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (recvLen, msgLen uintptr, cm ControlMessages, CMTruncated bool, source tcpip.FullAddress, notify bool, err *syserr.Error)\n// RecvNotify notifies the Receiver of a successful Recv. 
This must not be\n// called while holding any endpoint locks.\n@@ -328,7 +332,7 @@ type queueReceiver struct {\n}\n// Recv implements Receiver.Recv.\n-func (q *queueReceiver) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, tcpip.FullAddress, bool, *syserr.Error) {\n+func (q *queueReceiver) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, bool, tcpip.FullAddress, bool, *syserr.Error) {\nvar m *message\nvar notify bool\nvar err *syserr.Error\n@@ -338,7 +342,7 @@ func (q *queueReceiver) Recv(data [][]byte, creds bool, numRights uintptr, peek\nm, notify, err = q.readQueue.Dequeue()\n}\nif err != nil {\n- return 0, 0, ControlMessages{}, tcpip.FullAddress{}, false, err\n+ return 0, 0, ControlMessages{}, false, tcpip.FullAddress{}, false, err\n}\nsrc := []byte(m.Data)\nvar copied uintptr\n@@ -347,7 +351,7 @@ func (q *queueReceiver) Recv(data [][]byte, creds bool, numRights uintptr, peek\ncopied += uintptr(n)\nsrc = src[n:]\n}\n- return copied, uintptr(len(m.Data)), m.Control, m.Address, notify, nil\n+ return copied, uintptr(len(m.Data)), m.Control, false, m.Address, notify, nil\n}\n// RecvNotify implements Receiver.RecvNotify.\n@@ -440,7 +444,7 @@ func (q *streamQueueReceiver) RecvMaxQueueSize() int64 {\n}\n// Recv implements Receiver.Recv.\n-func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, tcpip.FullAddress, bool, *syserr.Error) {\n+func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, bool, tcpip.FullAddress, bool, *syserr.Error) {\nq.mu.Lock()\ndefer q.mu.Unlock()\n@@ -453,7 +457,7 @@ func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uint\n// the next time Recv() is called.\nm, n, err := q.readQueue.Dequeue()\nif err != nil {\n- return 0, 0, ControlMessages{}, tcpip.FullAddress{}, false, err\n+ return 0, 0, ControlMessages{}, false, tcpip.FullAddress{}, false, err\n}\nnotify = n\nq.buffer = []byte(m.Data)\n@@ -469,7 +473,7 @@ func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uint\n// Don't consume data since we are peeking.\ncopied, data, _ = vecCopy(data, q.buffer)\n- return copied, copied, c, q.addr, notify, nil\n+ return copied, copied, c, false, q.addr, notify, nil\n}\n// Consume data and control message since we are not peeking.\n@@ -484,9 +488,11 @@ func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uint\nc.Credentials = nil\n}\n+ var cmTruncated bool\nif c.Rights != nil && numRights == 0 {\nc.Rights.Release()\nc.Rights = nil\n+ cmTruncated = true\n}\nhaveRights := c.Rights != nil\n@@ -538,6 +544,7 @@ func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uint\nif q.control.Rights != nil {\n// Consume rights.\nif numRights == 0 {\n+ cmTruncated = true\nq.control.Rights.Release()\n} else {\nc.Rights = q.control.Rights\n@@ -546,7 +553,7 @@ func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uint\nq.control.Rights = nil\n}\n}\n- return copied, copied, c, q.addr, notify, nil\n+ return copied, copied, c, cmTruncated, q.addr, notify, nil\n}\n// A ConnectedEndpoint is an Endpoint that can be used to send Messages.\n@@ -775,18 +782,18 @@ func (e *baseEndpoint) Connected() bool {\n}\n// RecvMsg reads data and a control message from the endpoint.\n-func (e *baseEndpoint) RecvMsg(data [][]byte, creds bool, numRights 
uintptr, peek bool, addr *tcpip.FullAddress) (uintptr, uintptr, ControlMessages, *syserr.Error) {\n+func (e *baseEndpoint) RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (uintptr, uintptr, ControlMessages, bool, *syserr.Error) {\ne.Lock()\nif e.receiver == nil {\ne.Unlock()\n- return 0, 0, ControlMessages{}, syserr.ErrNotConnected\n+ return 0, 0, ControlMessages{}, false, syserr.ErrNotConnected\n}\n- recvLen, msgLen, cms, a, notify, err := e.receiver.Recv(data, creds, numRights, peek)\n+ recvLen, msgLen, cms, cmt, a, notify, err := e.receiver.Recv(data, creds, numRights, peek)\ne.Unlock()\nif err != nil {\n- return 0, 0, ControlMessages{}, err\n+ return 0, 0, ControlMessages{}, false, err\n}\nif notify {\n@@ -796,7 +803,7 @@ func (e *baseEndpoint) RecvMsg(data [][]byte, creds bool, numRights uintptr, pee\nif addr != nil {\n*addr = a\n}\n- return recvLen, msgLen, cms, nil\n+ return recvLen, msgLen, cms, cmt, nil\n}\n// SendMsg writes data and a control message to the endpoint's peer.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -490,6 +490,9 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif s.Passcred() {\n// Credentials take priority if they are enabled and there is space.\nwantCreds = rightsLen > 0\n+ if !wantCreds {\n+ msgFlags |= linux.MSG_CTRUNC\n+ }\ncredLen := syscall.CmsgSpace(syscall.SizeofUcred)\nrightsLen -= credLen\n}\n@@ -516,6 +519,10 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nfrom, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n}\n+ if r.ControlTrunc {\n+ msgFlags |= linux.MSG_CTRUNC\n+ }\n+\nif err != nil || dontWait || !waitAll || s.isPacket || n >= dst.NumBytes() {\nif s.isPacket && n < int64(r.MsgSize) {\nmsgFlags |= linux.MSG_TRUNC\n@@ -546,12 +553,18 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\nif r.From != nil {\nfrom, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From)\n}\n+\n+ if r.ControlTrunc {\n+ msgFlags |= linux.MSG_CTRUNC\n+ }\n+\nif trunc {\n// n and r.MsgSize are the same for streams.\ntotal += int64(r.MsgSize)\n} else {\ntotal += n\n}\n+\nif err != nil || !waitAll || s.isPacket || n >= dst.NumBytes() {\nif total > 0 {\nerr = nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_socket.go", "new_path": "pkg/sentry/syscalls/linux/sys_socket.go", "diff": "@@ -746,7 +746,10 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\nif err != nil {\nreturn 0, syserror.ConvertIntr(err.ToError(), kernel.ERESTARTSYS)\n}\n+ if !cms.Unix.Empty() {\n+ mflags |= linux.MSG_CTRUNC\ncms.Unix.Release()\n+ }\nif int(msg.Flags) != mflags {\n// Copy out the flags to the caller.\n@@ -771,7 +774,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\nif cr, ok := s.(transport.Credentialer); ok && cr.Passcred() {\ncreds, _ := cms.Unix.Credentials.(control.SCMCredentials)\n- controlData = control.PackCredentials(t, creds, controlData)\n+ controlData, mflags = control.PackCredentials(t, creds, controlData, mflags)\n}\nif cms.IP.HasTimestamp {\n@@ -779,7 +782,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i\n}\nif cms.Unix.Rights != nil {\n- controlData = control.PackRights(t, cms.Unix.Rights.(control.SCMRights), flags&linux.MSG_CMSG_CLOEXEC != 0, controlData)\n+ controlData, mflags = 
control.PackRights(t, cms.Unix.Rights.(control.SCMRights), flags&linux.MSG_CMSG_CLOEXEC != 0, controlData, mflags)\n}\n// Copy the address to the caller.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix.cc", "new_path": "test/syscalls/linux/socket_unix.cc", "diff": "@@ -186,9 +186,6 @@ TEST_P(UnixSocketPairTest, BasicFDPassNoSpace) {\n// BasicFDPassNoSpaceMsgCtrunc sends an FD, but does not provide any space to\n// receive it. It then verifies that the MSG_CTRUNC flag is set in the msghdr.\nTEST_P(UnixSocketPairTest, BasicFDPassNoSpaceMsgCtrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n@@ -259,9 +256,6 @@ TEST_P(UnixSocketPairTest, BasicFDPassNullControlMsgCtrunc) {\n// space to receive it. It then verifies that the MSG_CTRUNC flag is set in the\n// msghdr.\nTEST_P(UnixSocketPairTest, BasicFDPassNotEnoughSpaceMsgCtrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n@@ -296,9 +290,6 @@ TEST_P(UnixSocketPairTest, BasicFDPassNotEnoughSpaceMsgCtrunc) {\n// space to receive two of them. It then verifies that the MSG_CTRUNC flag is\n// set in the msghdr.\nTEST_P(UnixSocketPairTest, BasicThreeFDPassTruncationMsgCtrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n@@ -408,9 +399,6 @@ TEST_P(UnixSocketPairTest, BasicFDPassUnalignedRecvNoMsgTrunc) {\n// provides enough space to receive one of them. It then verifies that the\n// MSG_CTRUNC flag is set in the msghdr.\nTEST_P(UnixSocketPairTest, BasicTwoFDPassUnalignedRecvTruncationMsgTrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n@@ -1010,9 +998,6 @@ TEST_P(UnixSocketPairTest, CredPassNoMsgCtrunc) {\n// the data without providing space for any credentials and verifies that\n// MSG_CTRUNC is set in the msghdr.\nTEST_P(UnixSocketPairTest, CredPassNoSpaceMsgCtrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n@@ -1061,9 +1046,6 @@ TEST_P(UnixSocketPairTest, CredPassNoSpaceMsgCtrunc) {\n// the data while providing enough space for only the first field of the\n// credentials and verifies that MSG_CTRUNC is set in the msghdr.\nTEST_P(UnixSocketPairTest, CredPassTruncatedMsgCtrunc) {\n- // FIXME(gvisor.dev/issue/206): Support MSG_CTRUNC.\n- SKIP_IF(IsRunningOnGvisor());\n-\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\nchar sent_data[20];\n" } ]
Go
Apache License 2.0
google/gvisor
Implement the MSG_CTRUNC msghdr flag for Unix sockets. Updates google/gvisor#206 PiperOrigin-RevId: 245880573 Change-Id: Ifa715e98d47f64b8a32b04ae9378d6cd6bd4025e
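For context, MSG_CTRUNC is reported to userspace in the flags returned by recvmsg(2). The sketch below shows how a receiver would observe the truncation this change implements, using only the standard syscall package; the deliberately small oob buffer means that passing more than one FD sets MSG_CTRUNC. Nothing here is gVisor code.

```go
// Sketch: detecting control-message truncation after recvmsg(2).
package fdpass

import "syscall"

// recvFDs returns the received bytes, any SCM_RIGHTS FDs that did fit, and
// whether the kernel reported truncated control data.
func recvFDs(fd int) (data []byte, fds []int, truncated bool, err error) {
	buf := make([]byte, 1024)
	oob := make([]byte, syscall.CmsgSpace(4)) // room for a single int32 FD
	n, oobn, flags, _, err := syscall.Recvmsg(fd, buf, oob, 0)
	if err != nil {
		return nil, nil, false, err
	}
	truncated = flags&syscall.MSG_CTRUNC != 0

	cmsgs, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return buf[:n], nil, truncated, err
	}
	for _, c := range cmsgs {
		if rights, rerr := syscall.ParseUnixRights(&c); rerr == nil {
			fds = append(fds, rights...)
		}
	}
	return buf[:n], fds, truncated, nil
}
```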
259,881
30.04.2019 15:41:42
25,200
23ca9886c6cfe499438f1b994ee66a4f803673ae
Update reference to old type
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/dentry.go", "new_path": "pkg/sentry/fs/dentry.go", "diff": "@@ -73,8 +73,8 @@ func (c *CollectEntriesSerializer) Written() int {\nreturn len(c.Entries)\n}\n-// DirCtx is used by node.Readdir to emit directory entries. It is not\n-// thread-safe.\n+// DirCtx is used in FileOperations.IterateDir to emit directory entries. It is\n+// not thread-safe.\ntype DirCtx struct {\n// Serializer is used to serialize the node attributes.\nSerializer DentrySerializer\n" } ]
Go
Apache License 2.0
google/gvisor
Update reference to old type PiperOrigin-RevId: 246036806 Change-Id: I5554a43a1f8146c927402db3bf98488a2da0fbe7
259,858
30.04.2019 21:52:45
25,200
2c1c1c9917617ddac0042aa0e7ae14d5032100c5
CONTRIBUTING: fix broken repository link
[ { "change_type": "MODIFY", "old_path": "CONTRIBUTING.md", "new_path": "CONTRIBUTING.md", "diff": "@@ -144,5 +144,5 @@ one above, the\n[gccla]: https://cla.developers.google.com/about/google-corporate\n[gerrit]: https://gvisor-review.googlesource.com\n[gostyle]: https://github.com/golang/go/wiki/CodeReviewComments\n-[repo]: https://gvisor.googlesource.com\n+[repo]: https://gvisor.googlesource.com/?format=HTML\n[teststyle]: ./test/\n" } ]
Go
Apache License 2.0
google/gvisor
CONTRIBUTING: fix broken repository link PiperOrigin-RevId: 246079174 Change-Id: I423078a065e0cc5d258d674b4f2f0680a5db0aee
259,891
02.05.2019 18:56:40
25,200
bf40fa21292f08e66a274169ad1318e62fbc542b
Replace dynamic macros with constants in memfd test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/memfd.cc", "new_path": "test/syscalls/linux/memfd.cc", "diff": "@@ -38,8 +38,15 @@ namespace {\n// The header sys/memfd.h isn't available on all systems, so redefining some of\n// the constants here.\n#define F_LINUX_SPECIFIC_BASE 1024\n+\n+#ifndef F_ADD_SEALS\n#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)\n+#endif /* F_ADD_SEALS */\n+\n+#ifndef F_GET_SEALS\n#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)\n+#endif /* F_GET_SEALS */\n+\n#define F_SEAL_SEAL 0x0001\n#define F_SEAL_SHRINK 0x0002\n#define F_SEAL_GROW 0x0004\n" } ]
Go
Apache License 2.0
google/gvisor
Replace dynamic macros with constants in memfd test. PiperOrigin-RevId: 246433167 Change-Id: Idb9b6c20ee1da193176288dfd2f9d85ec0e69c54
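The constants being guarded in that test belong to the memfd sealing API. A hedged sketch of how they are used from Go, assuming the golang.org/x/sys/unix wrappers (MemfdCreate, FcntlInt) and its F_ADD_SEALS, F_GET_SEALS, and F_SEAL_* constants; this is illustrative only and not part of the test.

```go
// Hedged sketch of the sealing API these constants describe.
package example

import "golang.org/x/sys/unix"

// sealedMemfd creates a sealable memfd, forbids resizing it, and reads the
// installed seals back with F_GET_SEALS.
func sealedMemfd() (fd int, seals int, err error) {
	fd, err = unix.MemfdCreate("demo", unix.MFD_ALLOW_SEALING)
	if err != nil {
		return -1, 0, err
	}
	if err = unix.Ftruncate(fd, 4096); err != nil {
		unix.Close(fd)
		return -1, 0, err
	}
	// F_ADD_SEALS == F_LINUX_SPECIFIC_BASE + 9, matching the test's fallback define.
	if _, err = unix.FcntlInt(uintptr(fd), unix.F_ADD_SEALS, unix.F_SEAL_GROW|unix.F_SEAL_SHRINK); err != nil {
		unix.Close(fd)
		return -1, 0, err
	}
	// F_GET_SEALS == F_LINUX_SPECIFIC_BASE + 10.
	seals, err = unix.FcntlInt(uintptr(fd), unix.F_GET_SEALS, 0)
	if err != nil {
		unix.Close(fd)
		return -1, 0, err
	}
	return fd, seals, nil
}
```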
259,853
02.05.2019 19:26:16
25,200
c967fbdaa2cda260312f73a3f75744ac1ad11176
runsc: move test_app in a separate directory Opensource tools (e. g. https://github.com/fatih/vim-go) can't handle more than one golang package in one directory.
[ { "change_type": "MODIFY", "old_path": "runsc/container/BUILD", "new_path": "runsc/container/BUILD", "diff": "-load(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\", \"go_library\", \"go_test\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\npackage(licenses = [\"notice\"])\n@@ -37,8 +37,8 @@ go_test(\n\"shared_volume_test.go\",\n],\ndata = [\n- \":test_app\",\n\"//runsc\",\n+ \"//runsc/container/test_app\",\n],\nembed = [\":container\"],\nshard_count = 5,\n@@ -61,14 +61,3 @@ go_test(\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n-\n-go_binary(\n- name = \"test_app\",\n- testonly = 1,\n- srcs = [\"test_app.go\"],\n- pure = \"on\",\n- deps = [\n- \"//runsc/test/testutil\",\n- \"@com_github_google_subcommands//:go_default_library\",\n- ],\n-)\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -569,7 +569,7 @@ func TestKillPid(t *testing.T) {\nfor _, conf := range configs(overlay) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n@@ -792,7 +792,7 @@ func TestUnixDomainSockets(t *testing.T) {\n}\ndefer outputFile.Close()\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n@@ -1471,7 +1471,7 @@ func TestRootNotMount(t *testing.T) {\nt.Skip(\"race makes test_app not statically linked\")\n}\n- appSym, err := testutil.FindFile(\"runsc/container/test_app\")\n+ appSym, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n@@ -1497,7 +1497,7 @@ func TestRootNotMount(t *testing.T) {\n}\nfunc TestUserLog(t *testing.T) {\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -403,7 +403,7 @@ func TestMultiContainerSignal(t *testing.T) {\n// TestMultiContainerDestroy checks that container are properly cleaned-up when\n// they are destroyed.\nfunc TestMultiContainerDestroy(t *testing.T) {\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n@@ -533,7 +533,7 @@ func TestMultiContainerKillAll(t *testing.T) {\n{killContainer: true},\n{killContainer: false},\n} {\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n@@ -734,7 +734,7 @@ func TestMultiContainerDestroyStarting(t *testing.T) {\n// TestMultiContainerGoferStop tests that IO operations continue to work after\n// containers have been stopped and gofers killed.\nfunc TestMultiContainerGoferStop(t *testing.T) {\n- app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n}\n" }, { "change_type": "ADD", 
"old_path": null, "new_path": "runsc/container/test_app/BUILD", "diff": "+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_binary(\n+ name = \"test_app\",\n+ testonly = 1,\n+ srcs = [\"test_app.go\"],\n+ pure = \"on\",\n+ visibility = [\"//runsc/container:__pkg__\"],\n+ deps = [\n+ \"//runsc/test/testutil\",\n+ \"@com_github_google_subcommands//:go_default_library\",\n+ ],\n+)\n" }, { "change_type": "RENAME", "old_path": "runsc/container/test_app.go", "new_path": "runsc/container/test_app/test_app.go", "diff": "" } ]
Go
Apache License 2.0
google/gvisor
runsc: move test_app in a separate directory Opensource tools (e. g. https://github.com/fatih/vim-go) can't handle more than one golang package in one directory. PiperOrigin-RevId: 246435962 Change-Id: I67487915e3838762424b2d168efc54ae34fb801f
259,853
02.05.2019 19:33:19
25,200
5f8225c009fcf297139c54c7b329da4aff679ece
runsc: don't create an empty network namespace if NetworkHost is set With this change, we will be able to run runsc do in a host network namespace.
[ { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -472,6 +472,8 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nif ns, ok := specutils.GetNS(specs.NetworkNamespace, spec); ok && conf.Network != boot.NetworkNone {\nlog.Infof(\"Sandbox will be started in the container's network namespace: %+v\", ns)\nnss = append(nss, ns)\n+ } else if conf.Network == boot.NetworkHost {\n+ log.Infof(\"Sandbox will be started in the host network namespace\")\n} else {\nlog.Infof(\"Sandbox will be started in new network namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n" } ]
Go
Apache License 2.0
google/gvisor
runsc: don't create an empty network namespace if NetworkHost is set With this change, we will be able to run runsc do in a host network namespace. PiperOrigin-RevId: 246436660 Change-Id: I8ea18b1053c88fe2feed74239b915fe7a151ce34
260,006
03.05.2019 07:01:38
25,200
2d8e90b31102fa784f1657153db99d6fe52b4e9d
Proper cleanup of sockets that used REUSEPORT Fixed a small logic error that broke proper accounting of MultiPortEndpoints.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/transport_demuxer.go", "new_path": "pkg/tcpip/stack/transport_demuxer.go", "diff": "@@ -171,7 +171,7 @@ func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint) {\n// A new endpoint is added into endpointsArr and its index there is\n// saved in endpointsMap. This will allows to remove endpoint from\n// the array fast.\n- ep.endpointsMap[ep] = len(ep.endpointsArr)\n+ ep.endpointsMap[t] = len(ep.endpointsArr)\nep.endpointsArr = append(ep.endpointsArr, t)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_inet_loopback.cc", "new_path": "test/syscalls/linux/socket_inet_loopback.cc", "diff": "@@ -1040,6 +1040,40 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, PortReuseTwoSockets) {\n}\n}\n+// Check that when a socket was bound to an address with REUSEPORT and then\n+// closed, we can bind a different socket to the same address without needing\n+// REUSEPORT.\n+TEST_P(SocketMultiProtocolInetLoopbackTest, NoReusePortFollowingReusePort) {\n+ auto const& param = GetParam();\n+ TestAddress const& test_addr = V4Loopback();\n+ sockaddr_storage addr = test_addr.addr;\n+\n+ auto s = ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ int fd = s.get();\n+ socklen_t addrlen = test_addr.addr_len;\n+ int portreuse = 1;\n+ ASSERT_THAT(\n+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &portreuse, sizeof(portreuse)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(bind(fd, reinterpret_cast<sockaddr*>(&addr), addrlen),\n+ SyscallSucceeds());\n+ ASSERT_THAT(getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &addrlen),\n+ SyscallSucceeds());\n+ ASSERT_EQ(addrlen, test_addr.addr_len);\n+\n+ s.reset();\n+\n+ // Open a new socket and bind to the same address, but w/o REUSEPORT.\n+ s = ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr.family(), param.type, 0));\n+ fd = s.get();\n+ portreuse = 0;\n+ ASSERT_THAT(\n+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &portreuse, sizeof(portreuse)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(bind(fd, reinterpret_cast<sockaddr*>(&addr), addrlen),\n+ SyscallSucceeds());\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAllFamlies, SocketMultiProtocolInetLoopbackTest,\n::testing::Values(ProtocolTestParam{\"TCP\", SOCK_STREAM},\n" } ]
Go
Apache License 2.0
google/gvisor
Proper cleanup of sockets that used REUSEPORT Fixed a small logic error that broke proper accounting of MultiPortEndpoints. PiperOrigin-RevId: 246502126 Change-Id: I1a7d6ea134f811612e545676212899a3707bc2c2
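As an application-level view of what the new test exercises: two sockets bound to the same address with SO_REUSEPORT, followed by a later bind without it once both are closed. The sketch uses the standard library plus golang.org/x/sys/unix with an arbitrary address; it is not netstack code.

```go
// Two listeners share one address via SO_REUSEPORT.
package example

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func listenReusePort(addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			if err := c.Control(func(fd uintptr) {
				serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			}); err != nil {
				return err
			}
			return serr
		},
	}
	return lc.Listen(context.Background(), "tcp", addr)
}

// Closing both listeners must fully release the port so that a later bind
// without SO_REUSEPORT succeeds, which is the behavior the accounting fix
// above restores.
func listenTwice(addr string) (net.Listener, net.Listener, error) {
	l1, err := listenReusePort(addr)
	if err != nil {
		return nil, nil, err
	}
	l2, err := listenReusePort(addr)
	if err != nil {
		l1.Close()
		return nil, nil, err
	}
	return l1, l2, nil
}
```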
259,992
03.05.2019 09:41:08
25,200
6b9ab65163528239c9fbf10053427513513e6ab0
Skip flaky ClockGettime.CputimeId take 2 The test also times out when the GCE machine has 2 CPUs. I cannot repro it locally with a 2 CPU cgroup though. Let's skip the test when there are 2 CPUs to stop the flakiness and retest it once the fix is available.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/clock_gettime.cc", "new_path": "test/syscalls/linux/clock_gettime.cc", "diff": "@@ -56,8 +56,9 @@ void spin_ns(int64_t ns) {\n// Test that CLOCK_PROCESS_CPUTIME_ID is a superset of CLOCK_THREAD_CPUTIME_ID.\nTEST(ClockGettime, CputimeId) {\n// TODO(b/128871825,golang.org/issue/10958): Test times out when there is a\n- // single core because one goroutine starves the others.\n- SKIP_IF(std::thread::hardware_concurrency() == 1);\n+ // small number of core because one goroutine starves the others.\n+ printf(\"CPUS: %d\\n\", std::thread::hardware_concurrency());\n+ SKIP_IF(std::thread::hardware_concurrency() <= 2);\nconstexpr int kNumThreads = 13; // arbitrary\n" } ]
Go
Apache License 2.0
google/gvisor
Skip flaky ClockGettime.CputimeId take 2 The test also times out when the GCE machine has 2 CPUs. I cannot repro it locally with a 2 CPU cgroup though. Let's skip the test when there are 2 CPUs to stop the flakiness and retest it once the fix is available. PiperOrigin-RevId: 246523363 Change-Id: I9d9d922a5be3aa7bc91dff5a1807ca99f3f4a4f9
259,992
03.05.2019 09:53:26
25,200
95614bbefa2f4657c77b2040630088fdec7f5dd1
Increase timeout to wait for port to become available TestHttpd fails sporadically waiting for the port on slow machines.
[ { "change_type": "MODIFY", "old_path": "runsc/test/image/image_test.go", "new_path": "runsc/test/image/image_test.go", "diff": "@@ -103,7 +103,7 @@ func TestHttpd(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n@@ -137,7 +137,7 @@ func TestNginx(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/integration/integration_test.go", "new_path": "runsc/test/integration/integration_test.go", "diff": "@@ -68,7 +68,7 @@ func TestLifeCycle(t *testing.T) {\nif err != nil {\nt.Fatal(\"docker.FindPort(80) failed: \", err)\n}\n- if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\nt.Fatal(\"WaitForHTTP() timeout:\", err)\n}\nclient := http.Client{Timeout: time.Duration(2 * time.Second)}\n" } ]
Go
Apache License 2.0
google/gvisor
Increase timeout to wait for port to become available TestHttpd fails sporadically waiting for the port on slow machines. PiperOrigin-RevId: 246525277 Change-Id: Ie0ea71e3c4664d24f580eabd8f7461e47079f734
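The testutil.WaitForHTTP helper whose timeout is being raised is not shown in the diff. Assuming it simply polls the port until the server answers or the deadline passes, a minimal sketch of that contract looks like the following; the real implementation may differ.

```go
// Sketch of a WaitForHTTP-style readiness poll.
package example

import (
	"fmt"
	"net/http"
	"time"
)

func waitForHTTP(port int, timeout time.Duration) error {
	url := fmt.Sprintf("http://localhost:%d/", port)
	client := http.Client{Timeout: 500 * time.Millisecond}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if resp, err := client.Get(url); err == nil {
			resp.Body.Close()
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("port %d not serving HTTP after %v", port, timeout)
}
```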
259,853
03.05.2019 11:20:12
25,200
3f3e3a63033f87dd42076423661b62c04d10c15f
gvisor/kokoro: save runsc logs
[ { "change_type": "MODIFY", "old_path": "kokoro/continuous.cfg", "new_path": "kokoro/continuous.cfg", "diff": "@@ -7,5 +7,7 @@ action {\ndefine_artifacts {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\n+ regex: \"**/outputs.zip\"\n+ regex: \"**/runsc-logs.tar.gz\"\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "kokoro/presubmit.cfg", "new_path": "kokoro/presubmit.cfg", "diff": "@@ -8,5 +8,6 @@ action {\nregex: \"**/sponge_log.xml\"\nregex: \"**/sponge_log.log\"\nregex: \"**/outputs.zip\"\n+ regex: \"**/runsc-logs.tar.gz\"\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -183,6 +183,9 @@ upload_test_artifacts() {\nfind -L \"bazel-testlogs\" -name \"test.xml\" -o -name \"test.log\" -o -name \"outputs.zip\" |\ntar --create --files-from - --transform 's/test\\./sponge_log./' |\ntar --extract --directory ${KOKORO_ARTIFACTS_DIR}\n+ if [[ -d \"/tmp/${RUNTIME}/logs\" ]]; then\n+ tar --create --gzip \"--file=${KOKORO_ARTIFACTS_DIR}/runsc-logs.tar.gz\" -C /tmp/ ${RUNTIME}/logs\n+ fi\n}\n# Finish runs at exit, even in the event of an error, and uploads all test\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/docker.go", "new_path": "runsc/test/testutil/docker.go", "diff": "@@ -120,7 +120,7 @@ func getLocalPath(file string) string {\n// do executes docker command.\nfunc do(args ...string) (string, error) {\n- fmt.Printf(\"Running: docker %s\\n\", args)\n+ log.Printf(\"Running: docker %s\\n\", args)\ncmd := exec.Command(\"docker\", args...)\nout, err := cmd.CombinedOutput()\nif err != nil {\n@@ -131,7 +131,7 @@ func do(args ...string) (string, error) {\n// doWithPty executes docker command with stdio attached to a pty.\nfunc doWithPty(args ...string) (*exec.Cmd, *os.File, error) {\n- fmt.Printf(\"Running with pty: docker %s\\n\", args)\n+ log.Printf(\"Running with pty: docker %s\\n\", args)\ncmd := exec.Command(\"docker\", args...)\nptmx, err := pty.Start(cmd)\nif err != nil {\n@@ -160,11 +160,23 @@ func MakeDocker(namePrefix string) Docker {\nreturn Docker{Name: RandomName(namePrefix), Runtime: getRuntime()}\n}\n+// logDockerID logs a container id, which is needed to find container runsc logs.\n+func (d *Docker) logDockerID() {\n+ id, err := d.ID()\n+ if err != nil {\n+ log.Printf(\"%v\\n\", err)\n+ }\n+ log.Printf(\"Name: %s ID: %v\\n\", d.Name, id)\n+}\n+\n// Create calls 'docker create' with the arguments provided.\nfunc (d *Docker) Create(args ...string) error {\na := []string{\"create\", \"--runtime\", d.Runtime, \"--name\", d.Name}\na = append(a, args...)\n_, err := do(a...)\n+ if err == nil {\n+ d.logDockerID()\n+ }\nreturn err\n}\n@@ -190,6 +202,9 @@ func (d *Docker) Run(args ...string) error {\na := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-d\"}\na = append(a, args...)\n_, err := do(a...)\n+ if err == nil {\n+ d.logDockerID()\n+ }\nreturn err\n}\n@@ -206,6 +221,9 @@ func (d *Docker) RunFg(args ...string) (string, error) {\na := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name}\na = append(a, args...)\nout, err := do(a...)\n+ if err == nil {\n+ d.logDockerID()\n+ }\nreturn string(out), err\n}\n@@ -255,6 +273,7 @@ func (d *Docker) Remove() error {\n// CleanUp kills and deletes the container (best effort).\nfunc (d *Docker) CleanUp() {\n+ d.logDockerID()\nif _, err := do(\"kill\", d.Name); err != nil {\nlog.Printf(\"error killing container %q: %v\", d.Name, err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
gvisor/kokoro: save runsc logs PiperOrigin-RevId: 246542315 Change-Id: Ia9ba2bc104e0af3277d3b6102122c13d320ea802
259,891
03.05.2019 13:04:46
25,200
264d012d81d210c6d949554667c6fbf8e330587a
Add netfilter ABI for iptables support.
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/BUILD", "new_path": "pkg/abi/linux/BUILD", "diff": "@@ -32,6 +32,7 @@ go_library(\n\"linux.go\",\n\"mm.go\",\n\"netdevice.go\",\n+ \"netfilter.go\",\n\"netlink.go\",\n\"netlink_route.go\",\n\"poll.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/abi/linux/netfilter.go", "diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// This file contains structures required to support netfilter, specifically\n+// the iptables tool.\n+\n+// Hooks into the network stack. These correspond to values in\n+// include/uapi/linux/netfilter.h.\n+const (\n+ NF_INET_PRE_ROUTING = 0\n+ NF_INET_LOCAL_IN = 1\n+ NF_INET_FORWARD = 2\n+ NF_INET_LOCAL_OUT = 3\n+ NF_INET_POST_ROUTING = 4\n+ NF_INET_NUMHOOKS = 5\n+)\n+\n+// Verdicts that can be returned by targets. These correspond to values in\n+// include/uapi/linux/netfilter.h\n+const (\n+ NF_DROP = 0\n+ NF_ACCEPT = 1\n+ NF_STOLEN = 2\n+ NF_QUEUE = 3\n+ NF_REPEAT = 4\n+ NF_STOP = 5\n+ NF_MAX_VERDICT = NF_STOP\n+ // NF_RETURN is defined in include/uapi/linux/netfilter/x_tables.h.\n+ NF_RETURN = -NF_REPEAT - 1\n+)\n+\n+// Socket options. These correspond to values in\n+// include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+const (\n+ IPT_BASE_CTL = 64\n+ IPT_SO_SET_REPLACE = IPT_BASE_CTL\n+ IPT_SO_SET_ADD_COUNTERS = IPT_BASE_CTL + 1\n+ IPT_SO_SET_MAX = IPT_SO_SET_ADD_COUNTERS\n+\n+ IPT_SO_GET_INFO = IPT_BASE_CTL\n+ IPT_SO_GET_ENTRIES = IPT_BASE_CTL + 1\n+ IPT_SO_GET_REVISION_MATCH = IPT_BASE_CTL + 2\n+ IPT_SO_GET_REVISION_TARGET = IPT_BASE_CTL + 3\n+ IPT_SO_GET_MAX = IPT_SO_GET_REVISION_TARGET\n+)\n+\n+// Name lengths. These correspond to values in\n+// include/uapi/linux/netfilter/x_tables.h.\n+const (\n+ XT_FUNCTION_MAXNAMELEN = 30\n+ XT_EXTENSION_MAXNAMELEN = 29\n+ XT_TABLE_MAXNAMELEN = 32\n+)\n+\n+// IPTEntry is an iptable rule. It corresponds to struct ipt_entry in\n+// include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+type IPTEntry struct {\n+ // IP is used to filter packets based on the IP header.\n+ IP IPTIP\n+\n+ // NFCache relates to kernel-internal caching and isn't used by\n+ // userspace.\n+ NFCache uint32\n+\n+ // TargetOffset is the byte offset from the beginning of this IPTEntry\n+ // to the start of the entry's target.\n+ TargetOffset uint16\n+\n+ // NextOffset is the byte offset from the beginning of this IPTEntry to\n+ // the start of the next entry. It is thus also the size of the entry.\n+ NextOffset uint16\n+\n+ // Comeback is a return pointer. It is not used by userspace.\n+ Comeback uint32\n+\n+ // Counters holds the packet and byte counts for this rule.\n+ Counters XTCounters\n+\n+ // Elems holds the data for all this rule's matches followed by the\n+ // target. 
It is variable length -- users have to iterate over any\n+ // matches and use TargetOffset and NextOffset to make sense of the\n+ // data.\n+ //\n+ // Elems is omitted here because it would cause IPTEntry to be an extra\n+ // byte larger (see http://www.catb.org/esr/structure-packing/).\n+ //\n+ // Elems [0]byte\n+}\n+\n+// IPTIP contains information for matching a packet's IP header.\n+// It corresponds to struct ipt_ip in\n+// include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+type IPTIP struct {\n+ // Src is the source IP address.\n+ Src InetAddr\n+\n+ // Dst is the destination IP address.\n+ Dst InetAddr\n+\n+ // SrcMask is the source IP mask.\n+ SrcMask InetAddr\n+\n+ // DstMask is the destination IP mask.\n+ DstMask InetAddr\n+\n+ // InputInterface is the input network interface.\n+ InputInterface [IFNAMSIZ]byte\n+\n+ // OutputInterface is the output network interface.\n+ OutputInterface [IFNAMSIZ]byte\n+\n+ // InputInterfaceMask is the intput interface mask.\n+ InputInterfaceMast [IFNAMSIZ]byte\n+\n+ // OuputInterfaceMask is the output interface mask.\n+ OuputInterfaceMask [IFNAMSIZ]byte\n+\n+ // Protocol is the transport protocol.\n+ Protocol uint16\n+\n+ // Flags define matching behavior for the IP header.\n+ Flags uint8\n+\n+ // InverseFlags invert the meaning of fields in struct IPTIP.\n+ InverseFlags uint8\n+}\n+\n+// XTCounters holds packet and byte counts for a rule. It corresponds to struct\n+// xt_counters in include/uapi/linux/netfilter/x_tables.h.\n+type XTCounters struct {\n+ // Pcnt is the packet count.\n+ Pcnt uint64\n+\n+ // Bcnt is the byte count.\n+ Bcnt uint64\n+}\n+\n+// XTEntryMatch holds a match for a rule. For example, a user using the\n+// addrtype iptables match extension would put the data for that match into an\n+// XTEntryMatch. iptables-extensions(8) has a list of possible matches.\n+//\n+// XTEntryMatch corresponds to struct xt_entry_match in\n+// include/uapi/linux/netfilter/x_tables.h. That struct contains a union\n+// exposing different data to the user and kernel, but this struct holds only\n+// the user data.\n+type XTEntryMatch struct {\n+ MatchSize uint16\n+ Name [XT_EXTENSION_MAXNAMELEN]byte\n+ Revision uint8\n+ // Data is omitted here because it would cause XTEntryTarget to be an\n+ // extra byte larger (see http://www.catb.org/esr/structure-packing/).\n+ // Data [0]byte\n+}\n+\n+// XTEntryTarget holds a target for a rule. For example, it can specify that\n+// packets matching the rule should DROP, ACCEPT, or use an extension target.\n+// iptables-extension(8) has a list of possible targets.\n+//\n+// XTEntryTarget corresponds to struct xt_entry_target in\n+// include/uapi/linux/netfilter/x_tables.h. That struct contains a union\n+// exposing different data to the user and kernel, but this struct holds only\n+// the user data.\n+type XTEntryTarget struct {\n+ MatchSize uint16\n+ Name [XT_EXTENSION_MAXNAMELEN]byte\n+ Revision uint8\n+ // Data is omitted here because it would cause XTEntryTarget to be an\n+ // extra byte larger (see http://www.catb.org/esr/structure-packing/).\n+ // Data [0]byte\n+}\n+\n+// XTStandardTarget is a builtin target, one of ACCEPT, DROP, JUMP, QUEUE, or\n+// RETURN. It corresponds to struct xt_standard_target in\n+// include/uapi/linux/netfilter/x_tables.h.\n+type XTStandardTarget struct {\n+ Target XTEntryTarget\n+ Verdict int32\n+}\n+\n+// XTErrorTarget triggers an error when reached. It is also used to mark the\n+// beginning of user-defined chains by putting the name of the chain in\n+// ErrorName. 
It corresponds to struct xt_error_target in\n+// include/uapi/linux/netfilter/x_tables.h.\n+type XTErrorTarget struct {\n+ Target XTEntryTarget\n+ ErrorName [XT_FUNCTION_MAXNAMELEN]byte\n+}\n+\n+// IPTGetinfo is the argument for the IPT_SO_GET_INFO sockopt. It corresponds\n+// to struct ipt_getinfo in include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+type IPTGetinfo struct {\n+ Name [XT_TABLE_MAXNAMELEN]byte\n+ ValidHooks uint32\n+ HookEntry [NF_INET_NUMHOOKS]uint32\n+ Underflow [NF_INET_NUMHOOKS]uint32\n+ NumEntries uint32\n+ Size uint32\n+}\n+\n+// IPTGetEntries is the argument for the IPT_SO_GET_ENTRIES sockopt. It\n+// corresponds to struct ipt_get_entries in\n+// include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+type IPTGetEntries struct {\n+ Name [XT_TABLE_MAXNAMELEN]byte\n+ Size uint32\n+ // Entrytable is omitted here because it would cause IPTGetEntries to\n+ // be an extra byte longer (see\n+ // http://www.catb.org/esr/structure-packing/).\n+ // Entrytable [0]IPTEntry\n+}\n+\n+// IPTReplace is the argument for the IPT_SO_SET_REPLACE sockopt. It\n+// corresponds to struct ipt_replace in\n+// include/uapi/linux/netfilter_ipv4/ip_tables.h.\n+type IPTReplace struct {\n+ Name [XT_TABLE_MAXNAMELEN]byte\n+ ValidHooks uint32\n+ NumEntries uint32\n+ Size uint32\n+ HookEntry [NF_INET_NUMHOOKS]uint32\n+ Underflow [NF_INET_NUMHOOKS]uint32\n+ NumCounters uint32\n+ Counters *XTCounters\n+ // Entries is omitted here because it would cause IPTReplace to be an\n+ // extra byte longer (see http://www.catb.org/esr/structure-packing/).\n+ // Entries [0]IPTEntry\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add netfilter ABI for iptables support. Change-Id: Ifbd2abf63ea8062a89b83e948d3e9735480d8216 PiperOrigin-RevId: 246559904
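The IPTEntry comment above notes that consumers must iterate over a variable-length buffer using TargetOffset and NextOffset. A hedged sketch of such a walk over a serialized entry table follows; the structs are trimmed stand-ins for IPTIP/IPTEntry, field packing is assumed to match the kernel's amd64 layout (no implicit padding in these structs), and host byte order is assumed little-endian. This is not the eventual gVisor parser.

```go
// Walking serialized ipt_entry records by NextOffset.
package example

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type iptIP struct {
	Src, Dst, SrcMask, DstMask         [4]byte
	InIface, OutIface, InMask, OutMask [16]byte
	Protocol                           uint16
	Flags, InverseFlags                uint8
}

type iptEntry struct {
	IP           iptIP
	NFCache      uint32
	TargetOffset uint16
	NextOffset   uint16
	Comeback     uint32
	Pcnt, Bcnt   uint64
}

// walkEntries iterates over a serialized entry table, advancing by each
// entry's NextOffset, which is also the entry's total size.
func walkEntries(buf []byte) error {
	for off := 0; off < len(buf); {
		var e iptEntry
		if err := binary.Read(bytes.NewReader(buf[off:]), binary.LittleEndian, &e); err != nil {
			return err
		}
		if e.NextOffset == 0 {
			return fmt.Errorf("entry at offset %d has zero NextOffset", off)
		}
		fmt.Printf("entry at %d: target at +%d, next entry at +%d\n", off, e.TargetOffset, e.NextOffset)
		off += int(e.NextOffset)
	}
	return nil
}
```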
259,853
03.05.2019 14:00:31
25,200
24d8656585e6072ff7d5a00a7eb4bd25cba42dc4
gofer: don't leak file descriptors Fixes #219
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/cache_policy.go", "new_path": "pkg/sentry/fs/gofer/cache_policy.go", "diff": "@@ -139,11 +139,12 @@ func (cp cachePolicy) revalidate(ctx context.Context, name string, parent, child\n// TODO(b/112031682): If we have a directory FD in the parent\n// inodeOperations, then we can use fstatat(2) to get the inode\n// attributes instead of making this RPC.\n- qids, _, mask, attr, err := parentIops.fileState.file.walkGetAttr(ctx, []string{name})\n+ qids, f, mask, attr, err := parentIops.fileState.file.walkGetAttr(ctx, []string{name})\nif err != nil {\n// Can't look up the name. Trigger reload.\nreturn true\n}\n+ f.close(ctx)\n// If the Path has changed, then we are not looking at the file file.\n// We must reload.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/path.go", "new_path": "pkg/sentry/fs/gofer/path.go", "diff": "@@ -109,6 +109,7 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string\nhostFile, err := newFile.create(ctx, name, openFlags, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID))\nif err != nil {\n// Could not create the file.\n+ newFile.close(ctx)\nreturn nil, err\n}\n@@ -120,11 +121,14 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string\nqids, unopened, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name})\nif err != nil {\nnewFile.close(ctx)\n+ hostFile.Close()\nreturn nil, err\n}\nif len(qids) != 1 {\nlog.Warningf(\"WalkGetAttr(%s) succeeded, but returned %d QIDs (%v), wanted 1\", name, len(qids), qids)\nnewFile.close(ctx)\n+ hostFile.Close()\n+ unopened.close(ctx)\nreturn nil, syserror.EIO\n}\nqid := qids[0]\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -502,6 +502,9 @@ func (l *localFile) Walk(names []string) ([]p9.QID, p9.File, error) {\nlast := l\nfor _, name := range names {\nf, path, err := openAnyFileFromParent(last, name)\n+ if last != l {\n+ last.Close()\n+ }\nif err != nil {\nreturn nil, nil, extractErrno(err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
gofer: don't leak file descriptors Fixes #219 PiperOrigin-RevId: 246568639 Change-Id: Ic7afd15dde922638d77f6429c508d1cbe2e4288a
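The leak class fixed here is the classic one: an error path returns after a descriptor has been acquired without closing it. A minimal, generic illustration of the discipline follows; openBoth is hypothetical and only the pattern matters.

```go
// Every early return taken after a resource is acquired must release it.
package example

import "os"

func openBoth(pathA, pathB string) (*os.File, *os.File, error) {
	a, err := os.Open(pathA)
	if err != nil {
		return nil, nil, err
	}
	b, err := os.Open(pathB)
	if err != nil {
		a.Close() // without this, a's descriptor leaks on the error path
		return nil, nil, err
	}
	return a, b, nil
}
```

The Walk fix above applies the same discipline inside a loop: each intermediate handle is closed as soon as the next step has been attempted, whether it succeeded or failed.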
259,854
03.05.2019 21:00:29
25,200
b4a9f186872d6687f34e609a39aa10eb33cce1d2
Update tcpip Clock description. The tcpip.Clock comment stated that times provided by it should not be used for netstack internal timekeeping. This comment was from before the interface supported monotonic times. The monotonic times that it provides are now the preferred time source for netstack internal timekeeping.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/tcpip.go", "new_path": "pkg/tcpip/tcpip.go", "diff": "@@ -125,7 +125,7 @@ func (e ErrSaveRejection) Error() string {\n// A Clock provides the current time.\n//\n// Times returned by a Clock should always be used for application-visible\n-// time, but never for netstack internal timekeeping.\n+// time. Only monotonic times should be used for netstack internal timekeeping.\ntype Clock interface {\n// NowNanoseconds returns the current real time as a number of\n// nanoseconds since the Unix epoch.\n" } ]
Go
Apache License 2.0
google/gvisor
Update tcpip Clock description. The tcpip.Clock comment stated that times provided by it should not be used for netstack internal timekeeping. This comment was from before the interface supported monotonic times. The monotonic times that it provides are now the preferred time source for netstack internal timekeeping. PiperOrigin-RevId: 246618772 Change-Id: I853b720e3d719b03fabd6156d2431da05d354bda
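The distinction the updated comment draws, wall-clock time for application-visible values versus monotonic readings for internal timekeeping, can be illustrated with Go's own time package; this is not the tcpip.Clock interface itself.

```go
// Wall-clock values are what you show to applications; monotonic readings
// are what you measure intervals with.
package example

import (
	"fmt"
	"time"
)

func measure(work func()) {
	start := time.Now() // carries a monotonic reading in Go
	work()
	// time.Since subtracts using the monotonic component, so the result is
	// unaffected by wall-clock adjustments made while work() ran.
	elapsed := time.Since(start)

	// The wall-clock value is still the right thing to report outward.
	fmt.Printf("finished at %s after %s\n", time.Now().Format(time.RFC3339), elapsed)
}
```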
259,891
05.05.2019 16:06:11
25,200
ff8ed5e6a5a391c5465230121af09afa5d1906e9
Fix raw socket behavior and tests. Some behavior was broken due to the difficulty of running automated raw socket tests.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/icmp.go", "new_path": "pkg/tcpip/network/ipv4/icmp.go", "diff": "@@ -72,7 +72,24 @@ func (e *endpoint) handleICMP(r *stack.Route, netHeader buffer.View, vv buffer.V\nreceived.Invalid.Increment()\nreturn\n}\n+\n+ // Only send a reply if the checksum is valid.\n+ wantChecksum := h.Checksum()\n+ // Reset the checksum field to 0 to can calculate the proper\n+ // checksum. We'll have to reset this before we hand the packet\n+ // off.\n+ h.SetChecksum(0)\n+ gotChecksum := ^header.ChecksumVV(vv, 0 /* initial */)\n+ if gotChecksum != wantChecksum {\n+ // It's possible that a raw socket expects to receive this.\n+ h.SetChecksum(wantChecksum)\n+ e.dispatcher.DeliverTransportPacket(r, header.ICMPv4ProtocolNumber, netHeader, vv)\n+ received.Invalid.Increment()\n+ return\n+ }\n+\n// It's possible that a raw socket expects to receive this.\n+ h.SetChecksum(wantChecksum)\ne.dispatcher.DeliverTransportPacket(r, header.ICMPv4ProtocolNumber, netHeader, vv)\nvv := vv.Clone(nil)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/icmp/endpoint.go", "new_path": "pkg/tcpip/transport/icmp/endpoint.go", "diff": "@@ -661,6 +661,22 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n// HandlePacket is called by the stack when new packets arrive to this transport\n// endpoint.\nfunc (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv buffer.VectorisedView) {\n+ // Only accept echo replies.\n+ switch e.netProto {\n+ case header.IPv4ProtocolNumber:\n+ h := header.ICMPv4(vv.First())\n+ if h.Type() != header.ICMPv4EchoReply {\n+ e.stack.Stats().DroppedPackets.Increment()\n+ return\n+ }\n+ case header.IPv6ProtocolNumber:\n+ h := header.ICMPv6(vv.First())\n+ if h.Type() != header.ICMPv6EchoReply {\n+ e.stack.Stats().DroppedPackets.Increment()\n+ return\n+ }\n+ }\n+\ne.rcvMu.Lock()\n// Drop the packet if our buffer is currently full.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -80,8 +80,6 @@ type endpoint struct {\n// The following fields are protected by mu.\nmu sync.RWMutex `state:\"nosave\"`\nsndBufSize int\n- // shutdownFlags represent the current shutdown state of the endpoint.\n- shutdownFlags tcpip.ShutdownFlags\nclosed bool\nconnected bool\nbound bool\n@@ -192,12 +190,6 @@ func (ep *endpoint) Write(payload tcpip.Payload, opts tcpip.WriteOptions) (uintp\nreturn 0, nil, tcpip.ErrInvalidEndpointState\n}\n- // Check whether we've shutdown writing.\n- if ep.shutdownFlags&tcpip.ShutdownWrite != 0 {\n- ep.mu.RUnlock()\n- return 0, nil, tcpip.ErrClosedForSend\n- }\n-\n// Did the user caller provide a destination? If not, use the connected\n// destination.\nif opts.To == nil {\n@@ -205,7 +197,7 @@ func (ep *endpoint) Write(payload tcpip.Payload, opts tcpip.WriteOptions) (uintp\n// connected to another address.\nif !ep.connected {\nep.mu.RUnlock()\n- return 0, nil, tcpip.ErrNotConnected\n+ return 0, nil, tcpip.ErrDestinationRequired\n}\nif ep.route.IsResolutionRequired() {\n@@ -355,7 +347,7 @@ func (ep *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error {\nreturn nil\n}\n-// Shutdown implements tcpip.Endpoint.Shutdown.\n+// Shutdown implements tcpip.Endpoint.Shutdown. 
It's a noop for raw sockets.\nfunc (ep *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\nep.mu.Lock()\ndefer ep.mu.Unlock()\n@@ -363,20 +355,6 @@ func (ep *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\nif !ep.connected {\nreturn tcpip.ErrNotConnected\n}\n-\n- ep.shutdownFlags |= flags\n-\n- if flags&tcpip.ShutdownRead != 0 {\n- ep.rcvMu.Lock()\n- wasClosed := ep.rcvClosed\n- ep.rcvClosed = true\n- ep.rcvMu.Unlock()\n-\n- if !wasClosed {\n- ep.waiterQueue.Notify(waiter.EventIn)\n- }\n- }\n-\nreturn nil\n}\n@@ -427,19 +405,10 @@ func (ep *endpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) {\n// GetRemoteAddress implements tcpip.Endpoint.GetRemoteAddress.\nfunc (ep *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) {\n- ep.mu.RLock()\n- defer ep.mu.RUnlock()\n-\n- if !ep.connected {\n+ // Even a connected socket doesn't return a remote address.\nreturn tcpip.FullAddress{}, tcpip.ErrNotConnected\n}\n- return tcpip.FullAddress{\n- NIC: ep.registeredNIC,\n- Addr: ep.route.RemoteAddress,\n- }, nil\n-}\n-\n// Readiness implements tcpip.Endpoint.Readiness.\nfunc (ep *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {\n// The endpoint is always writable.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/raw_socket_ipv4.cc", "new_path": "test/syscalls/linux/raw_socket_ipv4.cc", "diff": "#include \"test/util/file_descriptor.h\"\n#include \"test/util/test_util.h\"\n+// Note: in order to run these tests, /proc/sys/net/ipv4/ping_group_range will\n+// need to be configured to let the superuser create ping sockets (see icmp(7)).\n+\nnamespace gvisor {\nnamespace testing {\n@@ -58,6 +61,9 @@ class RawSocketTest : public ::testing::Test {\nvoid ReceiveICMPFrom(char* recv_buf, size_t recv_buf_len,\nsize_t expected_size, struct sockaddr_in* src, int sock);\n+ // Compute the internet checksum of the ICMP header (assuming no payload).\n+ unsigned short Checksum(struct icmphdr* icmp);\n+\n// The socket used for both reading and writing.\nint s_;\n@@ -95,8 +101,9 @@ TEST_F(RawSocketTest, MultipleCreation) {\nASSERT_THAT(close(s2), SyscallSucceeds());\n}\n-// Send and receive an ICMP packet.\n-TEST_F(RawSocketTest, SendAndReceive) {\n+// We'll only read an echo in this case, as the kernel won't respond to the\n+// malformed ICMP checksum.\n+TEST_F(RawSocketTest, SendAndReceiveBadChecksum) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n// Prepare and send an ICMP packet. 
Use arbitrary junk for checksum, sequence,\n@@ -105,9 +112,39 @@ TEST_F(RawSocketTest, SendAndReceive) {\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2011;\n+ icmp.checksum = 0;\n+ icmp.un.echo.sequence = 2012;\n+ icmp.un.echo.id = 2014;\n+ ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\n+\n+ // Veryify that we get the echo, then that there's nothing else to read.\n+ char recv_buf[sizeof(icmp) + sizeof(struct iphdr)];\n+ struct sockaddr_in src;\n+ ASSERT_NO_FATAL_FAILURE(\n+ ReceiveICMP(recv_buf, sizeof(recv_buf), sizeof(struct icmphdr), &src));\n+ EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);\n+ // The packet should be identical to what we sent.\n+ EXPECT_EQ(memcmp(recv_buf + sizeof(struct iphdr), &icmp, sizeof(icmp)), 0);\n+\n+ // And there should be nothing left to read.\n+ EXPECT_THAT(RetryEINTR(recv)(s_, recv_buf, sizeof(recv_buf), MSG_DONTWAIT),\n+ SyscallFailsWithErrno(EAGAIN));\n+}\n+\n+// Send and receive an ICMP packet.\n+TEST_F(RawSocketTest, SendAndReceive) {\n+ SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n+\n+ // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.\n+ // None of that should matter for raw sockets - the kernel should still give\n+ // us the packet.\n+ struct icmphdr icmp;\n+ icmp.type = ICMP_ECHO;\n+ icmp.code = 0;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2012;\nicmp.un.echo.id = 2014;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\nASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));\n@@ -121,29 +158,30 @@ TEST_F(RawSocketTest, MultipleSocketReceive) {\nFileDescriptor s2 =\nASSERT_NO_ERRNO_AND_VALUE(Socket(AF_INET, SOCK_RAW, IPPROTO_ICMP));\n- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,\n- // and ID. None of that should matter for raw sockets - the kernel should\n- // still give us the packet.\n+ // Prepare and send an ICMP packet. 
Use arbitrary junk for sequence and ID.\n+ // None of that should matter for raw sockets - the kernel should still give\n+ // us the packet.\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2014;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2016;\nicmp.un.echo.id = 2018;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\n// Both sockets will receive the echo request and reply in indeterminate\n// order, so we'll need to read 2 packets from each.\n// Receive on socket 1.\n- constexpr int kBufSize = 256;\n+ constexpr int kBufSize = sizeof(icmp) + sizeof(struct iphdr);\nstd::vector<char[kBufSize]> recv_buf1(2);\nstruct sockaddr_in src;\nfor (int i = 0; i < 2; i++) {\nASSERT_NO_FATAL_FAILURE(ReceiveICMP(recv_buf1[i],\nABSL_ARRAYSIZE(recv_buf1[i]),\nsizeof(struct icmphdr), &src));\n- EXPECT_EQ(memcmp(&src, &addr_, sizeof(sockaddr_in)), 0);\n+ EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);\n}\n// Receive on socket 2.\n@@ -152,7 +190,7 @@ TEST_F(RawSocketTest, MultipleSocketReceive) {\nASSERT_NO_FATAL_FAILURE(\nReceiveICMPFrom(recv_buf2[i], ABSL_ARRAYSIZE(recv_buf2[i]),\nsizeof(struct icmphdr), &src, s2.get()));\n- EXPECT_EQ(memcmp(&src, &addr_, sizeof(sockaddr_in)), 0);\n+ EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);\n}\n// Ensure both sockets receive identical packets.\n@@ -193,47 +231,34 @@ TEST_F(RawSocketTest, RawAndPingSockets) {\nsizeof(addr_)),\nSyscallSucceedsWithValue(sizeof(icmp)));\n- // Both sockets will receive the echo request and reply in indeterminate\n- // order, so we'll need to read 2 packets from each.\n-\n- // Receive on socket 1.\n- constexpr int kBufSize = 256;\n+ // Receive on socket 1, which receives the echo request and reply in\n+ // indeterminate order.\n+ constexpr int kBufSize = sizeof(icmp) + sizeof(struct iphdr);\nstd::vector<char[kBufSize]> recv_buf1(2);\nstruct sockaddr_in src;\nfor (int i = 0; i < 2; i++) {\nASSERT_NO_FATAL_FAILURE(\nReceiveICMP(recv_buf1[i], kBufSize, sizeof(struct icmphdr), &src));\n- EXPECT_EQ(memcmp(&src, &addr_, sizeof(sockaddr_in)), 0);\n+ EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);\n}\n- // Receive on socket 2.\n- std::vector<char[kBufSize]> recv_buf2(2);\n- for (int i = 0; i < 2; i++) {\n- ASSERT_THAT(RetryEINTR(recv)(ping_sock.get(), recv_buf2[i], kBufSize, 0),\n+ // Receive on socket 2. 
Ping sockets only get the echo reply, not the initial\n+ // echo.\n+ char ping_recv_buf[kBufSize];\n+ ASSERT_THAT(RetryEINTR(recv)(ping_sock.get(), ping_recv_buf, kBufSize, 0),\nSyscallSucceedsWithValue(sizeof(struct icmphdr)));\n- }\n- // Ensure both sockets receive identical packets.\n- int types[] = {ICMP_ECHO, ICMP_ECHOREPLY};\n- for (int type : types) {\n- auto match_type_ping = [=](char buf[kBufSize]) {\n- struct icmphdr* icmp = reinterpret_cast<struct icmphdr*>(buf);\n- return icmp->type == type;\n- };\n+ // Ensure both sockets receive identical echo reply packets.\nauto match_type_raw = [=](char buf[kBufSize]) {\nstruct icmphdr* icmp =\nreinterpret_cast<struct icmphdr*>(buf + sizeof(struct iphdr));\n- return icmp->type == type;\n+ return icmp->type == ICMP_ECHOREPLY;\n};\n-\n- char *icmp1 =\n+ char* raw_reply =\n*std::find_if(recv_buf1.begin(), recv_buf1.end(), match_type_raw);\n- char *icmp2 =\n- *std::find_if(recv_buf2.begin(), recv_buf2.end(), match_type_ping);\n- ASSERT_NE(icmp1, *recv_buf1.end());\n- ASSERT_NE(icmp2, *recv_buf2.end());\n- EXPECT_EQ(memcmp(icmp1 + sizeof(struct iphdr), icmp2, sizeof(icmp)), 0);\n- }\n+ ASSERT_NE(raw_reply, *recv_buf1.end());\n+ EXPECT_EQ(\n+ memcmp(raw_reply + sizeof(struct iphdr), ping_recv_buf, sizeof(icmp)), 0);\n}\n// Test that shutting down an unconnected socket fails.\n@@ -244,8 +269,8 @@ TEST_F(RawSocketTest, FailShutdownWithoutConnect) {\nASSERT_THAT(shutdown(s_, SHUT_RD), SyscallFailsWithErrno(ENOTCONN));\n}\n-// Test that writing to a shutdown write socket fails.\n-TEST_F(RawSocketTest, FailWritingToShutdown) {\n+// Shutdown is a no-op for raw sockets (and datagram sockets in general).\n+TEST_F(RawSocketTest, ShutdownWriteNoop) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n@@ -253,13 +278,13 @@ TEST_F(RawSocketTest, FailWritingToShutdown) {\nSyscallSucceeds());\nASSERT_THAT(shutdown(s_, SHUT_WR), SyscallSucceeds());\n- char c;\n- ASSERT_THAT(RetryEINTR(write)(s_, &c, sizeof(c)),\n- SyscallFailsWithErrno(EPIPE));\n+ constexpr char kBuf[] = \"noop\";\n+ ASSERT_THAT(RetryEINTR(write)(s_, kBuf, sizeof(kBuf)),\n+ SyscallSucceedsWithValue(sizeof(kBuf)));\n}\n-// Test that reading from a shutdown read socket gets nothing.\n-TEST_F(RawSocketTest, FailReadingFromShutdown) {\n+// Shutdown is a no-op for raw sockets (and datagram sockets in general).\n+TEST_F(RawSocketTest, ShutdownReadNoop) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nASSERT_THAT(\n@@ -267,8 +292,18 @@ TEST_F(RawSocketTest, FailReadingFromShutdown) {\nSyscallSucceeds());\nASSERT_THAT(shutdown(s_, SHUT_RD), SyscallSucceeds());\n- char c;\n- ASSERT_THAT(read(s_, &c, sizeof(c)), SyscallSucceedsWithValue(0));\n+ struct icmphdr icmp;\n+ icmp.type = ICMP_ECHO;\n+ icmp.code = 0;\n+ icmp.checksum = 0;\n+ icmp.un.echo.sequence = 2012;\n+ icmp.un.echo.id = 2014;\n+ icmp.checksum = Checksum(&icmp);\n+ ASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\n+\n+ char c[sizeof(icmp) + sizeof(struct iphdr)];\n+ ASSERT_THAT(read(s_, &c, sizeof(c)),\n+ SyscallSucceedsWithValue(sizeof(icmp) + sizeof(struct iphdr)));\n}\n// Test that listen() fails.\n@@ -292,7 +327,7 @@ TEST_F(RawSocketTest, FailGetPeerNameBeforeConnect) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\nstruct sockaddr saddr;\n- socklen_t addrlen;\n+ socklen_t addrlen = sizeof(saddr);\nASSERT_THAT(getpeername(s_, &saddr, &addrlen),\nSyscallFailsWithErrno(ENOTCONN));\n}\n@@ -305,8 +340,9 @@ TEST_F(RawSocketTest, GetPeerName) {\nconnect(s_, 
reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceeds());\nstruct sockaddr saddr;\n- socklen_t addrlen;\n- ASSERT_THAT(getpeername(s_, &saddr, &addrlen), SyscallSucceeds());\n+ socklen_t addrlen = sizeof(saddr);\n+ ASSERT_THAT(getpeername(s_, &saddr, &addrlen),\n+ SyscallFailsWithErrno(ENOTCONN));\nASSERT_GT(addrlen, 0);\n}\n@@ -362,15 +398,16 @@ TEST_F(RawSocketTest, SendAndReceiveViaConnect) {\nconnect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceeds());\n- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,\n- // and ID. None of that should matter for raw sockets - the kernel should\n- // still give us the packet.\n+ // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.\n+ // None of that should matter for raw sockets - the kernel should still give\n+ // us the packet.\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2001;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2003;\nicmp.un.echo.id = 2004;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_THAT(send(s_, &icmp, sizeof(icmp), 0),\nSyscallSucceedsWithValue(sizeof(icmp)));\n@@ -381,17 +418,18 @@ TEST_F(RawSocketTest, SendAndReceiveViaConnect) {\nTEST_F(RawSocketTest, SendWithoutConnectFails) {\nSKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_NET_RAW)));\n- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,\n- // and ID. None of that should matter for raw sockets - the kernel should\n- // still give us the packet.\n+ // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.\n+ // None of that should matter for raw sockets - the kernel should still give\n+ // us the packet.\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2015;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2017;\nicmp.un.echo.id = 2019;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_THAT(send(s_, &icmp, sizeof(icmp), 0),\n- SyscallFailsWithErrno(ENOTCONN));\n+ SyscallFailsWithErrno(EDESTADDRREQ));\n}\n// Bind to localhost.\n@@ -423,15 +461,16 @@ TEST_F(RawSocketTest, BindSendAndReceive) {\nbind(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceeds());\n- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,\n- // and ID. None of that should matter for raw sockets - the kernel should\n- // still give us the packet.\n+ // Prepare and send an ICMP packet. Use arbitrary junk for sequence and ID.\n+ // None of that should matter for raw sockets - the kernel should still give\n+ // us the packet.\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2001;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2004;\nicmp.un.echo.id = 2007;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\nASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));\n@@ -448,15 +487,16 @@ TEST_F(RawSocketTest, BindConnectSendAndReceive) {\nconnect(s_, reinterpret_cast<struct sockaddr*>(&addr_), sizeof(addr_)),\nSyscallSucceeds());\n- // Prepare and send an ICMP packet. Use arbitrary junk for checksum, sequence,\n+ // Prepare and send an ICMP packet. Use arbitrary junk for sequence\n// and ID. 
None of that should matter for raw sockets - the kernel should\n// still give us the packet.\nstruct icmphdr icmp;\nicmp.type = ICMP_ECHO;\nicmp.code = 0;\n- icmp.checksum = 2009;\n+ icmp.checksum = 0;\nicmp.un.echo.sequence = 2010;\nicmp.un.echo.id = 7;\n+ icmp.checksum = Checksum(&icmp);\nASSERT_NO_FATAL_FAILURE(SendEmptyICMP(icmp));\nASSERT_NO_FATAL_FAILURE(ExpectICMPSuccess(icmp));\n@@ -465,7 +505,7 @@ TEST_F(RawSocketTest, BindConnectSendAndReceive) {\nvoid RawSocketTest::ExpectICMPSuccess(const struct icmphdr& icmp) {\n// We're going to receive both the echo request and reply, but the order is\n// indeterminate.\n- char recv_buf[512];\n+ char recv_buf[sizeof(icmp) + sizeof(struct iphdr)];\nstruct sockaddr_in src;\nbool received_request = false;\nbool received_reply = false;\n@@ -474,7 +514,7 @@ void RawSocketTest::ExpectICMPSuccess(const struct icmphdr& icmp) {\n// Receive the packet.\nASSERT_NO_FATAL_FAILURE(ReceiveICMP(recv_buf, ABSL_ARRAYSIZE(recv_buf),\nsizeof(struct icmphdr), &src));\n- EXPECT_EQ(memcmp(&src, &addr_, sizeof(sockaddr_in)), 0);\n+ EXPECT_EQ(memcmp(&src, &addr_, sizeof(src)), 0);\nstruct icmphdr* recvd_icmp =\nreinterpret_cast<struct icmphdr*>(recv_buf + sizeof(struct iphdr));\nswitch (recvd_icmp->type) {\n@@ -527,6 +567,28 @@ void RawSocketTest::SendEmptyICMPTo(int sock, struct sockaddr_in* addr,\nASSERT_THAT(sendmsg(sock, &msg, 0), SyscallSucceedsWithValue(sizeof(icmp)));\n}\n+unsigned short RawSocketTest::Checksum(struct icmphdr* icmp) {\n+ unsigned int total = 0;\n+ unsigned short* num = reinterpret_cast<unsigned short*>(icmp);\n+\n+ // This is just the ICMP header, so there's an even number of bytes.\n+ for (unsigned int i = 0; i < sizeof(*icmp); i += sizeof(*num)) {\n+ total += *num;\n+ num++;\n+ }\n+\n+ // Combine the upper and lower 16 bits. This happens twice in case the first\n+ // combination causes a carry.\n+ unsigned short upper = total >> 16;\n+ unsigned short lower = total & 0xffff;\n+ total = upper + lower;\n+ upper = total >> 16;\n+ lower = total & 0xffff;\n+ total = upper + lower;\n+\n+ return ~total;\n+}\n+\nvoid RawSocketTest::ReceiveICMP(char* recv_buf, size_t recv_buf_len,\nsize_t expected_size, struct sockaddr_in* src) {\nASSERT_NO_FATAL_FAILURE(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/syscall_test_runner.go", "new_path": "test/syscalls/syscall_test_runner.go", "diff": "@@ -187,6 +187,7 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n\"--network=none\",\n\"-log-format=text\",\n\"-TESTONLY-unsafe-nonroot=true\",\n+ \"--net-raw=true\",\n}\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n" } ]
Go
Apache License 2.0
google/gvisor
Fix raw socket behavior and tests. Some behavior was broken due to the difficulty of running automated raw socket tests. Change-Id: I152ca53916bb24a0208f2dc1c4f5bc87f4724ff6 PiperOrigin-RevId: 246747067
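The test changes above add a checksum helper because the stack now verifies the ICMP checksum before echoing a packet. As a rough illustration of that computation (a sketch, not the netstack or test code; the sample header bytes are invented), the RFC 1071 ones'-complement checksum looks like this in Go:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// checksum computes the 16-bit ones'-complement internet checksum (RFC 1071)
// over buf. The checksum field inside buf must be zeroed before calling.
func checksum(buf []byte) uint16 {
	var sum uint32
	for len(buf) >= 2 {
		sum += uint32(binary.BigEndian.Uint16(buf))
		buf = buf[2:]
	}
	if len(buf) == 1 {
		sum += uint32(buf[0]) << 8 // pad a trailing odd byte
	}
	// Fold any carries back into the low 16 bits; looping covers the case
	// where the first fold itself produces a carry.
	for sum>>16 != 0 {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return ^uint16(sum)
}

func main() {
	// Hypothetical 8-byte ICMP echo header: type=8, code=0, checksum=0,
	// id=0x07de, sequence=0x07dc.
	hdr := []byte{8, 0, 0, 0, 0x07, 0xde, 0x07, 0xdc}
	fmt.Printf("checksum: %#04x\n", checksum(hdr))
}
```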
259,885
06.05.2019 16:38:37
25,200
14f0e7618e28dac78ca7b00ec61fcec062159009
Ensure all uses of MM.brk occur under MM.mappingMu in MM.Brk().
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/mm/syscalls.go", "new_path": "pkg/sentry/mm/syscalls.go", "diff": "@@ -694,8 +694,9 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad\n// Can't defer mm.mappingMu.Unlock(); see below.\nif addr < mm.brk.Start {\n+ addr = mm.brk.End\nmm.mappingMu.Unlock()\n- return mm.brk.End, syserror.EINVAL\n+ return addr, syserror.EINVAL\n}\n// TODO(gvisor.dev/issue/156): This enforces RLIMIT_DATA, but is\n@@ -704,22 +705,20 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad\n// size of heap + data + bss. The segment sizes need to be plumbed from\n// the loader package to fully enforce RLIMIT_DATA.\nif uint64(addr-mm.brk.Start) > limits.FromContext(ctx).Get(limits.Data).Cur {\n+ addr = mm.brk.End\nmm.mappingMu.Unlock()\n- return mm.brk.End, syserror.ENOMEM\n+ return addr, syserror.ENOMEM\n}\noldbrkpg, _ := mm.brk.End.RoundUp()\nnewbrkpg, ok := addr.RoundUp()\nif !ok {\n+ addr = mm.brk.End\nmm.mappingMu.Unlock()\n- return mm.brk.End, syserror.EFAULT\n+ return addr, syserror.EFAULT\n}\nswitch {\n- case newbrkpg < oldbrkpg:\n- mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg})\n- mm.mappingMu.Unlock()\n-\ncase oldbrkpg < newbrkpg:\nvseg, ar, err := mm.createVMALocked(ctx, memmap.MMapOpts{\nLength: uint64(newbrkpg - oldbrkpg),\n@@ -736,21 +735,26 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad\nHint: \"[heap]\",\n})\nif err != nil {\n+ addr = mm.brk.End\nmm.mappingMu.Unlock()\n- return mm.brk.End, err\n+ return addr, err\n}\n+ mm.brk.End = addr\nif mm.defMLockMode == memmap.MLockEager {\nmm.populateVMAAndUnlock(ctx, vseg, ar, true)\n} else {\nmm.mappingMu.Unlock()\n}\n+ case newbrkpg < oldbrkpg:\n+ mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg})\n+ fallthrough\n+\ndefault:\n- // Nothing to do.\n+ mm.brk.End = addr\nmm.mappingMu.Unlock()\n}\n- mm.brk.End = addr\nreturn addr, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure all uses of MM.brk occur under MM.mappingMu in MM.Brk(). PiperOrigin-RevId: 246921386 Change-Id: I71d8908858f45a9a33a0483470d0240eaf0fd012
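The change above is about lock discipline rather than new behaviour: every read of mm.brk now happens with mm.mappingMu held, and the error paths return a value copied under the lock instead of touching the field after unlocking. A simplified, hedged Go sketch of that pattern (stand-in types and a hypothetical bound, not the gvisor implementation):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// brkState stands in for memory-manager state whose brkEnd field is guarded
// by mu; the real types carry much more.
type brkState struct {
	mu     sync.Mutex
	brkEnd uintptr
}

// setBrk copies the guarded field into a local while mu is still held and
// only uses the local after unlocking, so no access to brkEnd can race.
func (s *brkState) setBrk(addr uintptr) (uintptr, error) {
	s.mu.Lock()
	if addr < 0x1000 { // hypothetical lower bound on the heap
		addr = s.brkEnd // snapshot under the lock...
		s.mu.Unlock()
		return addr, errors.New("invalid brk address") // ...and return only the snapshot
	}
	s.brkEnd = addr
	s.mu.Unlock()
	return addr, nil
}

func main() {
	s := &brkState{brkEnd: 0x2000}
	fmt.Println(s.setBrk(0x10))
	fmt.Println(s.setBrk(0x3000))
}
```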
259,884
22.04.2019 22:13:11
14,400
0b4eca7b32702a3d88435cc0afb0e169e5c274f0
Fix links to syscall man pages (refs: #50)
[ { "change_type": "MODIFY", "old_path": "content/docs/user_guide/compatibility/amd64.md", "new_path": "content/docs/user_guide/compatibility/amd64.md", "diff": "@@ -73,8 +73,8 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS; Obsolete</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"setpersonality\"></a>135</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/setpersonality.2.html\" target=\"_blank\" rel=\"noopener\">setpersonality</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"personality\"></a>135</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/personality.2.html\" target=\"_blank\" rel=\"noopener\">personality</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EINVAL; Unable to change personality</td>\n@@ -94,15 +94,15 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"schedsetparam\"></a>142</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/schedsetparam.2.html\" target=\"_blank\" rel=\"noopener\">schedsetparam</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"sched_setparam\"></a>142</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/sched_setparam.2.html\" target=\"_blank\" rel=\"noopener\">sched_setparam</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise</td>\n</tr>\n<tr>\n<td><a class=\"doc-table-anchor\" id=\"schedrrgetinterval\"></a>148</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/schedrrgetinterval.2.html\" target=\"_blank\" rel=\"noopener\">schedrrgetinterval</a></td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/sched_rr_get_interval.2.html\" target=\"_blank\" rel=\"noopener\">sched_rr_get_interval</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM</td>\n@@ -115,15 +115,15 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns EPERM</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"modifyldt\"></a>154</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/modifyldt.2.html\" target=\"_blank\" rel=\"noopener\">modifyldt</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"modify_ldt\"></a>154</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/modify_ldt.2.html\" target=\"_blank\" rel=\"noopener\">modify_ldt</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"pivotroot\"></a>155</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/pivotroot.2.html\" target=\"_blank\" rel=\"noopener\">pivotroot</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"pivot_root\"></a>155</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/pivot_root.2.html\" target=\"_blank\" rel=\"noopener\">pivot_root</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM</td>\n@@ -192,36 +192,36 @@ syscalls. 
231 syscalls are not yet documented.\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_rawio; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"createmodule\"></a>174</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/createmodule.2.html\" target=\"_blank\" rel=\"noopener\">createmodule</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"create_module\"></a>174</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/create_module.2.html\" target=\"_blank\" rel=\"noopener\">create_module</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_module; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"initmodule\"></a>175</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/initmodule.2.html\" target=\"_blank\" rel=\"noopener\">initmodule</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"init_module\"></a>175</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/init_module.2.html\" target=\"_blank\" rel=\"noopener\">init_module</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_module; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"deletemodule\"></a>176</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/deletemodule.2.html\" target=\"_blank\" rel=\"noopener\">deletemodule</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"delete_module\"></a>176</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/delete_module.2.html\" target=\"_blank\" rel=\"noopener\">delete_module</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_module; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"getkernelsyms\"></a>177</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/getkernelsyms.2.html\" target=\"_blank\" rel=\"noopener\">getkernelsyms</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"get_kernel_syms\"></a>177</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/get_kernel_syms.2.html\" target=\"_blank\" rel=\"noopener\">get_kernel_syms</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Not supported in > 2.6</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"querymodule\"></a>178</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/querymodule.2.html\" target=\"_blank\" rel=\"noopener\">querymodule</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"query_module\"></a>178</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/query_module.2.html\" target=\"_blank\" rel=\"noopener\">query_module</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Not supported in > 2.6</td>\n@@ -255,8 +255,8 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS; Not implemented in Linux</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"afssyscall\"></a>183</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/afssyscall.2.html\" target=\"_blank\" rel=\"noopener\">afssyscall</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"afs_syscall\"></a>183</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/afs_syscall.2.html\" target=\"_blank\" rel=\"noopener\">afs_syscall</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Not implemented in Linux</td>\n@@ -367,43 +367,43 @@ syscalls. 
231 syscalls are not yet documented.\n<td>Returns ENOTSUP; Requires filesystem support</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"setthreadarea\"></a>205</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/setthreadarea.2.html\" target=\"_blank\" rel=\"noopener\">setthreadarea</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"set_thread_area\"></a>205</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/set_thread_area.2.html\" target=\"_blank\" rel=\"noopener\">set_thread_area</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Expected to return ENOSYS on 64-bit</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"getthreadarea\"></a>211</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/getthreadarea.2.html\" target=\"_blank\" rel=\"noopener\">getthreadarea</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"get_thread_area\"></a>211</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/get_thread_area.2.html\" target=\"_blank\" rel=\"noopener\">get_thread_area</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Expected to return ENOSYS on 64-bit</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"lookupdcookie\"></a>212</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/lookupdcookie.2.html\" target=\"_blank\" rel=\"noopener\">lookupdcookie</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"lookup_dcookie\"></a>212</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/lookup_dcookie.2.html\" target=\"_blank\" rel=\"noopener\">lookup_dcookie</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_admin; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"epollctlold\"></a>214</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/epollctlold.2.html\" target=\"_blank\" rel=\"noopener\">epollctlold</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"epoll_ctl_old\"></a>214</td>\n+ <td>epoll_ctl_old</td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Deprecated</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"epollwaitold\"></a>215</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/epollwaitold.2.html\" target=\"_blank\" rel=\"noopener\">epollwaitold</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"epoll_wait_old\"></a>215</td>\n+ <td>epoll_wait_old</td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Deprecated</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"remapfilepages\"></a>216</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/remapfilepages.2.html\" target=\"_blank\" rel=\"noopener\">remapfilepages</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"remap_file_pages\"></a>216</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/remap_file_pages.2.html\" target=\"_blank\" rel=\"noopener\">remap_file_pages</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Deprecated</td>\n@@ -430,57 +430,57 @@ syscalls. 
231 syscalls are not yet documented.\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqopen\"></a>240</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqopen.2.html\" target=\"_blank\" rel=\"noopener\">mqopen</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_open\"></a>240</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_open.2.html\" target=\"_blank\" rel=\"noopener\">mq_open</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqunlink\"></a>241</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqunlink.2.html\" target=\"_blank\" rel=\"noopener\">mqunlink</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_unlink\"></a>241</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_unlink.2.html\" target=\"_blank\" rel=\"noopener\">mq_unlink</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqtimedsend\"></a>242</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqtimedsend.2.html\" target=\"_blank\" rel=\"noopener\">mqtimedsend</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_timedsend\"></a>242</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_timedsend.2.html\" target=\"_blank\" rel=\"noopener\">mq_timedsend</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqtimedreceive\"></a>243</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqtimedreceive.2.html\" target=\"_blank\" rel=\"noopener\">mqtimedreceive</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_timedreceive\"></a>243</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_timedreceive.2.html\" target=\"_blank\" rel=\"noopener\">mq_timedreceive</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqnotify\"></a>244</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqnotify.2.html\" target=\"_blank\" rel=\"noopener\">mqnotify</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_notify\"></a>244</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_notify.2.html\" target=\"_blank\" rel=\"noopener\">mq_notify</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"mqgetsetattr\"></a>245</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/mqgetsetattr.2.html\" target=\"_blank\" rel=\"noopener\">mqgetsetattr</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"mq_getsetattr\"></a>245</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/mq_getsetattr.2.html\" target=\"_blank\" rel=\"noopener\">mq_getsetattr</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"addkey\"></a>248</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/addkey.2.html\" target=\"_blank\" rel=\"noopener\">addkey</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"add_key\"></a>248</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/add_key.2.html\" target=\"_blank\" rel=\"noopener\">add_key</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EACCES; Not available to user</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"requestkey\"></a>249</td>\n- <td><a 
href=\"http://man7.org/linux/man-pages/man2/requestkey.2.html\" target=\"_blank\" rel=\"noopener\">requestkey</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"request_key\"></a>249</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/request_key.2.html\" target=\"_blank\" rel=\"noopener\">request_key</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EACCES; Not available to user</td>\n@@ -493,36 +493,36 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns EACCES; Not available to user</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"ioprioset\"></a>251</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/ioprioset.2.html\" target=\"_blank\" rel=\"noopener\">ioprioset</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"ioprio_set\"></a>251</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/ioprio_set.2.html\" target=\"_blank\" rel=\"noopener\">ioprio_set</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_admin; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"ioprioget\"></a>252</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/ioprioget.2.html\" target=\"_blank\" rel=\"noopener\">ioprioget</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"ioprio_get\"></a>252</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/ioprio_get.2.html\" target=\"_blank\" rel=\"noopener\">ioprio_get</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_admin; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"migratepages\"></a>256</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/migratepages.2.html\" target=\"_blank\" rel=\"noopener\">migratepages</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"migrate_pages\"></a>256</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/migrate_pages.2.html\" target=\"_blank\" rel=\"noopener\">migrate_pages</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"setrobustlist\"></a>273</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/setrobustlist.2.html\" target=\"_blank\" rel=\"noopener\">setrobustlist</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"set_robust_list\"></a>273</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/set_robust_list.2.html\" target=\"_blank\" rel=\"noopener\">set_robust_list</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Obsolete</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"getrobustlist\"></a>274</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/getrobustlist.2.html\" target=\"_blank\" rel=\"noopener\">getrobustlist</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"get_robust_list\"></a>274</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/get_robust_list.2.html\" target=\"_blank\" rel=\"noopener\">get_robust_list</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Obsolete</td>\n@@ -549,8 +549,8 @@ syscalls. 
231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"movepages\"></a>279</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/movepages.2.html\" target=\"_blank\" rel=\"noopener\">movepages</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"move_pages\"></a>279</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/move_pages.2.html\" target=\"_blank\" rel=\"noopener\">move_pages</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise</td>\n@@ -570,43 +570,43 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"perfeventopen\"></a>298</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/perfeventopen.2.html\" target=\"_blank\" rel=\"noopener\">perfeventopen</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"perf_event_open\"></a>298</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/perf_event_open.2.html\" target=\"_blank\" rel=\"noopener\">perf_event_open</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns ENODEV; No support for perf counters</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"fanotifyinit\"></a>300</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/fanotifyinit.2.html\" target=\"_blank\" rel=\"noopener\">fanotifyinit</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"fanotify_init\"></a>300</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/fanotify_init.2.html\" target=\"_blank\" rel=\"noopener\">fanotify_init</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Needs CONFIG_FANOTIFY</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"fanotifymark\"></a>301</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/fanotifymark.2.html\" target=\"_blank\" rel=\"noopener\">fanotifymark</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"fanotify_mark\"></a>301</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/fanotify_mark.2.html\" target=\"_blank\" rel=\"noopener\">fanotify_mark</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS; Needs CONFIG_FANOTIFY</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"nametohandleat\"></a>303</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/nametohandleat.2.html\" target=\"_blank\" rel=\"noopener\">nametohandleat</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"name_to_handle_at\"></a>303</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/name_to_handle_at.2.html\" target=\"_blank\" rel=\"noopener\">name_to_handle_at</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EOPNOTSUPP; Needs filesystem support</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"openbyhandleat\"></a>304</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/openbyhandleat.2.html\" target=\"_blank\" rel=\"noopener\">openbyhandleat</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"open_by_handle_at\"></a>304</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/open_by_handle_at.2.html\" target=\"_blank\" rel=\"noopener\">open_by_handle_at</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EOPNOTSUPP; Needs filesystem support</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"clockadjtime\"></a>305</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/clockadjtime.2.html\" target=\"_blank\" rel=\"noopener\">clockadjtime</a></td>\n+ <td><a class=\"doc-table-anchor\" 
id=\"clock_adjtime\"></a>305</td>\n+ <td>clock_adjtime</td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_module; ENOSYS otherwise</td>\n@@ -619,15 +619,15 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"processvmreadv\"></a>310</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/processvmreadv.2.html\" target=\"_blank\" rel=\"noopener\">processvmreadv</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"process_vm_readv\"></a>310</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/process_vm_readv.2.html\" target=\"_blank\" rel=\"noopener\">process_vm_readv</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"processvmwritev\"></a>311</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/processvmwritev.2.html\" target=\"_blank\" rel=\"noopener\">processvmwritev</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"process_vm_writev\"></a>311</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/process_vm_writev.2.html\" target=\"_blank\" rel=\"noopener\">process_vm_writev</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n@@ -640,22 +640,22 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns EPERM or ENOSYS; Requires cap_sys_ptrace</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"finitmodule\"></a>313</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/finitmodule.2.html\" target=\"_blank\" rel=\"noopener\">finitmodule</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"finit_module\"></a>313</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/finit_module.2.html\" target=\"_blank\" rel=\"noopener\">finit_module</a></td>\n<td>Partial</td>\n<td></td>\n<td>Returns EPERM or ENOSYS; Returns EPERM if the process does not have cap_sys_module; ENOSYS otherwise</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"schedsetattr\"></a>314</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/schedsetattr.2.html\" target=\"_blank\" rel=\"noopener\">schedsetattr</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"sched_setattr\"></a>314</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/sched_setattr.2.html\" target=\"_blank\" rel=\"noopener\">sched_setattr</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"schedgetattr\"></a>315</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/schedgetattr.2.html\" target=\"_blank\" rel=\"noopener\">schedgetattr</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"sched_getattr\"></a>315</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/sched_getattr.2.html\" target=\"_blank\" rel=\"noopener\">sched_getattr</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n@@ -668,8 +668,8 @@ syscalls. 231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"memfdcreate\"></a>319</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/memfdcreate.2.html\" target=\"_blank\" rel=\"noopener\">memfdcreate</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"memfd_create\"></a>319</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/memfd_create.2.html\" target=\"_blank\" rel=\"noopener\">memfd_create</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n@@ -703,8 +703,8 @@ syscalls. 
231 syscalls are not yet documented.\n<td>Returns ENOSYS</td>\n</tr>\n<tr>\n- <td><a class=\"doc-table-anchor\" id=\"copyfilerange\"></a>326</td>\n- <td><a href=\"http://man7.org/linux/man-pages/man2/copyfilerange.2.html\" target=\"_blank\" rel=\"noopener\">copyfilerange</a></td>\n+ <td><a class=\"doc-table-anchor\" id=\"copy_file_range\"></a>326</td>\n+ <td><a href=\"http://man7.org/linux/man-pages/man2/copy_file_range.2.html\" target=\"_blank\" rel=\"noopener\">copy_file_range</a></td>\n<td>Unimplemented</td>\n<td></td>\n<td>Returns ENOSYS</td>\n" } ]
Go
Apache License 2.0
google/gvisor
Fix links to syscall man pages (refs: #50)
259,858
29.04.2019 10:00:02
25,200
0f5e7e011cd3f023fa69675c10ad022f0dc7f712
build: fix deploy target for Makefile. Because the go.mod and related files were not being copied, the packages are resolved within the current $GOROOT, which does not work. This change copies all application files, not just Go source files and app.yaml.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -5,21 +5,19 @@ NPM := npm\nGCLOUD := gcloud\nGCP_PROJECT := gvisor-website\n-# Source Go files. example: main.go foo/bar.go\n-GO_SOURCE = $(shell find cmd/gvisor-website -type f -name \"*.go\" | sed 's/ /\\\\ /g')\n-# Target Go files. example: public/main.go public/foo/bar.go\n-GO_TARGET = $(shell cd cmd/gvisor-website && find . -type f -name \"*.go\" | sed 's/ /\\\\ /g' | sed 's/^.\\//public\\//')\n+# Source Go files, example: main.go, foo/bar.go.\n+APP_SOURCE = $(wildcard cmd/gvisor-website/*)\n+# Target Go files, example: public/main.go, public/foo/bar.go.\n+APP_TARGET = $(patsubst cmd/gvisor-website/%,public/%,$(APP_SOURCE))\ndefault: website\n.PHONY: default\n-website: all-upstream public/app.yaml $(GO_TARGET) public/static\n+website: all-upstream $(APP_TARGET) public/static\n.PHONY: website\npublic:\nmkdir -p public\n-public/app.yaml: public\n- cp -vr cmd/gvisor-website/app.yaml public/\n# Load repositories.\nupstream:\n@@ -39,8 +37,8 @@ content/docs/community/sigs: upstream/community $(wildcard upstream/community/si\ncat upstream/community/sigs/$$file.md |grep -v -E '^# ' >> content/docs/community/sigs/$$file.md; \\\ndone\n-$(GO_TARGET): public $(GO_SOURCE)\n- cd cmd/gvisor-website && find . -name \"*.go\" -exec cp --parents \\{\\} ../../public \\;\n+$(APP_TARGET): public $(APP_SOURCE)\n+ cp -a cmd/gvisor-website/$(patsubst public/%,%,$@) public/\npublic/static: node_modules config.toml $(shell find archetypes assets content themes -type f | sed 's/ /\\\\ /g')\nHUGO_ENV=\"production\" $(HUGO)\n@@ -56,7 +54,7 @@ server: all-upstream\n.PHONY: server\n# Deploy the website to App Engine.\n-deploy: public/app.yaml\n+deploy: $(APP_TARGET)\ncd public && $(GCLOUD) app deploy\n.PHONY: deploy\n" } ]
Go
Apache License 2.0
google/gvisor
build: fix deploy target for Makefile Because the go.mod and related files were not being copying, the packages are resolved within the current $GOROOT, which does not work. This change copies all application files, not just Go source files and app.yaml.
259,992
07.05.2019 10:53:50
25,200
e5432fa1b365edcebf9c8c01e2c40ade3014f282
Remove defers from gofer.contextFile. Most are single-line methods in hot paths.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/context_file.go", "new_path": "pkg/sentry/fs/gofer/context_file.go", "diff": "@@ -28,156 +28,156 @@ type contextFile struct {\nfunc (c *contextFile) walk(ctx context.Context, names []string) ([]p9.QID, contextFile, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\nq, f, err := c.file.Walk(names)\nif err != nil {\n+ ctx.UninterruptibleSleepFinish(false)\nreturn nil, contextFile{}, err\n}\n+ ctx.UninterruptibleSleepFinish(false)\nreturn q, contextFile{file: f}, nil\n}\nfunc (c *contextFile) statFS(ctx context.Context) (p9.FSStat, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.StatFS()\n+ s, err := c.file.StatFS()\n+ ctx.UninterruptibleSleepFinish(false)\n+ return s, err\n}\nfunc (c *contextFile) getAttr(ctx context.Context, req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.GetAttr(req)\n+ q, m, a, err := c.file.GetAttr(req)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return q, m, a, err\n}\nfunc (c *contextFile) setAttr(ctx context.Context, valid p9.SetAttrMask, attr p9.SetAttr) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.SetAttr(valid, attr)\n+ err := c.file.SetAttr(valid, attr)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) rename(ctx context.Context, directory contextFile, name string) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Rename(directory.file, name)\n+ err := c.file.Rename(directory.file, name)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) close(ctx context.Context) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Close()\n+ err := c.file.Close()\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) open(ctx context.Context, mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Open(mode)\n+ f, q, u, err := c.file.Open(mode)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return f, q, u, err\n}\nfunc (c *contextFile) readAt(ctx context.Context, p []byte, offset uint64) (int, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.ReadAt(p, offset)\n+ n, err := c.file.ReadAt(p, offset)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return n, err\n}\nfunc (c *contextFile) writeAt(ctx context.Context, p []byte, offset uint64) (int, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.WriteAt(p, offset)\n+ n, err := c.file.WriteAt(p, offset)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return n, err\n}\nfunc (c *contextFile) fsync(ctx context.Context) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.FSync()\n+ err := c.file.FSync()\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) create(ctx context.Context, name string, flags p9.OpenFlags, permissions p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\nfd, _, _, _, 
err := c.file.Create(name, flags, permissions, uid, gid)\n+ ctx.UninterruptibleSleepFinish(false)\nreturn fd, err\n}\nfunc (c *contextFile) mkdir(ctx context.Context, name string, permissions p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Mkdir(name, permissions, uid, gid)\n+ q, err := c.file.Mkdir(name, permissions, uid, gid)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return q, err\n}\nfunc (c *contextFile) symlink(ctx context.Context, oldName string, newName string, uid p9.UID, gid p9.GID) (p9.QID, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Symlink(oldName, newName, uid, gid)\n+ q, err := c.file.Symlink(oldName, newName, uid, gid)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return q, err\n}\nfunc (c *contextFile) link(ctx context.Context, target *contextFile, newName string) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Link(target.file, newName)\n+ err := c.file.Link(target.file, newName)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) mknod(ctx context.Context, name string, permissions p9.FileMode, major uint32, minor uint32, uid p9.UID, gid p9.GID) (p9.QID, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Mknod(name, permissions, major, minor, uid, gid)\n+ q, err := c.file.Mknod(name, permissions, major, minor, uid, gid)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return q, err\n}\nfunc (c *contextFile) unlinkAt(ctx context.Context, name string, flags uint32) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.UnlinkAt(name, flags)\n+ err := c.file.UnlinkAt(name, flags)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) readdir(ctx context.Context, offset uint64, count uint32) ([]p9.Dirent, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Readdir(offset, count)\n+ d, err := c.file.Readdir(offset, count)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return d, err\n}\nfunc (c *contextFile) readlink(ctx context.Context) (string, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Readlink()\n+ s, err := c.file.Readlink()\n+ ctx.UninterruptibleSleepFinish(false)\n+ return s, err\n}\nfunc (c *contextFile) flush(ctx context.Context) error {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Flush()\n+ err := c.file.Flush()\n+ ctx.UninterruptibleSleepFinish(false)\n+ return err\n}\nfunc (c *contextFile) walkGetAttr(ctx context.Context, names []string) ([]p9.QID, contextFile, p9.AttrMask, p9.Attr, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\nq, f, m, a, err := c.file.WalkGetAttr(names)\nif err != nil {\n+ ctx.UninterruptibleSleepFinish(false)\nreturn nil, contextFile{}, p9.AttrMask{}, p9.Attr{}, err\n}\n+ ctx.UninterruptibleSleepFinish(false)\nreturn q, contextFile{file: f}, m, a, nil\n}\nfunc (c *contextFile) connect(ctx context.Context, flags p9.ConnectFlags) (*fd.FD, error) {\nctx.UninterruptibleSleepStart(false)\n- defer ctx.UninterruptibleSleepFinish(false)\n-\n- return c.file.Connect(flags)\n+ f, err := 
c.file.Connect(flags)\n+ ctx.UninterruptibleSleepFinish(false)\n+ return f, err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove defers from gofer.contextFile Most are single line methods in hot paths. PiperOrigin-RevId: 247050267 Change-Id: I428d78723fe00b57483185899dc8fa9e1f01e2ea
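The motivation above is the per-call cost of defer in tiny, hot methods: each contextFile wrapper does little more than bracket one RPC between sleep-tracking calls. A hedged before/after sketch with stand-in types (not the sentry's real context; note also that more recent Go releases have reduced defer overhead):

```go
package main

import "fmt"

type sleepTracker struct{ nesting int }

func (t *sleepTracker) UninterruptibleSleepStart(bool)  { t.nesting++ }
func (t *sleepTracker) UninterruptibleSleepFinish(bool) { t.nesting-- }

// With defer: concise, but the deferred call adds overhead to every
// invocation of a very small method.
func readWithDefer(t *sleepTracker, read func() (int, error)) (int, error) {
	t.UninterruptibleSleepStart(false)
	defer t.UninterruptibleSleepFinish(false)
	return read()
}

// Without defer: the finish call is written out on each return path, which
// is the shape the change above gives every contextFile method.
func readWithoutDefer(t *sleepTracker, read func() (int, error)) (int, error) {
	t.UninterruptibleSleepStart(false)
	n, err := read()
	t.UninterruptibleSleepFinish(false)
	return n, err
}

func main() {
	t := &sleepTracker{}
	fmt.Println(readWithoutDefer(t, func() (int, error) { return 42, nil }))
	fmt.Println(readWithDefer(t, func() (int, error) { return 7, nil }))
	fmt.Println("nesting:", t.nesting)
}
```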
259,854
07.05.2019 14:26:24
25,200
20862f0db27efac0eed3bb23d01b22b09bddfa27
Add gonet.DialContextTCP. Allows cancellation and timeouts.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet.go", "new_path": "pkg/tcpip/adapters/gonet/gonet.go", "diff": "package gonet\nimport (\n+ \"context\"\n\"errors\"\n\"io\"\n\"net\"\n@@ -495,6 +496,12 @@ func fullToUDPAddr(addr tcpip.FullAddress) *net.UDPAddr {\n// DialTCP creates a new TCP Conn connected to the specified address.\nfunc DialTCP(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*Conn, error) {\n+ return DialContextTCP(context.Background(), s, addr, network)\n+}\n+\n+// DialContextTCP creates a new TCP Conn connected to the specified address\n+// with the option of adding cancellation and timeouts.\n+func DialContextTCP(ctx context.Context, s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*Conn, error) {\n// Create TCP endpoint, then connect.\nvar wq waiter.Queue\nep, err := s.NewEndpoint(tcp.ProtocolNumber, network, &wq)\n@@ -509,9 +516,21 @@ func DialTCP(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtoc\nwq.EventRegister(&waitEntry, waiter.EventOut)\ndefer wq.EventUnregister(&waitEntry)\n+ select {\n+ case <-ctx.Done():\n+ return nil, ctx.Err()\n+ default:\n+ }\n+\nerr = ep.Connect(addr)\nif err == tcpip.ErrConnectStarted {\n- <-notifyCh\n+ select {\n+ case <-ctx.Done():\n+ ep.Close()\n+ return nil, ctx.Err()\n+ case <-notifyCh:\n+ }\n+\nerr = ep.GetSockOpt(tcpip.ErrorOption{})\n}\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "new_path": "pkg/tcpip/adapters/gonet/gonet_test.go", "diff": "package gonet\nimport (\n+ \"context\"\n\"fmt\"\n\"io\"\n\"net\"\n@@ -595,6 +596,48 @@ func TestTCPDialError(t *testing.T) {\n}\n}\n+func TestDialContextTCPCanceled(t *testing.T) {\n+ s, err := newLoopbackStack()\n+ if err != nil {\n+ t.Fatalf(\"newLoopbackStack() = %v\", err)\n+ }\n+\n+ addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)\n+\n+ ctx := context.Background()\n+ ctx, cancel := context.WithCancel(ctx)\n+ cancel()\n+\n+ if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.Canceled {\n+ t.Errorf(\"got DialContextTCP(...) = %v, want = %v\", err, context.Canceled)\n+ }\n+}\n+\n+func TestDialContextTCPTimeout(t *testing.T) {\n+ s, err := newLoopbackStack()\n+ if err != nil {\n+ t.Fatalf(\"newLoopbackStack() = %v\", err)\n+ }\n+\n+ addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211}\n+ s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr)\n+\n+ fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) {\n+ time.Sleep(time.Second)\n+ r.Complete(true)\n+ })\n+ s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket)\n+\n+ ctx := context.Background()\n+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(100*time.Millisecond))\n+ defer cancel()\n+\n+ if _, err := DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber); err != context.DeadlineExceeded {\n+ t.Errorf(\"got DialContextTCP(...) = %v, want = %v\", err, context.DeadlineExceeded)\n+ }\n+}\n+\nfunc TestNetTest(t *testing.T) {\nnettest.TestConn(t, makePipe)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add gonet.DialContextTCP. Allows cancellation and timeouts. PiperOrigin-RevId: 247090428 Change-Id: I91907f12e218677dcd0e0b6d72819deedbd9f20c
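The DialContextTCP entry point added above takes a context, so callers can bound the connect with a deadline or cancel it outright. A minimal sketch of a caller, assuming an already-configured *stack.Stack; the NIC ID, address, port, and import paths below are illustrative (paths follow this revision's layout), not taken from the commit:

```go
package dialsketch

import (
	"context"
	"net"
	"time"

	"gvisor.googlesource.com/gvisor/pkg/tcpip"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/adapters/gonet"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
)

// dialWithTimeout connects through the given netstack and gives up after
// three seconds instead of blocking indefinitely on ErrConnectStarted.
func dialWithTimeout(s *stack.Stack) (*gonet.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	addr := tcpip.FullAddress{
		NIC:  1,
		Addr: tcpip.Address(net.IPv4(10, 0, 0, 2).To4()),
		Port: 80,
	}
	return gonet.DialContextTCP(ctx, s, addr, ipv4.ProtocolNumber)
}
```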
259,885
10.05.2019 13:36:56
25,200
5ee8218483ce172400c21780d5dbc1ec2ba54d63
Add pgalloc.DelayedEvictionManual.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -304,10 +304,11 @@ func (k *Kernel) SaveTo(w io.Writer) error {\ndefer k.resumeTimeLocked()\n// Evict all evictable MemoryFile allocations.\n- k.mf.FlushEvictions()\n+ k.mf.StartEvictions()\n+ k.mf.WaitForEvictions()\n// Flush write operations on open files so data reaches backing storage.\n- // This must come after k.mf.FlushEvictions() since eviction may cause file\n+ // This must come after MemoryFile eviction since eviction may cause file\n// writes.\nif err := k.tasks.flushWritesToFiles(ctx); err != nil {\nreturn err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/pgalloc/pgalloc.go", "new_path": "pkg/sentry/pgalloc/pgalloc.go", "diff": "@@ -190,6 +190,11 @@ const (\n// reclaimer goroutine is out of work (pages to reclaim), then evicts all\n// pending evictable allocations immediately.\nDelayedEvictionEnabled\n+\n+ // DelayedEvictionManual requires that evictable allocations are only\n+ // evicted when MemoryFile.StartEvictions() is called. This is extremely\n+ // dangerous outside of tests.\n+ DelayedEvictionManual\n)\n// usageInfo tracks usage information.\n@@ -264,7 +269,7 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) {\nswitch opts.DelayedEviction {\ncase DelayedEvictionDefault:\nopts.DelayedEviction = DelayedEvictionEnabled\n- case DelayedEvictionDisabled, DelayedEvictionEnabled:\n+ case DelayedEvictionDisabled, DelayedEvictionEnabled, DelayedEvictionManual:\ndefault:\nreturn nil, fmt.Errorf(\"invalid MemoryFileOpts.DelayedEviction: %v\", opts.DelayedEviction)\n}\n@@ -1075,6 +1080,14 @@ func (f *MemoryFile) markReclaimed(fr platform.FileRange) {\n}\n}\n+// StartEvictions requests that f evict all evictable allocations. 
It does not\n+// wait for eviction to complete; for this, see MemoryFile.WaitForEvictions.\n+func (f *MemoryFile) StartEvictions() {\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+ f.startEvictionsLocked()\n+}\n+\n// Preconditions: f.mu must be locked.\nfunc (f *MemoryFile) startEvictionsLocked() {\nfor user, info := range f.evictable {\n@@ -1122,6 +1135,12 @@ func (f *MemoryFile) startEvictionGoroutineLocked(user EvictableMemoryUser, info\n}()\n}\n+// WaitForEvictions blocks until f is no longer evicting any evictable\n+// allocations.\n+func (f *MemoryFile) WaitForEvictions() {\n+ f.evictionWG.Wait()\n+}\n+\ntype usageSetFunctions struct{}\nfunc (usageSetFunctions) MinKey() uint64 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/pgalloc/save_restore.go", "new_path": "pkg/sentry/pgalloc/save_restore.go", "diff": "@@ -28,15 +28,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/state\"\n)\n-// FlushEvictions blocks until f has finished evicting all evictable\n-// allocations.\n-func (f *MemoryFile) FlushEvictions() {\n- f.mu.Lock()\n- f.startEvictionsLocked()\n- f.mu.Unlock()\n- f.evictionWG.Wait()\n-}\n-\n// SaveTo writes f's state to the given stream.\nfunc (f *MemoryFile) SaveTo(w io.Writer) error {\n// Wait for reclaim.\n@@ -51,7 +42,7 @@ func (f *MemoryFile) SaveTo(w io.Writer) error {\n// Ensure that there are no pending evictions.\nif len(f.evictable) != 0 {\n- panic(fmt.Sprintf(\"evictions still pending for %d users; call FlushEvictions before SaveTo\", len(f.evictable)))\n+ panic(fmt.Sprintf(\"evictions still pending for %d users; call StartEvictions and WaitForEvictions before SaveTo\", len(f.evictable)))\n}\n// Ensure that all pages that contain data have knownCommitted set, since\n" } ]
Go
Apache License 2.0
google/gvisor
Add pgalloc.DelayedEvictionManual. PiperOrigin-RevId: 247667272 Change-Id: I16b04e11bb93f50b7e05e888992303f730e4a877
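Manual mode only makes sense when something else drives eviction explicitly; kernel.SaveTo now calls StartEvictions followed by WaitForEvictions before serializing. A rough sketch of that sequence against the pgalloc API shown in the diff — the backing file, writer, and import path are placeholders:

```go
package evictsketch

import (
	"io"
	"os"

	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
)

// checkpoint serializes a MemoryFile that was created in manual-eviction
// mode. backing and w stand in for the real backing file and state stream.
func checkpoint(backing *os.File, w io.Writer) error {
	mf, err := pgalloc.NewMemoryFile(backing, pgalloc.MemoryFileOpts{
		DelayedEviction: pgalloc.DelayedEvictionManual,
	})
	if err != nil {
		return err
	}

	// ... the sandbox runs and registers evictable allocations ...

	// In manual mode nothing is evicted until asked, so drive it explicitly
	// before SaveTo, mirroring the new kernel.SaveTo sequence.
	mf.StartEvictions()
	mf.WaitForEvictions()
	return mf.SaveTo(w)
}
```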
259,881
10.05.2019 17:36:42
25,200
c61a2e709a810233c310e409c07b0ed696f4e858
Modernize mknod test
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -1028,6 +1028,7 @@ cc_binary(\nsrcs = [\"mknod.cc\"],\nlinkstatic = 1,\ndeps = [\n+ \"//test/util:file_descriptor\",\n\"//test/util:temp_path\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/mknod.cc", "new_path": "test/syscalls/linux/mknod.cc", "diff": "#include <vector>\n#include \"gtest/gtest.h\"\n+#include \"test/util/file_descriptor.h\"\n#include \"test/util/temp_path.h\"\n#include \"test/util/test_util.h\"\n#include \"test/util/thread_util.h\"\n@@ -31,20 +32,22 @@ namespace testing {\nnamespace {\nTEST(MknodTest, RegularFile) {\n- std::string const node0 = NewTempAbsPathInDir(\"/tmp\");\n- std::string const node1 = NewTempAbsPathInDir(\"/tmp\");\n- ASSERT_THAT(mknod(node0.c_str(), S_IFREG, 0), SyscallSucceeds());\n- ASSERT_THAT(mknod(node1.c_str(), 0, 0), SyscallSucceeds());\n+ const std::string node0 = NewTempAbsPath();\n+ EXPECT_THAT(mknod(node0.c_str(), S_IFREG, 0), SyscallSucceeds());\n+\n+ const std::string node1 = NewTempAbsPath();\n+ EXPECT_THAT(mknod(node1.c_str(), 0, 0), SyscallSucceeds());\n}\nTEST(MknodTest, MknodAtRegularFile) {\n- std::string const fifo_relpath = NewTempRelPath();\n- std::string const fifo = JoinPath(\"/tmp\", fifo_relpath);\n- int dirfd;\n- ASSERT_THAT(dirfd = open(\"/tmp\", O_RDONLY), SyscallSucceeds());\n- ASSERT_THAT(mknodat(dirfd, fifo_relpath.c_str(), S_IFIFO | S_IRUSR, 0),\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string fifo_relpath = NewTempRelPath();\n+ const std::string fifo = JoinPath(dir.path(), fifo_relpath);\n+\n+ const FileDescriptor dirfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path().c_str(), O_RDONLY));\n+ ASSERT_THAT(mknodat(dirfd.get(), fifo_relpath.c_str(), S_IFIFO | S_IRUSR, 0),\nSyscallSucceeds());\n- EXPECT_THAT(close(dirfd), SyscallSucceeds());\nstruct stat st;\nASSERT_THAT(stat(fifo.c_str(), &st), SyscallSucceeds());\n@@ -52,33 +55,34 @@ TEST(MknodTest, MknodAtRegularFile) {\n}\nTEST(MknodTest, MknodOnExistingPathFails) {\n- std::string const file = NewTempAbsPathInDir(\"/tmp\");\n- std::string const slink = NewTempAbsPathInDir(\"/tmp\");\n- int fd;\n- ASSERT_THAT(fd = open(file.c_str(), O_CREAT | O_RDWR, S_IRUSR | S_IWUSR),\n- SyscallSucceeds());\n- EXPECT_THAT(close(fd), SyscallSucceeds());\n- ASSERT_THAT(symlink(file.c_str(), slink.c_str()), SyscallSucceeds());\n-\n- EXPECT_THAT(mknod(file.c_str(), S_IFREG, 0), SyscallFailsWithErrno(EEXIST));\n- EXPECT_THAT(mknod(file.c_str(), S_IFIFO, 0), SyscallFailsWithErrno(EEXIST));\n- EXPECT_THAT(mknod(slink.c_str(), S_IFREG, 0), SyscallFailsWithErrno(EEXIST));\n- EXPECT_THAT(mknod(slink.c_str(), S_IFIFO, 0), SyscallFailsWithErrno(EEXIST));\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const TempPath slink = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateSymlinkTo(GetAbsoluteTestTmpdir(), file.path()));\n+\n+ EXPECT_THAT(mknod(file.path().c_str(), S_IFREG, 0),\n+ SyscallFailsWithErrno(EEXIST));\n+ EXPECT_THAT(mknod(file.path().c_str(), S_IFIFO, 0),\n+ SyscallFailsWithErrno(EEXIST));\n+ EXPECT_THAT(mknod(slink.path().c_str(), S_IFREG, 0),\n+ SyscallFailsWithErrno(EEXIST));\n+ EXPECT_THAT(mknod(slink.path().c_str(), S_IFIFO, 0),\n+ SyscallFailsWithErrno(EEXIST));\n}\nTEST(MknodTest, UnimplementedTypesReturnError) {\n+ const std::string path = NewTempAbsPath();\n+\nif (IsRunningOnGvisor()) {\n- 
ASSERT_THAT(mknod(\"/tmp/a_socket\", S_IFSOCK, 0),\n+ ASSERT_THAT(mknod(path.c_str(), S_IFSOCK, 0),\nSyscallFailsWithErrno(EOPNOTSUPP));\n}\n// These will fail on linux as well since we don't have CAP_MKNOD.\n- ASSERT_THAT(mknod(\"/tmp/a_chardev\", S_IFCHR, 0),\n- SyscallFailsWithErrno(EPERM));\n- ASSERT_THAT(mknod(\"/tmp/a_blkdev\", S_IFBLK, 0), SyscallFailsWithErrno(EPERM));\n+ ASSERT_THAT(mknod(path.c_str(), S_IFCHR, 0), SyscallFailsWithErrno(EPERM));\n+ ASSERT_THAT(mknod(path.c_str(), S_IFBLK, 0), SyscallFailsWithErrno(EPERM));\n}\nTEST(MknodTest, Fifo) {\n- std::string const fifo = NewTempAbsPathInDir(\"/tmp\");\n+ const std::string fifo = NewTempAbsPath();\nASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\nSyscallSucceeds());\n@@ -91,24 +95,20 @@ TEST(MknodTest, Fifo) {\n// Read-end of the pipe.\nScopedThread t([&fifo, &buf, &msg]() {\n- int fd;\n- ASSERT_THAT(fd = open(fifo.c_str(), O_RDONLY), SyscallSucceeds());\n- EXPECT_THAT(read(fd, buf.data(), buf.size()),\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(msg.length()));\nEXPECT_EQ(msg, std::string(buf.data()));\n- EXPECT_THAT(close(fd), SyscallSucceeds());\n});\n// Write-end of the pipe.\n- int wfd;\n- ASSERT_THAT(wfd = open(fifo.c_str(), O_WRONLY), SyscallSucceeds());\n- EXPECT_THAT(write(wfd, msg.c_str(), msg.length()),\n+ FileDescriptor wfd = ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_WRONLY));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\nSyscallSucceedsWithValue(msg.length()));\n- EXPECT_THAT(close(wfd), SyscallSucceeds());\n}\nTEST(MknodTest, FifoOtrunc) {\n- std::string const fifo = NewTempAbsPathInDir(\"/tmp\");\n+ const std::string fifo = NewTempAbsPath();\nASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\nSyscallSucceeds());\n@@ -120,23 +120,21 @@ TEST(MknodTest, FifoOtrunc) {\nstd::vector<char> buf(512);\n// Read-end of the pipe.\nScopedThread t([&fifo, &buf, &msg]() {\n- int fd;\n- ASSERT_THAT(fd = open(fifo.c_str(), O_RDONLY), SyscallSucceeds());\n- EXPECT_THAT(read(fd, buf.data(), buf.size()),\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), buf.size()),\nSyscallSucceedsWithValue(msg.length()));\nEXPECT_EQ(msg, std::string(buf.data()));\n- EXPECT_THAT(close(fd), SyscallSucceeds());\n});\n- int wfd;\n- ASSERT_THAT(wfd = open(fifo.c_str(), O_TRUNC | O_WRONLY), SyscallSucceeds());\n- EXPECT_THAT(write(wfd, msg.c_str(), msg.length()),\n+ // Write-end of the pipe.\n+ FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_WRONLY | O_TRUNC));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\nSyscallSucceedsWithValue(msg.length()));\n- EXPECT_THAT(close(wfd), SyscallSucceeds());\n}\nTEST(MknodTest, FifoTruncNoOp) {\n- std::string const fifo = NewTempAbsPathInDir(\"/tmp\");\n+ const std::string fifo = NewTempAbsPath();\nASSERT_THAT(mknod(fifo.c_str(), S_IFIFO | S_IRUSR | S_IWUSR, 0),\nSyscallSucceeds());\n@@ -150,21 +148,18 @@ TEST(MknodTest, FifoTruncNoOp) {\nstd::vector<char> buf(512);\n// Read-end of the pipe.\nScopedThread t([&fifo, &buf, &msg]() {\n- int rfd = 0;\n- ASSERT_THAT(rfd = open(fifo.c_str(), O_RDONLY), SyscallSucceeds());\n- EXPECT_THAT(ReadFd(rfd, buf.data(), buf.size()),\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_RDONLY));\n+ EXPECT_THAT(ReadFd(fd.get(), buf.data(), 
buf.size()),\nSyscallSucceedsWithValue(msg.length()));\nEXPECT_EQ(msg, std::string(buf.data()));\n- EXPECT_THAT(close(rfd), SyscallSucceeds());\n});\n- int wfd = 0;\n- ASSERT_THAT(wfd = open(fifo.c_str(), O_TRUNC | O_WRONLY), SyscallSucceeds());\n- EXPECT_THAT(ftruncate(wfd, 0), SyscallFailsWithErrno(EINVAL));\n- EXPECT_THAT(WriteFd(wfd, msg.c_str(), msg.length()),\n+ FileDescriptor wfd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(fifo.c_str(), O_WRONLY | O_TRUNC));\n+ EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n+ EXPECT_THAT(WriteFd(wfd.get(), msg.c_str(), msg.length()),\nSyscallSucceedsWithValue(msg.length()));\n- EXPECT_THAT(ftruncate(wfd, 0), SyscallFailsWithErrno(EINVAL));\n- EXPECT_THAT(close(wfd), SyscallSucceeds());\n+ EXPECT_THAT(ftruncate(wfd.get(), 0), SyscallFailsWithErrno(EINVAL));\n}\n} // namespace\n" } ]
Go
Apache License 2.0
google/gvisor
Modernize mknod test PiperOrigin-RevId: 247704588 Change-Id: I1e63e2b310145695fbe38429b91e44d72473fcd6
259,853
13.05.2019 10:37:24
25,200
ec248daf29974c3c3f99f4c110059e2c4e50fbe0
gvisor/hostnet: restart epoll_wait after epoll_ctl Otherwise changes made by epoll_ctl will not take effect.
[ { "change_type": "MODIFY", "old_path": "pkg/fdnotifier/fdnotifier.go", "new_path": "pkg/fdnotifier/fdnotifier.go", "diff": "@@ -40,6 +40,10 @@ type notifier struct {\n// notifications.\nepFD int\n+ // eventFD is used to restart epoll_wait in waitAndNotify after\n+ // reconfiguring epFD.\n+ eventFD int\n+\n// mu protects fdMap.\nmu sync.Mutex\n@@ -55,11 +59,22 @@ func newNotifier() (*notifier, error) {\nreturn nil, err\n}\n+ eventFD, err := eventFDCreate()\n+ if err != nil {\n+ syscall.Close(epfd)\n+ return nil, err\n+ }\n+\nw := &notifier{\nepFD: epfd,\n+ eventFD: eventFD,\nfdMap: make(map[int32]*fdInfo),\n}\n+ if err := w.waitFD(int32(w.eventFD), &fdInfo{}, waiter.EventIn); err != nil {\n+ return nil, err\n+ }\n+\ngo w.waitAndNotify() // S/R-SAFE: no waiter exists during save / load.\nreturn w, nil\n@@ -91,6 +106,11 @@ func (n *notifier) waitFD(fd int32, fi *fdInfo, mask waiter.EventMask) error {\n}\n}\n+ // Restart epoll_wait in waitAndNotify.\n+ if err := eventFDWrite(n.eventFD, 1); err != nil {\n+ return err\n+ }\n+\nreturn nil\n}\n@@ -156,6 +176,12 @@ func (n *notifier) waitAndNotify() error {\nn.mu.Lock()\nfor i := 0; i < v; i++ {\n+ if e[i].Fd == int32(n.eventFD) {\n+ if _, err := eventFDRead(n.eventFD); err != nil {\n+ return err\n+ }\n+ continue\n+ }\nif fi, ok := n.fdMap[e[i].Fd]; ok {\nfi.queue.Notify(waiter.EventMaskFromLinux(e[i].Events))\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/fdnotifier/poll_unsafe.go", "new_path": "pkg/fdnotifier/poll_unsafe.go", "diff": "@@ -74,3 +74,28 @@ func epollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {\n}\nreturn int(r), nil\n}\n+\n+func eventFDCreate() (int, error) {\n+ eventFD, _, err := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, 0, 0)\n+ if err != 0 {\n+ return -1, err\n+ }\n+ return int(eventFD), nil\n+}\n+\n+func eventFDWrite(eventFD int, v uint64) error {\n+ if _, _, err := syscall.RawSyscall(syscall.SYS_WRITE, uintptr(eventFD), uintptr(unsafe.Pointer(&v)), 8); err != 0 {\n+ return err\n+ }\n+\n+ return nil\n+}\n+\n+func eventFDRead(eventFD int) (uint64, error) {\n+ var v uint64\n+ if _, _, err := syscall.RawSyscall(syscall.SYS_READ, uintptr(eventFD), uintptr(unsafe.Pointer(&v)), 8); err != 0 {\n+ return 0, err\n+ }\n+\n+ return v, nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
gvisor/hostnet: restart epoll_wait after epoll_ctl Otherwise changes made by epoll_ctl will not take effect. PiperOrigin-RevId: 247964961 Change-Id: I9fbb35c44766421af45d9ed53760e0c324d80d99
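The fix registers an eventfd with the notifier's epoll set and writes to it after reconfiguring the set, so the blocked epoll_wait wakes up and re-enters with the new registrations in effect. A standalone sketch of that wake-up pattern using golang.org/x/sys/unix (not the fdnotifier code itself):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// An epoll instance plus an eventfd whose only job is to wake it up.
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		panic(err)
	}
	efd, err := unix.Eventfd(0, 0)
	if err != nil {
		panic(err)
	}
	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(efd)}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, efd, &ev); err != nil {
		panic(err)
	}

	// After changing the set with EpollCtl, a writer bumps the eventfd so
	// the blocked EpollWait below returns and the wait loop restarts.
	go func() {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], 1)
		unix.Write(efd, buf[:])
	}()

	events := make([]unix.EpollEvent, 8)
	n, err := unix.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		if events[i].Fd == int32(efd) {
			// Drain the counter and loop back into EpollWait.
			var buf [8]byte
			unix.Read(efd, buf[:])
			fmt.Println("woken up to pick up epoll_ctl changes")
		}
	}
}
```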
259,858
26.04.2019 10:51:20
25,200
599590c9d7c1f94dfcb69342da9e0e025f0fc94a
Add performance guide.
[ { "change_type": "MODIFY", "old_path": "layouts/partials/head.html", "new_path": "layouts/partials/head.html", "diff": "src=\"https://code.jquery.com/jquery-3.3.1.min.js\"\nintegrity=\"sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=\"\ncrossorigin=\"anonymous\"></script>\n+<script\n+ src=\"https://d3js.org/d3.v4.min.js\"\n+ integrity=\"sha384-1EOYqz4UgZkewWm70NbT1JBUXSQpOIS2AaJy6/evZH+lXOrt9ITSJbFctNeyBoIJ\"\n+ crossorigin=\"anonymous\"></script>\n{{ partial \"hooks/head-end.html\" . }}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "layouts/shortcodes/graph.html", "diff": "+<svg id=\"{{ .Get \"id\" }}\" width=500 height=200>\n+ <title>{{ .Get \"title\" }}</title>\n+</svg>\n+\n+<script type=\"text/javascript\">\n+d3.csv(\"{{ .Get \"url\" }}\", function(d, i, columns) {\n+ return d; // Transformed below.\n+}, function(error, data) {\n+ if (error) throw(error);\n+\n+ // Create a new data that pivots on runtime.\n+ //\n+ // To start, we have:\n+ // runtime, ..., result\n+ // runc, ..., 1\n+ // runsc, ..., 2\n+ //\n+ // In the end we want:\n+ // ..., runsc, runc\n+ // ..., 1, 2\n+\n+ // Filter by metric, if required.\n+ if (\"{{ .Get \"metric\" }}\" != \"\") {\n+ orig_columns = data.columns;\n+ data = data.filter(d => d.metric == \"{{ .Get \"metric\" }}\");\n+ data.columns = orig_columns;\n+ }\n+\n+ // Filter by method, if required.\n+ if (\"{{ .Get \"method\" }}\" != \"\") {\n+ orig_columns = data.columns;\n+ data = data.filter(d => d.method == \"{{ .Get \"method\" }}\");\n+ data.columns = orig_columns.filter(key => key != \"method\");\n+ }\n+\n+ // Enumerate runtimes.\n+ var runtimes = Array.from(new Set(data.map(d => d.runtime)));\n+ var metrics = Array.from(new Set(data.map(d => d.metric)));\n+ if (metrics.length < 1) {\n+ console.log(data);\n+ throw(\"need at least one metric\");\n+ } else if (metrics.length == 1) {\n+ metric = metrics[0];\n+ data.columns = data.columns.filter(key => key != \"metric\");\n+ } else {\n+ metric = \"\"; // Used for grouping.\n+ }\n+\n+ var isSubset = function(a, sup) {\n+ var ap = Object.getOwnPropertyNames(a);\n+ for (var i = 0; i < ap.length; i++) {\n+ if (a[ap[i]] !== sup[ap[i]]) {\n+ return false;\n+ }\n+ }\n+ return true;\n+ };\n+\n+ // Execute a pivot to include runtimes as attributes.\n+ var new_data = data.map(function(data_item) {\n+ // Generate a prototype data item.\n+ var proto_item = Object.assign({}, data_item);\n+ delete proto_item.runtime;\n+ delete proto_item.result;\n+ var next_item = Object.assign({}, proto_item);\n+\n+ // Find all matching runtime items.\n+ data.forEach(function(d) {\n+ if (isSubset(proto_item, d)) {\n+ // Add the result result.\n+ next_item[d.runtime] = d.result;\n+ }\n+ });\n+ return next_item;\n+ });\n+\n+ // Remove any duplication.\n+ new_data = Array.from(new Set(new_data));\n+ new_data.columns = data.columns;\n+ new_data.columns = new_data.columns.filter(key => key != \"runtime\" && key != \"result\");\n+ new_data.columns = new_data.columns.concat(runtimes);\n+ data = new_data;\n+\n+ // Slice based on the first key.\n+ if (data.columns.length != runtimes.length) {\n+ x0_key = new_data.columns[0];\n+ var x1_domain = data.columns.slice(1);\n+ } else {\n+ x0_key = \"runtime\";\n+ var x1_domain = runtimes;\n+ }\n+\n+ // Determine varaible margins.\n+ var x0_domain = data.map(d => d[x0_key]);\n+ var margin_bottom_pad = 0;\n+ if (x0_domain.length > 8) {\n+ margin_bottom_pad = 50;\n+ }\n+\n+ // Use log scale if required.\n+ var y_min = 0;\n+ if ({{ .Get \"log\" | default false }}) {\n+ // Need to cap 
lower end of the domain at 1.\n+ y_min = 1;\n+ }\n+\n+ var svg = d3.select(\"#{{ .Get \"id\" }}\"),\n+ margin = {top: 20, right: 20, bottom: 30 + margin_bottom_pad, left: 50},\n+ width = +svg.attr(\"width\") - margin.left - margin.right,\n+ height = +svg.attr(\"height\") - margin.top - margin.bottom,\n+ g = svg.append(\"g\").attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");\n+\n+ var x0 = d3.scaleBand()\n+ .rangeRound([margin.left / 2, width - (4 * margin.right)])\n+ .paddingInner(0.1);\n+\n+ var x1 = d3.scaleBand()\n+ .padding(0.05);\n+\n+ var y = d3.scaleLinear()\n+ .rangeRound([height, 0]);\n+ if ({{ .Get \"log\" | default false }}) {\n+ y = d3.scaleLog()\n+ .rangeRound([height, 0]);\n+ }\n+\n+ var z = d3.scaleOrdinal()\n+ .range([\"#262362\", \"#FBB03B\", \"#286FD7\", \"#6b486b\"]);\n+\n+ // Set all domains.\n+ x0.domain(x0_domain);\n+ x1.domain(x1_domain).rangeRound([0, x0.bandwidth()]);\n+ y.domain([y_min, d3.max(data, d => d3.max(x1_domain, key => parseFloat(d[key])))]).nice();\n+\n+ // The data.\n+ g.append(\"g\")\n+ .selectAll(\"g\")\n+ .data(data)\n+ .enter().append(\"g\")\n+ .attr(\"transform\", function(d) { return \"translate(\" + x0(d[x0_key]) + \",0)\"; })\n+ .selectAll(\"rect\")\n+ .data(d => x1_domain.map(key => ({key, value: d[key]})))\n+ .enter().append(\"rect\")\n+ .attr(\"x\", d => x1(d.key))\n+ .attr(\"y\", d => y(d.value))\n+ .attr(\"width\", x1.bandwidth())\n+ .attr(\"height\", d => y(y_min) - y(d.value))\n+ .attr(\"fill\", d => z(d.key));\n+\n+ // X0 ticks and labels.\n+ var x0_axis = g.append(\"g\")\n+ .attr(\"class\", \"axis\")\n+ .attr(\"transform\", \"translate(0,\" + height + \")\")\n+ .call(d3.axisBottom(x0));\n+ if (x0_domain.length > 8) {\n+ x0_axis.selectAll(\"text\")\n+ .style(\"text-anchor\", \"end\")\n+ .attr(\"dx\", \"-.8em\")\n+ .attr(\"dy\", \".15em\")\n+ .attr(\"transform\", \"rotate(-65)\");\n+ }\n+\n+ // Y ticks and top-label.\n+ if (metric == \"default\") {\n+ metric = \"\"; // Don't display.\n+ }\n+ g.append(\"g\")\n+ .attr(\"class\", \"axis\")\n+ .call(d3.axisLeft(y).ticks(null, \"s\"))\n+ .append(\"text\")\n+ .attr(\"x\", -30.0)\n+ .attr(\"y\", y(y.ticks().pop()) - 10.0)\n+ .attr(\"dy\", \"0.32em\")\n+ .attr(\"fill\", \"#000\")\n+ .attr(\"font-weight\", \"bold\")\n+ .attr(\"text-anchor\", \"start\")\n+ .text(metric);\n+\n+ // The legend.\n+ var legend = g.append(\"g\")\n+ .attr(\"font-family\", \"sans-serif\")\n+ .attr(\"font-size\", 10)\n+ .attr(\"text-anchor\", \"end\")\n+ .selectAll(\"g\")\n+ .data(x1_domain.slice().reverse())\n+ .enter().append(\"g\")\n+ .attr(\"transform\", function(d, i) { return \"translate(0,\" + i * 20 + \")\"; });\n+ legend.append(\"rect\")\n+ .attr(\"x\", width - 19)\n+ .attr(\"width\", 19)\n+ .attr(\"height\", 19)\n+ .attr(\"fill\", z);\n+ legend.append(\"text\")\n+ .attr(\"x\", width - 24)\n+ .attr(\"y\", 9.5)\n+ .attr(\"dy\", \"0.32em\")\n+ .text(function(d) { return d; });\n+});\n+</script>\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/README.md", "diff": "+# Performance data\n+\n+This directory holds the CSVs generated by the\n+[benchmark-tools][benchmark-tools] repository.\n+\n+In the future, these will be automatically posted to a cloud storage bucket and\n+loaded dynamically. At that point, this directory will be removed.\n+\n+[benchmark-tools]: https://gvisor.googlesource.com/benchmark-tools\n" } ]
Go
Apache License 2.0
google/gvisor
Add performance guide.
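Before charting, the shortcode's JavaScript pivots each benchmark CSV so every runtime becomes its own column, grouped by the remaining keys. The same reshaping written as a hypothetical Go helper over a runtime,method,metric,result file such as the ones under static/performance/ (CSVs without a method column would need a slightly different key):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"os"
)

// pivot regroups rows keyed by method/metric so each runtime maps to its
// result — the same transformation the d3 shortcode performs before drawing
// grouped bars.
func pivot(records [][]string) map[string]map[string]string {
	idx := map[string]int{}
	for i, name := range records[0] {
		idx[name] = i
	}
	out := map[string]map[string]string{}
	for _, rec := range records[1:] {
		key := rec[idx["method"]] + "/" + rec[idx["metric"]]
		if out[key] == nil {
			out[key] = map[string]string{}
		}
		out[key][rec[idx["runtime"]]] = rec[idx["result"]]
	}
	return out
}

func main() {
	f, err := os.Open("fio.csv") // e.g. static/performance/fio.csv
	if err != nil {
		panic(err)
	}
	defer f.Close()
	records, err := csv.NewReader(f).ReadAll()
	if err != nil {
		panic(err)
	}
	fmt.Println(pivot(records))
}
```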
259,975
26.04.2019 16:37:31
25,200
b4eea426529db8b386560f7301a09421fa7031ca
Adding data from working benchmarks
[ { "change_type": "ADD", "old_path": null, "new_path": "static/performance/density.csv", "diff": "+runtime,method,metric,result\n+runc,density.empty,memory_usage,2460794.88\n+runc,density.node,memory_usage,18692915.2\n+runc,density.ruby,memory_usage,18854543.36\n+runsc,density.empty,memory_usage,22531850.24\n+runsc,density.node,memory_usage,43994398.72\n+runsc,density.ruby,memory_usage,51410862.08\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/ffmpeg.csv", "diff": "+runtime,metric,result\n+runc,run_time,77.705753\n+runsc,run_time,84.245589\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/fio-tmpfs.csv", "diff": "+runtime,method,metric,result\n+runc,fio.read,bandwidth,4573005824\n+runc,fio.write,bandwidth,3253763072\n+runsc,fio.read,bandwidth,2572451840\n+runsc,fio.write,bandwidth,1125278720\n+runc,fio.randread,bandwidth,1139651584\n+runc,fio.randwrite,bandwidth,998289408\n+runsc,fio.randread,bandwidth,91157504\n+runsc,fio.randwrite,bandwidth,83028992\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/fio.csv", "diff": "+runtime,method,metric,result\n+runc,fio.read,bandwidth,252633088\n+runc,fio.write,bandwidth,457222144\n+runsc,fio.read,bandwidth,252442624\n+runsc,fio.write,bandwidth,436373504\n+runc,fio.randread,bandwidth,5781504\n+runc,fio.randwrite,bandwidth,109732864\n+runsc,fio.randread,bandwidth,4793344\n+runsc,fio.randwrite,bandwidth,60702720\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/httpd100k.csv", "diff": "+connections,runtime,metric,result\n+1,runc,transfer_rate,922.54\n+1,runc,latency,0.0\n+1,runsc,transfer_rate,402.11\n+1,runsc,latency,1.0\n+5,runc,transfer_rate,4278.39\n+5,runc,latency,1.0\n+5,runsc,transfer_rate,1205.09\n+5,runsc,latency,2.0\n+10,runc,transfer_rate,5170.11\n+10,runc,latency,1.0\n+10,runsc,transfer_rate,1390.26\n+10,runsc,latency,3.0\n+25,runc,transfer_rate,5435.27\n+25,runc,latency,2.0\n+25,runsc,transfer_rate,1067.27\n+25,runsc,latency,10.0\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/httpd10240k.csv", "diff": "+connections,runtime,metric,result\n+1,runc,transfer_rate,880.36\n+1,runc,latency,0.0\n+1,runsc,transfer_rate,404.93\n+1,runsc,latency,1.0\n+5,runc,transfer_rate,4263.6\n+5,runc,latency,1.0\n+5,runsc,transfer_rate,1211.68\n+5,runsc,latency,2.0\n+10,runc,transfer_rate,5172.49\n+10,runc,latency,1.0\n+10,runsc,transfer_rate,1338.98\n+10,runsc,latency,3.0\n+25,runc,transfer_rate,5485.12\n+25,runc,latency,2.0\n+25,runsc,transfer_rate,1077.82\n+25,runsc,latency,11.0\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/iperf.csv", "diff": "+runtime,method,metric,result\n+runc,network.download,bandwidth,769515500.0\n+runc,network.upload,bandwidth,790282500.0\n+runsc,network.download,bandwidth,711784500.0\n+runsc,network.upload,bandwidth,506225500.0\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/redis.csv", "diff": 
"+runtime,metric,result\n+runc,PING_INLINE,31328.32\n+runc,PING_BULK,30797.66\n+runc,SET,30854.68\n+runc,GET,30826.14\n+runc,INCR,31725.89\n+runc,LPUSH,31515.91\n+runc,RPUSH,31210.99\n+runc,LPOP,30637.26\n+runc,RPOP,30835.65\n+runc,SADD,31123.56\n+runc,HSET,30778.7\n+runc,SPOP,31017.37\n+runc,LRANGE_100,24654.83\n+runc,LRANGE_300,14692.92\n+runc,LRANGE_500,12253.4\n+runc,LRANGE_600,10389.61\n+runc,MSET,31535.79\n+runsc,PING_INLINE,14932.06\n+runsc,PING_BULK,15772.87\n+runsc,SET,15532.77\n+runsc,GET,15603.06\n+runsc,INCR,15542.43\n+runsc,LPUSH,15253.2\n+runsc,RPUSH,15332.72\n+runsc,LPOP,15391.72\n+runsc,RPOP,15408.32\n+runsc,SADD,15520.72\n+runsc,HSET,15283.51\n+runsc,SPOP,15644.56\n+runsc,LRANGE_100,13635.12\n+runsc,LRANGE_300,9993.0\n+runsc,LRANGE_500,8588.11\n+runsc,LRANGE_600,7231.18\n+runsc,MSET,14669.21\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/startup.csv", "diff": "+runtime,method,metric,result\n+runc,startup.empty,startup_time_ms,1114.56914\n+runc,startup.node,startup_time_ms,2311.1579199999996\n+runc,startup.ruby,startup_time_ms,2297.36332\n+runsc,startup.empty,startup_time_ms,1071.5360200000002\n+runsc,startup.node,startup_time_ms,2308.90646\n+runsc,startup.ruby,startup_time_ms,2336.6960599999998\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/sysbench-cpu.csv", "diff": "+runtime,metric,result\n+runc,cpu_events_per_second,104.35\n+runsc,cpu_events_per_second,103.86\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/sysbench-memory.csv", "diff": "+runtime,metric,result\n+runc,memory_ops_per_second,13274.43\n+runsc,memory_ops_per_second,13204.74\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/syscall.csv", "diff": "+runtime,metric,result\n+runc,syscall_time_ns,1929.0\n+runsc,syscall_time_ns,36011.0\n+runsc-kvm,syscall_time_ns,754.0\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/performance/tensorflow.csv", "diff": "+runtime,metric,result\n+runc,run_time,192.232783\n+runsc,run_time,223.366044\n" } ]
Go
Apache License 2.0
google/gvisor
Adding data from working benchmarks
259,975
01.05.2019 12:20:26
25,200
b879aa444d0a2a87c892296ed5f7ec62842da6eb
Update applications numbers after node/ruby refactor
[ { "change_type": "ADD", "old_path": null, "new_path": "static/performance/applications.csv", "diff": "+runtime,method,metric,result\n+runc,http.node,transfer_rate,3880.44\n+runc,http.node,latency,11.0\n+runc,http.node,requests_per_second,901.04\n+runc,http.ruby,transfer_rate,3182.51\n+runc,http.ruby,latency,17.0\n+runc,http.ruby,requests_per_second,597.85\n+runsc,http.node,transfer_rate,1617.49\n+runsc,http.node,latency,27.0\n+runsc,http.node,requests_per_second,375.81\n+runsc,http.ruby,transfer_rate,1414.86\n+runsc,http.ruby,latency,38.0\n+runsc,http.ruby,requests_per_second,265.79\n" } ]
Go
Apache License 2.0
google/gvisor
Update applications numbers after node/ruby refactor
259,975
02.05.2019 10:02:44
25,200
07c8695865899ef7b60c92cb861e500dfc65c3ff
Update fio to display in log scale
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -205,13 +205,13 @@ through the [Gofer](../) as a result of our [security model](../security/), but\nin most cases are dominated by **implementation costs**, due to an internal\n[Virtual File System][vfs] (VFS) implementation the needs improvement.\n-{{< graph id=\"fio-bw\" url=\"/performance/fio.csv\" title=\"perf.py fio --engine=sync --runtime=runc --runtime=runsc\" >}}\n+{{< graph id=\"fio-bw\" url=\"/performance/fio.csv\" title=\"perf.py fio --engine=sync --runtime=runc --runtime=runsc\" log=\"true\" >}}\nThe above figures demonstrate the results of `fio` for reads and writes to and\nfrom the disk. In this case, the disk quickly becomes the bottleneck and\ndominates other costs.\n-{{< graph id=\"fio-tmpfs-bw\" url=\"/performance/fio-tmpfs.csv\" title=\"perf.py fio --engine=sync --runtime=runc --tmpfs=True --runtime=runsc\" >}}\n+{{< graph id=\"fio-tmpfs-bw\" url=\"/performance/fio-tmpfs.csv\" title=\"perf.py fio --engine=sync --runtime=runc --tmpfs=True --runtime=runsc\" log=\"true\" >}}\nThe above figure shows the raw I/O performance of using a `tmpfs` mount which is\nsandbox-internal in the case of `runsc`. Generally these operations are\n" } ]
Go
Apache License 2.0
google/gvisor
Update fio to display in log scale
259,858
07.05.2019 13:26:42
25,200
e86a939edf3a4aaff8c11f70c3b7acb44eb7cd7a
Add clarifying notes.
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -92,6 +92,11 @@ This test is the result of running many instances of a container (typically 50)\nand calculating available memory on the host before and afterwards, and dividing\nthe difference by the number of containers.\n+> Note: the above technique is used for measuring memory usage over the\n+> `usage_in_bytes` value of the container cgroup because we found that some\n+> container runtimes, other than `runc` and `runsc` do not use an individual\n+> container cgroup.\n+\nThe first application is an instance of `sleep`: a trivial application that does\nnothing. The second application is a synthetic `node` application which imports\na number of modules and listens for requests. The third application is a similar\n@@ -107,8 +112,8 @@ imposed for CPU operations.\nThe above figure demonstrates the `sysbench` measurement of CPU events per\nsecond. Events per second is based on a CPU-bound loop that calculates all prime\n-numbers in a specified range. We note that `runsc` does not impose substantial\n-degradation, as the code is executing natively in both cases.\n+numbers in a specified range. We note that `runsc` does not impose a performance\n+penalty, as the code is executing natively in both cases.\nThis has important consequences for classes of workloads that are often\nCPU-bound, such as data processing or machine learning. In these cases, `runsc`\n@@ -168,6 +173,11 @@ loads a number of modules and binds an HTTP server. The time is measured by a\nsuccessful request to the bound port. Finally, a `ruby` application that\nsimilarly loads a number of modules and binds an HTTP server.\n+> Note: most of the time overhead above is associated Docker itself. This is\n+> evident with the empty `runc` benchmark. To avoid these costs with `runsc`,\n+> you may also consider using `runsc do` mode or invoking the [OCI\n+> runtime](../../user_guide/oci) directly.\n+\n## Network\nNetworking is mostly bound by **implementation costs**, and gVisor's network stack\n@@ -225,8 +235,8 @@ such operations in the hot path for serviing requests, for example. The above\nfigure shows the result of using gVisor to serve small pieces of static content\nwith predictably poor results. This workload represents `apache` serving a\nsingle file sized 100k to a client running [ApacheBench][ab] with varying levels\n-of concurrency. The high overhead comes principles from a VFS implementation\n-needs improvement, with several internal serialization points (since all\n+of concurrency. The high overhead comes principally from the VFS implementation\n+that needs improvement, with several internal serialization points (since all\nrequests are reading the same file). Note that some of some of network stack\nperformance issues also impact this benchmark.\n" } ]
Go
Apache License 2.0
google/gvisor
Add clarifying notes.
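The measurement described in the added note is host-level: sample available memory before and after starting the containers and divide the difference by the container count. A rough sketch of that bookkeeping, reading MemAvailable from /proc/meminfo — the container-launch step and the count of 50 are placeholders, and the benchmark-tools harness may do this differently:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// memAvailableKB returns the MemAvailable figure from /proc/meminfo, in KiB.
func memAvailableKB() (int64, error) {
	data, err := ioutil.ReadFile("/proc/meminfo")
	if err != nil {
		return 0, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "MemAvailable:") {
			fields := strings.Fields(line)
			if len(fields) < 2 {
				break
			}
			return strconv.ParseInt(fields[1], 10, 64)
		}
	}
	return 0, fmt.Errorf("MemAvailable not found")
}

func main() {
	before, _ := memAvailableKB()
	// ... start N containers here (e.g. 50 sleep containers) ...
	after, _ := memAvailableKB()
	const n = 50
	fmt.Printf("approx. per-container overhead: %d KiB\n", (before-after)/n)
}
```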
259,858
13.05.2019 14:50:57
25,200
5dcfe3c758e9b2ca0c87d6bb843094b083a9515b
Clarify sizes and file locations.
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -235,17 +235,18 @@ The high costs of VFS operations can manifest in benchmarks that execute many\nsuch operations in the hot path for serviing requests, for example. The above\nfigure shows the result of using gVisor to serve small pieces of static content\nwith predictably poor results. This workload represents `apache` serving a\n-single file sized 100k to a client running [ApacheBench][ab] with varying levels\n-of concurrency. The high overhead comes principally from the VFS implementation\n-that needs improvement, with several internal serialization points (since all\n-requests are reading the same file). Note that some of some of network stack\n-performance issues also impact this benchmark.\n+single file sized 100k from the container image to a client running\n+[ApacheBench][ab] with varying levels of concurrency. The high overhead comes\n+principally from the VFS implementation that needs improvement, with several\n+internal serialization points (since all requests are reading the same file).\n+Note that some of some of network stack performance issues also impact this\n+benchmark.\n{{< graph id=\"ffmpeg\" url=\"/performance/ffmpeg.csv\" title=\"perf.py media.ffmpeg --runtime=runc --runtime=runsc\" >}}\nFor benchmarks that are bound by raw disk I/O and a mix of compute, file system\noperations are less of an issue. The above figure shows the total time required\n-for an `ffmpeg` container to start, load and transcode an input video.\n+for an `ffmpeg` container to start, load and transcode a 27MB input video.\n[ab]: https://en.wikipedia.org/wiki/ApacheBench\n[benchmark-tools]: https://gvisor.googlesource.com/benchmark-tools\n" } ]
Go
Apache License 2.0
google/gvisor
Clarify sizes and file locations.
259,975
13.05.2019 15:03:34
25,200
5b3f25047fe8dd4d04262ea56c38a134291d0067
Upload latest set of benchmarks Also includes redis density benchmark.
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -85,7 +85,7 @@ For many use cases, fixed memory overheads are a primary concern. This may be\nbecause sandboxed containers handle a low volume of requests, and it is\ntherefore important to achieve high densities for efficiency.\n-{{< graph id=\"density\" url=\"/performance/density.csv\" title=\"perf.py density --runtime=runc --runtime=runsc\" >}}\n+{{< graph id=\"density\" url=\"/performance/density.csv\" title=\"perf.py density --runtime=runc --runtime=runsc\" log=\"true\" y_min=\"100000\">}}\nThe above figure demonstrates these costs based on three sample applications.\nThis test is the result of running many instances of a container (typically 50)\n@@ -134,7 +134,7 @@ supports a variety of platforms. These platforms present distinct performance,\ncompatibility and security trade-offs. For example, the KVM platform has low\noverhead system call interception but runs poorly with nested virtualization.\n-{{< graph id=\"syscall\" url=\"/performance/syscall.csv\" title=\"perf.py syscall --runtime=runc --runtime=runsc-ptrace --runtime=runsc-kvm\" log=\"true\" >}}\n+{{< graph id=\"syscall\" url=\"/performance/syscall.csv\" title=\"perf.py syscall --runtime=runc --runtime=runsc-ptrace --runtime=runsc-kvm\" y_min=\"100\" log=\"true\" >}}\nThe above figure demonstrates the time required for a raw system call on various\nplatforms. The test is implemented by a custom binary which performs a large\n" }, { "change_type": "MODIFY", "old_path": "layouts/shortcodes/graph.html", "new_path": "layouts/shortcodes/graph.html", "diff": "@@ -104,6 +104,10 @@ d3.csv(\"{{ .Get \"url\" }}\", function(d, i, columns) {\ny_min = 1;\n}\n+ if ({{ .Get \"y_min\" | default false }}) {\n+ y_min = \"{{ .Get \"y_min\" }}\";\n+ }\n+\nvar svg = d3.select(\"#{{ .Get \"id\" }}\"),\nmargin = {top: 20, right: 20, bottom: 30 + margin_bottom_pad, left: 50},\nwidth = +svg.attr(\"width\") - margin.left - margin.right,\n" }, { "change_type": "MODIFY", "old_path": "static/performance/applications.csv", "new_path": "static/performance/applications.csv", "diff": "runtime,method,metric,result\n-runc,http.node,transfer_rate,3880.44\n+runc,http.node,transfer_rate,3814.85\nrunc,http.node,latency,11.0\n-runc,http.node,requests_per_second,901.04\n-runc,http.ruby,transfer_rate,3182.51\n-runc,http.ruby,latency,17.0\n-runc,http.ruby,requests_per_second,597.85\n-runsc,http.node,transfer_rate,1617.49\n+runc,http.node,requests_per_second,885.81\n+runc,http.ruby,transfer_rate,2874.38\n+runc,http.ruby,latency,18.0\n+runc,http.ruby,requests_per_second,539.97\n+runsc,http.node,transfer_rate,1615.54\nrunsc,http.node,latency,27.0\n-runsc,http.node,requests_per_second,375.81\n-runsc,http.ruby,transfer_rate,1414.86\n+runsc,http.node,requests_per_second,375.13\n+runsc,http.ruby,transfer_rate,1382.71\nrunsc,http.ruby,latency,38.0\n-runsc,http.ruby,requests_per_second,265.79\n+runsc,http.ruby,requests_per_second,259.75\n" }, { "change_type": "MODIFY", "old_path": "static/performance/density.csv", "new_path": "static/performance/density.csv", "diff": 
"runtime,method,metric,result\n-runc,density.empty,memory_usage,2460794.88\n-runc,density.node,memory_usage,18692915.2\n-runc,density.ruby,memory_usage,18854543.36\n-runsc,density.empty,memory_usage,22531850.24\n-runsc,density.node,memory_usage,43994398.72\n-runsc,density.ruby,memory_usage,51410862.08\n+runc,density.empty,memory_usage,4092149.76\n+runc,density.node,memory_usage,76709888.0\n+runc,density.ruby,memory_usage,45737000.96\n+runsc,density.empty,memory_usage,23695032.32\n+runsc,density.node,memory_usage,124076605.44\n+runsc,density.ruby,memory_usage,106141777.92\n+runc,density.redis,memory_usage,1055323750.4\n+runsc,density.redis,memory_usage,1076686028.8\n" }, { "change_type": "MODIFY", "old_path": "static/performance/ffmpeg.csv", "new_path": "static/performance/ffmpeg.csv", "diff": "runtime,metric,result\n-runc,run_time,77.705753\n-runsc,run_time,84.245589\n+runc,run_time,82.000625\n+runsc,run_time,88.24018\n" }, { "change_type": "MODIFY", "old_path": "static/performance/fio-tmpfs.csv", "new_path": "static/performance/fio-tmpfs.csv", "diff": "runtime,method,metric,result\n-runc,fio.read,bandwidth,4573005824\n-runc,fio.write,bandwidth,3253763072\n-runsc,fio.read,bandwidth,2572451840\n-runsc,fio.write,bandwidth,1125278720\n-runc,fio.randread,bandwidth,1139651584\n-runc,fio.randwrite,bandwidth,998289408\n-runsc,fio.randread,bandwidth,91157504\n-runsc,fio.randwrite,bandwidth,83028992\n+runc,fio.read,bandwidth,4240686080\n+runc,fio.write,bandwidth,3029744640\n+runsc,fio.read,bandwidth,2533604352\n+runsc,fio.write,bandwidth,1207536640\n+runc,fio.randread,bandwidth,1221472256\n+runc,fio.randwrite,bandwidth,1046094848\n+runsc,fio.randread,bandwidth,68940800\n+runsc,fio.randwrite,bandwidth,67286016\n" }, { "change_type": "MODIFY", "old_path": "static/performance/fio.csv", "new_path": "static/performance/fio.csv", "diff": "runtime,method,metric,result\n-runc,fio.read,bandwidth,252633088\n-runc,fio.write,bandwidth,457222144\n-runsc,fio.read,bandwidth,252442624\n-runsc,fio.write,bandwidth,436373504\n-runc,fio.randread,bandwidth,5781504\n-runc,fio.randwrite,bandwidth,109732864\n-runsc,fio.randread,bandwidth,4793344\n-runsc,fio.randwrite,bandwidth,60702720\n+runc,fio.read,bandwidth,252253184\n+runc,fio.write,bandwidth,457767936\n+runsc,fio.read,bandwidth,252323840\n+runsc,fio.write,bandwidth,431845376\n+runc,fio.randread,bandwidth,5284864\n+runc,fio.randwrite,bandwidth,107758592\n+runsc,fio.randread,bandwidth,4403200\n+runsc,fio.randwrite,bandwidth,69161984\n" }, { "change_type": "MODIFY", "old_path": "static/performance/httpd100k.csv", "new_path": "static/performance/httpd100k.csv", "diff": "connections,runtime,metric,result\n-1,runc,transfer_rate,922.54\n-1,runc,latency,0.0\n-1,runsc,transfer_rate,402.11\n-1,runsc,latency,1.0\n-5,runc,transfer_rate,4278.39\n+1,runc,transfer_rate,565.35\n+1,runc,latency,1.0\n+1,runsc,transfer_rate,282.84\n+1,runsc,latency,2.0\n+5,runc,transfer_rate,3260.57\n5,runc,latency,1.0\n-5,runsc,transfer_rate,1205.09\n-5,runsc,latency,2.0\n-10,runc,transfer_rate,5170.11\n+5,runsc,transfer_rate,832.69\n+5,runsc,latency,3.0\n+10,runc,transfer_rate,4672.01\n10,runc,latency,1.0\n-10,runsc,transfer_rate,1390.26\n-10,runsc,latency,3.0\n-25,runc,transfer_rate,5435.27\n+10,runsc,transfer_rate,1095.47\n+10,runsc,latency,4.0\n+25,runc,transfer_rate,4964.14\n25,runc,latency,2.0\n-25,runsc,transfer_rate,1067.27\n-25,runsc,latency,10.0\n+25,runsc,transfer_rate,961.03\n+25,runsc,latency,12.0\n" }, { "change_type": "MODIFY", "old_path": "static/performance/httpd10240k.csv", 
"new_path": "static/performance/httpd10240k.csv", "diff": "connections,runtime,metric,result\n-1,runc,transfer_rate,880.36\n-1,runc,latency,0.0\n-1,runsc,transfer_rate,404.93\n-1,runsc,latency,1.0\n-5,runc,transfer_rate,4263.6\n+1,runc,transfer_rate,674.05\n+1,runc,latency,1.0\n+1,runsc,transfer_rate,243.35\n+1,runsc,latency,2.0\n+5,runc,transfer_rate,3089.83\n5,runc,latency,1.0\n-5,runsc,transfer_rate,1211.68\n+5,runsc,transfer_rate,981.91\n5,runsc,latency,2.0\n-10,runc,transfer_rate,5172.49\n+10,runc,transfer_rate,4701.2\n10,runc,latency,1.0\n-10,runsc,transfer_rate,1338.98\n-10,runsc,latency,3.0\n-25,runc,transfer_rate,5485.12\n+10,runsc,transfer_rate,1135.08\n+10,runsc,latency,4.0\n+25,runc,transfer_rate,5021.36\n25,runc,latency,2.0\n-25,runsc,transfer_rate,1077.82\n-25,runsc,latency,11.0\n+25,runsc,transfer_rate,963.26\n+25,runsc,latency,12.0\n" }, { "change_type": "MODIFY", "old_path": "static/performance/iperf.csv", "new_path": "static/performance/iperf.csv", "diff": "runtime,method,metric,result\n-runc,network.download,bandwidth,769515500.0\n-runc,network.upload,bandwidth,790282500.0\n-runsc,network.download,bandwidth,711784500.0\n-runsc,network.upload,bandwidth,506225500.0\n+runc,network.download,bandwidth,746386000.0\n+runc,network.upload,bandwidth,709808000.0\n+runsc,network.download,bandwidth,640303500.0\n+runsc,network.upload,bandwidth,482254000.0\n" }, { "change_type": "MODIFY", "old_path": "static/performance/redis.csv", "new_path": "static/performance/redis.csv", "diff": "runtime,metric,result\n-runc,PING_INLINE,31328.32\n-runc,PING_BULK,30797.66\n-runc,SET,30854.68\n-runc,GET,30826.14\n-runc,INCR,31725.89\n-runc,LPUSH,31515.91\n-runc,RPUSH,31210.99\n-runc,LPOP,30637.26\n-runc,RPOP,30835.65\n-runc,SADD,31123.56\n-runc,HSET,30778.7\n-runc,SPOP,31017.37\n-runc,LRANGE_100,24654.83\n-runc,LRANGE_300,14692.92\n-runc,LRANGE_500,12253.4\n-runc,LRANGE_600,10389.61\n-runc,MSET,31535.79\n-runsc,PING_INLINE,14932.06\n-runsc,PING_BULK,15772.87\n-runsc,SET,15532.77\n-runsc,GET,15603.06\n-runsc,INCR,15542.43\n-runsc,LPUSH,15253.2\n-runsc,RPUSH,15332.72\n-runsc,LPOP,15391.72\n-runsc,RPOP,15408.32\n-runsc,SADD,15520.72\n-runsc,HSET,15283.51\n-runsc,SPOP,15644.56\n-runsc,LRANGE_100,13635.12\n-runsc,LRANGE_300,9993.0\n-runsc,LRANGE_500,8588.11\n-runsc,LRANGE_600,7231.18\n-runsc,MSET,14669.21\n+runc,PING_INLINE,30525.03\n+runc,PING_BULK,30293.85\n+runc,SET,30257.19\n+runc,GET,30312.21\n+runc,INCR,30525.03\n+runc,LPUSH,30712.53\n+runc,RPUSH,30459.95\n+runc,LPOP,30367.45\n+runc,RPOP,30665.44\n+runc,SADD,30030.03\n+runc,HSET,30656.04\n+runc,SPOP,29940.12\n+runc,LRANGE_100,24224.81\n+runc,LRANGE_300,14302.06\n+runc,LRANGE_500,11728.83\n+runc,LRANGE_600,9900.99\n+runc,MSET,30120.48\n+runsc,PING_INLINE,14528.55\n+runsc,PING_BULK,15627.44\n+runsc,SET,15403.57\n+runsc,GET,15325.67\n+runsc,INCR,15269.51\n+runsc,LPUSH,15172.2\n+runsc,RPUSH,15117.16\n+runsc,LPOP,15257.86\n+runsc,RPOP,15188.33\n+runsc,SADD,15432.1\n+runsc,HSET,15163.0\n+runsc,SPOP,15561.78\n+runsc,LRANGE_100,13365.41\n+runsc,LRANGE_300,9520.18\n+runsc,LRANGE_500,8248.78\n+runsc,LRANGE_600,6544.07\n+runsc,MSET,14367.82\n" }, { "change_type": "MODIFY", "old_path": "static/performance/startup.csv", "new_path": "static/performance/startup.csv", "diff": 
"runtime,method,metric,result\n-runc,startup.empty,startup_time_ms,1114.56914\n-runc,startup.node,startup_time_ms,2311.1579199999996\n-runc,startup.ruby,startup_time_ms,2297.36332\n-runsc,startup.empty,startup_time_ms,1071.5360200000002\n-runsc,startup.node,startup_time_ms,2308.90646\n-runsc,startup.ruby,startup_time_ms,2336.6960599999998\n+runc,startup.empty,startup_time_ms,1193.10768\n+runc,startup.node,startup_time_ms,2557.95336\n+runc,startup.ruby,startup_time_ms,2530.12624\n+runsc,startup.empty,startup_time_ms,1144.1775\n+runsc,startup.node,startup_time_ms,2441.90284\n+runsc,startup.ruby,startup_time_ms,2455.69882\n" }, { "change_type": "MODIFY", "old_path": "static/performance/sysbench-cpu.csv", "new_path": "static/performance/sysbench-cpu.csv", "diff": "runtime,metric,result\n-runc,cpu_events_per_second,104.35\n-runsc,cpu_events_per_second,103.86\n+runc,cpu_events_per_second,103.62\n+runsc,cpu_events_per_second,103.21\n" }, { "change_type": "MODIFY", "old_path": "static/performance/sysbench-memory.csv", "new_path": "static/performance/sysbench-memory.csv", "diff": "runtime,metric,result\n-runc,memory_ops_per_second,13274.43\n-runsc,memory_ops_per_second,13204.74\n+runc,memory_ops_per_second,13098.73\n+runsc,memory_ops_per_second,13107.44\n" }, { "change_type": "MODIFY", "old_path": "static/performance/syscall.csv", "new_path": "static/performance/syscall.csv", "diff": "runtime,metric,result\n-runc,syscall_time_ns,1929.0\n-runsc,syscall_time_ns,36011.0\n-runsc-kvm,syscall_time_ns,754.0\n+runc,syscall_time_ns,1939.0\n+runsc,syscall_time_ns,38219.0\n+runsc-kvm,syscall_time_ns,763.0\n+runsc-kvm-bare,syscall_time_ns,455.0\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "static/performance/tensorflow.csv", "new_path": "static/performance/tensorflow.csv", "diff": "runtime,metric,result\n-runc,run_time,192.232783\n-runsc,run_time,223.366044\n+runc,run_time,207.1118165\n+runsc,run_time,244.473401\n" } ]
Go
Apache License 2.0
google/gvisor
Upload latest set of benchmarks Also includes redis density benchmark.
259,858
13.05.2019 15:12:03
25,200
9ea68ce1655ddadc8e3ca4c65fd2922d2564b33d
Drop confusing kvm-bare syscall time The difference is only due to different machines.
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -90,17 +90,16 @@ therefore important to achieve high densities for efficiency.\nThe above figure demonstrates these costs based on three sample applications.\nThis test is the result of running many instances of a container (typically 50)\nand calculating available memory on the host before and afterwards, and dividing\n-the difference by the number of containers.\n-\n-> Note: the above technique is used for measuring memory usage over the\n-> `usage_in_bytes` value of the container cgroup because we found that some\n-> container runtimes, other than `runc` and `runsc` do not use an individual\n-> container cgroup.\n+the difference by the number of containers. This technique is used for measuring\n+memory usage over the `usage_in_bytes` value of the container cgroup because we\n+found that some container runtimes, other than `runc` and `runsc`, do not use an\n+individual container cgroup.\nThe first application is an instance of `sleep`: a trivial application that does\nnothing. The second application is a synthetic `node` application which imports\na number of modules and listens for requests. The third application is a similar\n-synthetic `ruby` application which does the same. In all cases, the sandbox\n+synthetic `ruby` application which does the same. Finally, we include an\n+instance of `redis` storing approximately 1GB of data. In all cases, the sandbox\nitself is responsible for a small, mostly fixed amount of memory overhead.\n## CPU performance\n" }, { "change_type": "MODIFY", "old_path": "static/performance/syscall.csv", "new_path": "static/performance/syscall.csv", "diff": "@@ -2,4 +2,3 @@ runtime,metric,result\nrunc,syscall_time_ns,1939.0\nrunsc,syscall_time_ns,38219.0\nrunsc-kvm,syscall_time_ns,763.0\n-runsc-kvm-bare,syscall_time_ns,455.0\n\\ No newline at end of file\n" } ]
Go
Apache License 2.0
google/gvisor
Drop confusing kvm-bare syscall time The difference is only due to different machines.
259,858
13.05.2019 15:20:45
25,200
ad7ef8410f908aa8b1286f9796692c5aaeb68447
Fixup redis container count.
[ { "change_type": "MODIFY", "old_path": "content/docs/architecture_guide/performance.md", "new_path": "content/docs/architecture_guide/performance.md", "diff": "@@ -88,12 +88,12 @@ therefore important to achieve high densities for efficiency.\n{{< graph id=\"density\" url=\"/performance/density.csv\" title=\"perf.py density --runtime=runc --runtime=runsc\" log=\"true\" y_min=\"100000\" >}}\nThe above figure demonstrates these costs based on three sample applications.\n-This test is the result of running many instances of a container (typically 50)\n-and calculating available memory on the host before and afterwards, and dividing\n-the difference by the number of containers. This technique is used for measuring\n-memory usage over the `usage_in_bytes` value of the container cgroup because we\n-found that some container runtimes, other than `runc` and `runsc`, do not use an\n-individual container cgroup.\n+This test is the result of running many instances of a container (50, or 5 in\n+the case of redis) and calculating available memory on the host before and\n+afterwards, and dividing the difference by the number of containers. This\n+technique is used for measuring memory usage over the `usage_in_bytes` value of\n+the container cgroup because we found that some container runtimes, other than\n+`runc` and `runsc`, do not use an individual container cgroup.\nThe first application is an instance of `sleep`: a trivial application that does\nnothing. The second application is a synthetic `node` application which imports\n" } ]
Go
Apache License 2.0
google/gvisor
Fixup redis container count.
259,853
14.05.2019 16:00:53
25,200
fff21b99e45136510a0148eada57ff28966dc27e
kokoro: run tests with a default docker container runtime We want to know that our environment is set up properly and that docker tests pass with a native runtime.
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -142,10 +142,21 @@ EOF\nrun_docker_tests() {\ncd ${WORKSPACE_DIR}\n+ # Run tests with a default runtime (runc).\n+ bazel test \\\n+ \"${BAZEL_BUILD_FLAGS[@]}\" \\\n+ --test_env=RUNSC_RUNTIME=\"\" \\\n+ --test_output=all \\\n+ //runsc/test/image:image_test\n+\n# These names are used to exclude tests not supported in certain\n# configuration, e.g. save/restore not supported with hostnet.\ndeclare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\nfor v in \"${variations[@]}\"; do\n+ # FIXME(b/132073574): we need to flush arp tables, otherwise tests fail with\n+ # timeout.\n+ sudo ip neigh show\n+ sudo ip neigh flush dev docker0\n# Run runsc tests with docker that are tagged manual.\nbazel test \\\n\"${BAZEL_BUILD_FLAGS[@]}\" \\\n" }, { "change_type": "MODIFY", "old_path": "runsc/test/testutil/docker.go", "new_path": "runsc/test/testutil/docker.go", "diff": "@@ -31,8 +31,8 @@ import (\n)\nfunc getRuntime() string {\n- r := os.Getenv(\"RUNSC_RUNTIME\")\n- if r == \"\" {\n+ r, ok := os.LookupEnv(\"RUNSC_RUNTIME\")\n+ if !ok {\nreturn \"runsc-test\"\n}\nreturn r\n" } ]
Go
Apache License 2.0
google/gvisor
kokoro: run tests with a default docker container runtime We want to know that our environment is set up properly and that docker tests pass with a native runtime. PiperOrigin-RevId: 248229294 Change-Id: I06c221e5eeed6e01bdd1aa935333c57e8eadc498
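The Go side of this change swaps os.Getenv for os.LookupEnv so that an unset RUNSC_RUNTIME still falls back to runsc-test, while an explicitly empty value now passes through, which the test harness uses to run the image tests against Docker's default runtime (runc). The updated helper, reproduced with comments:

```go
package main

import (
	"fmt"
	"os"
)

// getRuntime mirrors the updated testutil helper: os.LookupEnv can tell an
// unset variable apart from one set to the empty string; os.Getenv cannot.
func getRuntime() string {
	r, ok := os.LookupEnv("RUNSC_RUNTIME")
	if !ok {
		return "runsc-test" // unset: use the runsc test runtime
	}
	return r // set, possibly to "": "" selects the default runtime
}

func main() {
	fmt.Println("runtime:", getRuntime())
}
```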
259,853
15.05.2019 11:10:56
25,200
85380ff03d21da417ad74d28b293c768d7effb4f
gvisor/runsc: use a veth link address instead of generating a new one
[ { "change_type": "MODIFY", "old_path": "kokoro/run_tests.sh", "new_path": "kokoro/run_tests.sh", "diff": "@@ -153,10 +153,6 @@ run_docker_tests() {\n# configuration, e.g. save/restore not supported with hostnet.\ndeclare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\nfor v in \"${variations[@]}\"; do\n- # FIXME(b/132073574): we need to flush arp tables, otherwise tests fail with\n- # timeout.\n- sudo ip neigh show\n- sudo ip neigh flush dev docker0\n# Run runsc tests with docker that are tagged manual.\nbazel test \\\n\"${BAZEL_BUILD_FLAGS[@]}\" \\\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/network.go", "new_path": "runsc/boot/network.go", "diff": "@@ -16,7 +16,6 @@ package boot\nimport (\n\"fmt\"\n- \"math/rand\"\n\"net\"\n\"syscall\"\n@@ -57,6 +56,7 @@ type FDBasedLink struct {\nAddresses []net.IP\nRoutes []Route\nGSOMaxSize uint32\n+ LinkAddress []byte\n}\n// LoopbackLink configures a loopback li nk.\n@@ -134,7 +134,7 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\nreturn fmt.Errorf(\"failed to dup FD %v: %v\", oldFD, err)\n}\n- mac := tcpip.LinkAddress(generateRndMac())\n+ mac := tcpip.LinkAddress(link.LinkAddress)\nlinkEP, err := fdbased.New(&fdbased.Options{\nFD: newFD,\nMTU: uint32(link.MTU),\n@@ -220,13 +220,3 @@ func ipToAddressMask(ip net.IP) tcpip.AddressMask {\n_, addr := ipToAddressAndProto(ip)\nreturn tcpip.AddressMask(addr)\n}\n-\n-// generateRndMac returns a random local MAC address.\n-// Copied from eth_random_addr() (include/linux/etherdevice.h)\n-func generateRndMac() net.HardwareAddr {\n- mac := make(net.HardwareAddr, 6)\n- rand.Read(mac)\n- mac[0] &^= 0x1 // clear multicast bit\n- mac[0] |= 0x2 // set local assignment bit (IEEE802)\n- return mac\n-}\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/network.go", "new_path": "runsc/sandbox/network.go", "diff": "@@ -246,6 +246,7 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, enableGSO\nif err != nil {\nreturn fmt.Errorf(\"getting link for interface %q: %v\", iface.Name, err)\n}\n+ link.LinkAddress = []byte(ifaceLink.Attrs().HardwareAddr)\nif enableGSO {\ngso, err := isGSOEnabled(fd, iface.Name)\n" } ]
Go
Apache License 2.0
google/gvisor
gvisor/runsc: use a veth link address instead of generating a new one PiperOrigin-RevId: 248367340 Change-Id: Id792afcfff9c9d2cfd62cae21048316267b4a924
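Instead of synthesizing a random locally-administered MAC, the sandbox now reuses the veth's existing link address, read from the interface's netlink attributes. An illustrative standard-library version of the same lookup — the interface name is a placeholder, and runsc itself goes through netlink rather than the net package:

```go
package main

import (
	"fmt"
	"net"
)

// linkAddress returns the interface's existing hardware address, the value
// that now seeds the fdbased endpoint instead of a generated MAC.
func linkAddress(name string) (net.HardwareAddr, error) {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return nil, err
	}
	return iface.HardwareAddr, nil
}

func main() {
	mac, err := linkAddress("eth0")
	if err != nil {
		panic(err)
	}
	fmt.Println("reusing veth MAC:", mac)
}
```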
259,992
15.05.2019 14:35:30
25,200
ecb0f00e10017e82698c326b4d83294c9e20dfbd
Cleanup around urpc file payload handling urpc always closes all files once the RPC function returns.
[ { "change_type": "MODIFY", "old_path": "runsc/boot/controller.go", "new_path": "runsc/boot/controller.go", "diff": "@@ -211,12 +211,6 @@ type StartArgs struct {\nfunc (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nlog.Debugf(\"containerManager.Start: %+v\", args)\n- defer func() {\n- for _, f := range args.FilePayload.Files {\n- f.Close()\n- }\n- }()\n-\n// Validate arguments.\nif args == nil {\nreturn errors.New(\"start missing arguments\")\n@@ -305,21 +299,19 @@ type RestoreOpts struct {\nfunc (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nlog.Debugf(\"containerManager.Restore\")\n- var specFile *os.File\n- deviceFD := -1\n+ var specFile, deviceFile *os.File\nswitch numFiles := len(o.FilePayload.Files); numFiles {\ncase 2:\n- var err error\n// The device file is donated to the platform.\n// Can't take ownership away from os.File. dup them to get a new FD.\n- deviceFD, err = syscall.Dup(int(o.FilePayload.Files[1].Fd()))\n+ fd, err := syscall.Dup(int(o.FilePayload.Files[1].Fd()))\nif err != nil {\nreturn fmt.Errorf(\"failed to dup file: %v\", err)\n}\n+ deviceFile = os.NewFile(uintptr(fd), \"platform device\")\nfallthrough\ncase 1:\nspecFile = o.FilePayload.Files[0]\n- defer specFile.Close()\ncase 0:\nreturn fmt.Errorf(\"at least one file must be passed to Restore\")\ndefault:\n@@ -331,7 +323,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\ncm.l.k.Pause()\ncm.l.k.Destroy()\n- p, err := createPlatform(cm.l.conf, deviceFD)\n+ p, err := createPlatform(cm.l.conf, deviceFile)\nif err != nil {\nreturn fmt.Errorf(\"creating platform: %v\", err)\n}\n@@ -357,7 +349,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nif eps, ok := networkStack.(*epsocket.Stack); ok {\nstack.StackFromEnv = eps.Stack // FIXME(b/36201077)\n}\n- info, err := o.FilePayload.Files[0].Stat()\n+ info, err := specFile.Stat()\nif err != nil {\nreturn err\n}\n@@ -366,9 +358,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\n}\n// Load the state.\n- loadOpts := state.LoadOpts{\n- Source: o.FilePayload.Files[0],\n- }\n+ loadOpts := state.LoadOpts{Source: specFile}\nif err := loadOpts.Load(k, networkStack); err != nil {\nreturn err\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -152,8 +152,8 @@ type Args struct {\nConf *Config\n// ControllerFD is the FD to the URPC controller.\nControllerFD int\n- // DeviceFD is an optional argument that is passed to the platform.\n- DeviceFD int\n+ // Device is an optional argument that is passed to the platform.\n+ Device *os.File\n// GoferFDs is an array of FDs used to connect with the Gofer.\nGoferFDs []int\n// StdioFDs is the stdio for the application.\n@@ -183,7 +183,7 @@ func New(args Args) (*Loader, error) {\n}\n// Create kernel and platform.\n- p, err := createPlatform(args.Conf, args.DeviceFD)\n+ p, err := createPlatform(args.Conf, args.Device)\nif err != nil {\nreturn nil, fmt.Errorf(\"creating platform: %v\", err)\n}\n@@ -401,17 +401,17 @@ func (l *Loader) Destroy() {\nl.watchdog.Stop()\n}\n-func createPlatform(conf *Config, deviceFD int) (platform.Platform, error) {\n+func createPlatform(conf *Config, deviceFile *os.File) (platform.Platform, error) {\nswitch conf.Platform {\ncase PlatformPtrace:\nlog.Infof(\"Platform: ptrace\")\nreturn ptrace.New()\ncase PlatformKVM:\nlog.Infof(\"Platform: kvm\")\n- if deviceFD < 0 {\n- return nil, fmt.Errorf(\"kvm device FD must be provided\")\n+ if 
deviceFile == nil {\n+ return nil, fmt.Errorf(\"kvm device file must be provided\")\n}\n- return kvm.New(os.NewFile(uintptr(deviceFD), \"kvm device\"))\n+ return kvm.New(deviceFile)\ndefault:\nreturn nil, fmt.Errorf(\"invalid platform %v\", conf.Platform)\n}\n@@ -590,18 +590,22 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn fmt.Errorf(\"creating new process: %v\", err)\n}\n+ // setupContainerFS() dups stdioFDs, so we don't need to dup them here.\n+ var stdioFDs []int\n+ for _, f := range files[:3] {\n+ stdioFDs = append(stdioFDs, int(f.Fd()))\n+ }\n+\n// Can't take ownership away from os.File. dup them to get a new FDs.\n- var ioFDs []int\n- for _, f := range files {\n+ var goferFDs []int\n+ for _, f := range files[3:] {\nfd, err := syscall.Dup(int(f.Fd()))\nif err != nil {\nreturn fmt.Errorf(\"failed to dup file: %v\", err)\n}\n- ioFDs = append(ioFDs, fd)\n+ goferFDs = append(goferFDs, fd)\n}\n- stdioFDs := ioFDs[:3]\n- goferFDs := ioFDs[3:]\nif err := setupContainerFS(\n&procArgs,\nspec,\n@@ -616,13 +620,6 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn fmt.Errorf(\"configuring container FS: %v\", err)\n}\n- // setFileSystemForProcess dup'd stdioFDs, so we can close them.\n- for i, fd := range stdioFDs {\n- if err := syscall.Close(fd); err != nil {\n- return fmt.Errorf(\"closing stdio FD #%d: %v\", i, fd)\n- }\n- }\n-\nctx := procArgs.NewContext(l.k)\nmns := k.RootMountNamespace()\nif err := setExecutablePath(ctx, mns, &procArgs); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader_test.go", "new_path": "runsc/boot/loader_test.go", "diff": "@@ -115,7 +115,6 @@ func createLoader() (*Loader, func(), error) {\nSpec: spec,\nConf: conf,\nControllerFD: fd,\n- DeviceFD: -1,\nGoferFDs: []int{sandEnd},\nStdioFDs: stdio,\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/boot.go", "new_path": "runsc/cmd/boot.go", "diff": "@@ -213,7 +213,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nSpec: spec,\nConf: conf,\nControllerFD: b.controllerFD,\n- DeviceFD: b.deviceFD,\n+ Device: os.NewFile(uintptr(b.deviceFD), \"platform device\"),\nGoferFDs: b.ioFDs.GetArray(),\nStdioFDs: b.stdioFDs.GetArray(),\nConsole: b.console,\n" } ]
Go
Apache License 2.0
google/gvisor
Cleanup around urpc file payload handling urpc always closes all files once the RPC function returns. PiperOrigin-RevId: 248406857 Change-Id: I400a8562452ec75c8e4bddc2154948567d572950
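The commit above relies on one rule: urpc owns and closes every payload file once the handler returns, so a handler that needs a donated file past that point must dup it first (as the Restore and startContainer changes do). A minimal standalone sketch of that pattern follows; the helper name is hypothetical, but the dup-then-wrap steps mirror the diff.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// keepDonatedFile duplicates a payload file's descriptor so that it remains
// usable after the RPC framework closes the original *os.File.
func keepDonatedFile(donated *os.File, name string) (*os.File, error) {
	fd, err := syscall.Dup(int(donated.Fd()))
	if err != nil {
		return nil, fmt.Errorf("failed to dup file: %v", err)
	}
	// The duplicated descriptor has an independent lifetime: closing the
	// donated file later does not invalidate the returned one.
	return os.NewFile(uintptr(fd), name), nil
}

func main() {
	f, err := os.Open("/dev/null")
	if err != nil {
		panic(err)
	}
	kept, err := keepDonatedFile(f, "kept copy")
	if err != nil {
		panic(err)
	}
	f.Close() // simulates urpc closing the payload after the handler returns
	fmt.Println("still usable fd:", kept.Fd())
	kept.Close()
}
```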
259,853
15.05.2019 17:20:14
25,200
2105158d4bee4fb36658dba32eb8104cf4e96467
gofer: don't call hostFile.Close if hostFile is nil
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/path.go", "new_path": "pkg/sentry/fs/gofer/path.go", "diff": "@@ -121,13 +121,17 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string\nqids, unopened, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name})\nif err != nil {\nnewFile.close(ctx)\n+ if hostFile != nil {\nhostFile.Close()\n+ }\nreturn nil, err\n}\nif len(qids) != 1 {\nlog.Warningf(\"WalkGetAttr(%s) succeeded, but returned %d QIDs (%v), wanted 1\", name, len(qids), qids)\nnewFile.close(ctx)\n+ if hostFile != nil {\nhostFile.Close()\n+ }\nunopened.close(ctx)\nreturn nil, syserror.EIO\n}\n" } ]
Go
Apache License 2.0
google/gvisor
gofer: don't call hostfile.Close if hostFile is nil PiperOrigin-RevId: 248437159 Change-Id: Ife71f6ca032fca59ec97a82961000ed0af257101
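The fix above is the usual nil-guard on a cleanup path: the host file is optional, so it must only be closed when one was actually returned. A tiny sketch of the same guard, factored into a hypothetical helper:

```go
package main

import "os"

// closeIfOpen closes an optional *os.File only when it is non-nil, so error
// paths that never opened a host file do not hit a nil-pointer dereference.
func closeIfOpen(f *os.File) {
	if f != nil {
		f.Close()
	}
}

func main() {
	var hostFile *os.File // stays nil when no host FD was donated
	closeIfOpen(hostFile) // safe: nothing is called on a nil file
}
```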
259,854
16.05.2019 11:58:10
25,200
40419a16eb8bfbfd9bbb4e20957a5ec9e846a22a
Add test for duplicate proc entries. The issue with duplicate /proc/sys entries seems to have been fixed in Git hash dc8450b5676d4c4ac9bcfa23cabd862e0060527d. Fixes google/gvisor#125
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -214,6 +214,8 @@ syscall_test(test = \"//test/syscalls/linux:priority_test\")\nsyscall_test(\nsize = \"medium\",\n+ # We don't want our proc changing out from under us.\n+ parallel = False,\ntest = \"//test/syscalls/linux:proc_test\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "#include <algorithm>\n#include <atomic>\n#include <functional>\n+#include <iostream>\n#include <map>\n#include <memory>\n+#include <ostream>\n#include <string>\n+#include <unordered_set>\n#include <utility>\n#include <vector>\n@@ -1838,6 +1841,47 @@ TEST(ProcSelfMounts, RequiredFieldsArePresent) {\n// Root mount.\nContainsRegex(R\"(\\S+ /proc \\S+ rw\\S* [0-9]+ [0-9]+\\s)\")));\n}\n+\n+void CheckDuplicatesRecursively(std::string path) {\n+ errno = 0;\n+ DIR* dir = opendir(path.c_str());\n+ if (dir == nullptr) {\n+ ASSERT_THAT(errno, ::testing::AnyOf(EPERM, EACCES)) << path;\n+ return;\n+ }\n+ auto dir_closer = Cleanup([&dir]() { closedir(dir); });\n+ std::unordered_set<std::string> children;\n+ while (true) {\n+ // Readdir(3): If the end of the directory stream is reached, NULL is\n+ // returned and errno is not changed. If an error occurs, NULL is returned\n+ // and errno is set appropriately. To distinguish end of stream and from an\n+ // error, set errno to zero before calling readdir() and then check the\n+ // value of errno if NULL is returned.\n+ errno = 0;\n+ struct dirent* dp = readdir(dir);\n+ if (dp == nullptr) {\n+ ASSERT_EQ(errno, 0) << path;\n+ break; // We're done.\n+ }\n+\n+ if (strcmp(dp->d_name, \".\") == 0 || strcmp(dp->d_name, \"..\") == 0) {\n+ continue;\n+ }\n+\n+ ASSERT_EQ(children.find(std::string(dp->d_name)), children.end()) << dp->d_name;\n+ children.insert(std::string(dp->d_name));\n+\n+ ASSERT_NE(dp->d_type, DT_UNKNOWN);\n+\n+ if (dp->d_type != DT_DIR) {\n+ continue;\n+ }\n+ CheckDuplicatesRecursively(absl::StrCat(path, \"/\", dp->d_name));\n+ }\n+}\n+\n+TEST(Proc, NoDuplicates) { CheckDuplicatesRecursively(\"/proc\"); }\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Add test for duplicate proc entries. The issue with duplicate /proc/sys entries seems to have been fixed in: PiperOrigin-RevId 229305982 Git hash dc8450b5676d4c4ac9bcfa23cabd862e0060527d Fixes google/gvisor#125 PiperOrigin-RevId: 248571903 Change-Id: I76ff3b525c93dafb92da6e5cf56e440187f14579
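The test body above builds a set of entry names per directory and fails on any repeat. For readers who want the idea without the C++ harness, here is a rough Go equivalent of the single-directory check; the recursive walk and the errno handling around readdir(3) are left out, and the function is not part of the gVisor test suite.

```go
package main

import (
	"fmt"
	"os"
)

// checkNoDuplicates lists one directory and reports any name that appears
// twice, the same invariant the C++ test asserts recursively under /proc.
func checkNoDuplicates(path string) error {
	entries, err := os.ReadDir(path)
	if err != nil {
		return err
	}
	seen := make(map[string]bool)
	for _, e := range entries {
		if seen[e.Name()] {
			return fmt.Errorf("duplicate entry %q in %s", e.Name(), path)
		}
		seen[e.Name()] = true
	}
	return nil
}

func main() {
	if err := checkNoDuplicates("/proc"); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("no duplicate entries")
	}
}
```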
259,881
17.05.2019 13:04:44
25,200
04105781ad558662e1e48bad17197df244ff7841
Fix gofer rename ctime and clean up stat_times test. There is a lot of redundancy that we can simplify in the stat_times test. This will make it easier to add new tests. However, the simplification reveals that cached uattrs on goferfs don't properly update ctime on rename.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode.go", "new_path": "pkg/sentry/fs/fsutil/inode.go", "diff": "@@ -341,7 +341,7 @@ func (InodeNotDirectory) RemoveDirectory(context.Context, *fs.Inode, string) err\n}\n// Rename implements fs.FileOperations.Rename.\n-func (InodeNotDirectory) Rename(context.Context, *fs.Inode, string, *fs.Inode, string, bool) error {\n+func (InodeNotDirectory) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error {\nreturn syserror.EINVAL\n}\n@@ -381,7 +381,7 @@ func (InodeNoopTruncate) Truncate(context.Context, *fs.Inode, int64) error {\ntype InodeNotRenameable struct{}\n// Rename implements fs.InodeOperations.Rename.\n-func (InodeNotRenameable) Rename(context.Context, *fs.Inode, string, *fs.Inode, string, bool) error {\n+func (InodeNotRenameable) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error {\nreturn syserror.EINVAL\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode_cached.go", "new_path": "pkg/sentry/fs/fsutil/inode_cached.go", "diff": "@@ -451,6 +451,14 @@ func (c *CachingInodeOperations) touchModificationTimeLocked(now time.Time) {\nc.dirtyAttr.StatusChangeTime = true\n}\n+// TouchStatusChangeTime updates the cached status change time in-place to the\n+// current time.\n+func (c *CachingInodeOperations) TouchStatusChangeTime(ctx context.Context) {\n+ c.attrMu.Lock()\n+ c.touchStatusChangeTimeLocked(ktime.NowFromContext(ctx))\n+ c.attrMu.Unlock()\n+}\n+\n// touchStatusChangeTimeLocked updates the cached status change time\n// in-place to the current time.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/path.go", "new_path": "pkg/sentry/fs/gofer/path.go", "diff": "@@ -344,7 +344,7 @@ func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, na\n}\n// Rename renames this node.\n-func (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nif len(newName) > maxFilenameLen {\nreturn syserror.ENAMETOOLONG\n}\n@@ -389,6 +389,11 @@ func (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldNa\nnewParentInodeOperations.markDirectoryDirty()\n}\n}\n+\n+ // Rename always updates ctime.\n+ if i.session().cachePolicy.cacheUAttrs(inode) {\n+ i.cachingInodeOps.TouchStatusChangeTime(ctx)\n+ }\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/inode.go", "new_path": "pkg/sentry/fs/host/inode.go", "diff": "@@ -301,7 +301,7 @@ func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, na\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nop, ok := oldParent.InodeOperations.(*inodeOperations)\nif !ok {\nreturn syscall.EXDEV\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/inode.go", "new_path": "pkg/sentry/fs/inode.go", "diff": "@@ -216,7 +216,7 @@ func (i *Inode) Rename(ctx context.Context, oldParent *Dirent, renamed *Dirent,\nif i.overlay != nil {\nreturn 
overlayRename(ctx, i.overlay, oldParent, renamed, newParent, newName, replacement)\n}\n- return i.InodeOperations.Rename(ctx, oldParent.Inode, renamed.name, newParent.Inode, newName, replacement)\n+ return i.InodeOperations.Rename(ctx, renamed.Inode, oldParent.Inode, renamed.name, newParent.Inode, newName, replacement)\n}\n// Bind calls i.InodeOperations.Bind with i as the directory.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/inode_operations.go", "new_path": "pkg/sentry/fs/inode_operations.go", "diff": "@@ -131,14 +131,15 @@ type InodeOperations interface {\nRemoveDirectory(ctx context.Context, dir *Inode, name string) error\n// Rename atomically renames oldName under oldParent to newName under\n- // newParent where oldParent and newParent are directories.\n+ // newParent where oldParent and newParent are directories. inode is\n+ // the Inode of this InodeOperations.\n//\n// If replacement is true, then newName already exists and this call\n// will replace it with oldName.\n//\n// Implementations are responsible for rejecting renames that replace\n// non-empty directories.\n- Rename(ctx context.Context, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error\n+ Rename(ctx context.Context, inode *Inode, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error\n// Bind binds a new socket under dir at the given name.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/inode_overlay.go", "new_path": "pkg/sentry/fs/inode_overlay.go", "diff": "@@ -389,7 +389,7 @@ func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, rena\nreturn err\n}\noldName := renamed.name\n- if err := o.upper.InodeOperations.Rename(ctx, oldParent.Inode.overlay.upper, oldName, newParent.Inode.overlay.upper, newName, replacement); err != nil {\n+ if err := o.upper.InodeOperations.Rename(ctx, renamed.Inode.overlay.upper, oldParent.Inode.overlay.upper, oldName, newParent.Inode.overlay.upper, newName, replacement); err != nil {\nreturn err\n}\nif renamed.Inode.overlay.lowerExists {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/mock.go", "new_path": "pkg/sentry/fs/mock.go", "diff": "@@ -132,7 +132,7 @@ func (n *MockInodeOperations) CreateDirectory(context.Context, *Inode, string, F\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (n *MockInodeOperations) Rename(ctx context.Context, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error {\n+func (n *MockInodeOperations) Rename(ctx context.Context, inode *Inode, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error {\nn.renameCalled = true\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/ramfs/dir.go", "new_path": "pkg/sentry/fs/ramfs/dir.go", "diff": "@@ -401,7 +401,7 @@ func (d *Dir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (*Dir) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (*Dir) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn Rename(ctx, oldParent.InodeOperations, oldName, newParent.InodeOperations, newName, replacement)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/inode_file.go", "new_path": "pkg/sentry/fs/tmpfs/inode_file.go", 
"diff": "@@ -149,7 +149,7 @@ func (f *fileInodeOperations) Mappable(*fs.Inode) memmap.Mappable {\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (*fileInodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (*fileInodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn rename(ctx, oldParent, oldName, newParent, newName, replacement)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fs/tmpfs/tmpfs.go", "diff": "@@ -238,7 +238,7 @@ func (d *Dir) newCreateOps() *ramfs.CreateOps {\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (d *Dir) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (d *Dir) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn rename(ctx, oldParent, oldName, newParent, newName, replacement)\n}\n@@ -271,7 +271,7 @@ func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (s *Symlink) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (s *Symlink) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn rename(ctx, oldParent, oldName, newParent, newName, replacement)\n}\n@@ -301,7 +301,7 @@ func NewSocket(ctx context.Context, socket transport.BoundEndpoint, owner fs.Fil\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (s *Socket) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (s *Socket) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn rename(ctx, oldParent, oldName, newParent, newName, replacement)\n}\n@@ -338,7 +338,7 @@ func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions,\n}\n// Rename implements fs.InodeOperations.Rename.\n-func (f *Fifo) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\n+func (f *Fifo) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {\nreturn rename(ctx, oldParent, oldName, newParent, newName, replacement)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/stat_times.cc", "new_path": "test/syscalls/linux/stat_times.cc", "diff": "@@ -33,8 +33,6 @@ namespace {\nusing ::testing::IsEmpty;\nusing ::testing::Not;\n-class StatTimesTest : public ::testing::Test {\n- protected:\nstd::tuple<absl::Time, absl::Time, absl::Time> GetTime(const TempPath& file) {\nstruct stat statbuf = {};\nEXPECT_THAT(stat(file.path().c_str(), &statbuf), SyscallSucceeds());\n@@ -44,12 +42,79 @@ class StatTimesTest : public ::testing::Test {\nconst auto ctime = absl::TimeFromTimespec(statbuf.st_ctim);\nreturn std::make_tuple(atime, mtime, ctime);\n}\n+\n+enum class AtimeEffect {\n+ Unchanged,\n+ Changed,\n+};\n+\n+enum class MtimeEffect {\n+ Unchanged,\n+ 
Changed,\n+};\n+\n+enum class CtimeEffect {\n+ Unchanged,\n+ Changed,\n};\n-TEST_F(StatTimesTest, FileCreationTimes) {\n+// Tests that fn modifies the atime/mtime/ctime of path as specified.\n+void CheckTimes(const TempPath& path, std::function<void()> fn,\n+ AtimeEffect atime_effect, MtimeEffect mtime_effect,\n+ CtimeEffect ctime_effect) {\n+ absl::Time atime, mtime, ctime;\n+ std::tie(atime, mtime, ctime) = GetTime(path);\n+\n+ // FIXME(b/132819225): gVisor filesystem timestamps inconsistently use the\n+ // internal or host clock, which may diverge slightly. Allow some slack on\n+ // times to account for the difference.\n+ //\n+ // Here we sleep for 1s so that initial creation of path doesn't fall within\n+ // the before slack window.\n+ absl::SleepFor(absl::Seconds(1));\n+\n+ const absl::Time before = absl::Now() - absl::Seconds(1);\n+\n+ // Perform the op.\n+ fn();\n+\n+ const absl::Time after = absl::Now() + absl::Seconds(1);\n+\n+ absl::Time atime2, mtime2, ctime2;\n+ std::tie(atime2, mtime2, ctime2) = GetTime(path);\n+\n+ if (atime_effect == AtimeEffect::Changed) {\n+ EXPECT_LE(before, atime2);\n+ EXPECT_GE(after, atime2);\n+ EXPECT_GT(atime2, atime);\n+ } else {\n+ EXPECT_EQ(atime2, atime);\n+ }\n+\n+ if (mtime_effect == MtimeEffect::Changed) {\n+ EXPECT_LE(before, mtime2);\n+ EXPECT_GE(after, mtime2);\n+ EXPECT_GT(mtime2, mtime);\n+ } else {\n+ EXPECT_EQ(mtime2, mtime);\n+ }\n+\n+ if (ctime_effect == CtimeEffect::Changed) {\n+ EXPECT_LE(before, ctime2);\n+ EXPECT_GE(after, ctime2);\n+ EXPECT_GT(ctime2, ctime);\n+ } else {\n+ EXPECT_EQ(ctime2, ctime);\n+ }\n+}\n+\n+// File creation time is reflected in atime, mtime, and ctime.\n+TEST(StatTimesTest, FileCreation) {\nconst DisableSave ds; // Timing-related test.\n// Get a time for when the file is created.\n+ //\n+ // FIXME(b/132819225): See above.\nconst absl::Time before = absl::Now() - absl::Seconds(1);\nconst TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\nconst absl::Time after = absl::Now() + absl::Seconds(1);\n@@ -65,153 +130,137 @@ TEST_F(StatTimesTest, FileCreationTimes) {\nEXPECT_GE(after, ctime);\n}\n-TEST_F(StatTimesTest, FileCtimeChanges) {\n- auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+// Calling chmod on a file changes ctime.\n+TEST(StatTimesTest, FileChmod) {\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n- MaybeSave(); // FIXME(b/69865927): ctime is inconsistent.\n+ auto fn = [&] {\n+ EXPECT_THAT(chmod(file.path().c_str(), 0666), SyscallSucceeds());\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,\n+ CtimeEffect::Changed);\n+}\n- absl::Time atime, mtime, ctime;\n- std::tie(atime, mtime, ctime) = GetTime(file);\n+// Renaming a file changes ctime.\n+TEST(StatTimesTest, FileRename) {\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n- absl::SleepFor(absl::Seconds(1));\n+ const std::string newpath = NewTempAbsPath();\n- // Chmod should only change ctime.\n- EXPECT_THAT(chmod(file.path().c_str(), 0666), SyscallSucceeds());\n+ auto fn = [&] {\n+ ASSERT_THAT(rename(file.release().c_str(), newpath.c_str()),\n+ SyscallSucceeds());\n+ file.reset(newpath);\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,\n+ CtimeEffect::Changed);\n+}\n- absl::Time atime2, mtime2, ctime2;\n- std::tie(atime2, mtime2, ctime2) = GetTime(file);\n- EXPECT_EQ(atime2, atime);\n- EXPECT_EQ(mtime2, mtime);\n- EXPECT_GT(ctime2, ctime);\n+// Renaming a file changes ctime, even with an open FD.\n+//\n+// 
NOTE(b/132732387): This is a regression test for fs/gofer failing to update\n+// cached ctime.\n+TEST(StatTimesTest, FileRenameOpenFD) {\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n- absl::SleepFor(absl::Seconds(1));\n+ // Holding an FD shouldn't affect behavior.\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY));\n- // Rename should only change ctime.\n- const auto newpath = NewTempAbsPath();\n- EXPECT_THAT(rename(file.path().c_str(), newpath.c_str()), SyscallSucceeds());\n- file.reset(newpath);\n+ const std::string newpath = NewTempAbsPath();\n- std::tie(atime, mtime, ctime) = GetTime(file);\n- EXPECT_EQ(atime, atime2);\n- EXPECT_EQ(mtime, mtime2);\n- EXPECT_GT(ctime, ctime2);\n+ // FIXME(b/132814682): Restore fails with an uncached gofer and an open FD\n+ // across rename.\n+ //\n+ // N.B. The logic here looks backwards because it isn't possible to\n+ // conditionally disable save, only conditionally re-enable it.\n+ DisableSave ds;\n+ if (!getenv(\"GVISOR_GOFER_UNCACHED\")) {\n+ ds.reset();\n+ }\n- absl::SleepFor(absl::Seconds(1));\n+ auto fn = [&] {\n+ ASSERT_THAT(rename(file.release().c_str(), newpath.c_str()),\n+ SyscallSucceeds());\n+ file.reset(newpath);\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Unchanged,\n+ CtimeEffect::Changed);\n+}\n- // Utimes should only change ctime and the time that we ask to change (atime\n- // to now in this case).\n- const absl::Time before = absl::Now() - absl::Seconds(1);\n+// Calling utimes on a file changes ctime and the time that we ask to change\n+// (atime to now in this case).\n+TEST(StatTimesTest, FileUtimes) {\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+\n+ auto fn = [&] {\nconst struct timespec ts[2] = {{0, UTIME_NOW}, {0, UTIME_OMIT}};\nASSERT_THAT(utimensat(AT_FDCWD, file.path().c_str(), ts, 0),\nSyscallSucceeds());\n- const absl::Time after = absl::Now() + absl::Seconds(1);\n-\n- std::tie(atime2, mtime2, ctime2) = GetTime(file);\n- EXPECT_LE(before, atime2);\n- EXPECT_GE(after, atime2);\n- EXPECT_EQ(mtime2, mtime);\n- EXPECT_GT(ctime2, ctime);\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,\n+ CtimeEffect::Changed);\n}\n-TEST_F(StatTimesTest, FileMtimeChanges) {\n- const auto file = ASSERT_NO_ERRNO_AND_VALUE(\n+// Truncating a file changes mtime and ctime.\n+TEST(StatTimesTest, FileTruncate) {\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"yaaass\", 0666));\n- absl::Time atime, mtime, ctime;\n- std::tie(atime, mtime, ctime) = GetTime(file);\n-\n- absl::SleepFor(absl::Seconds(1));\n-\n- // Truncate should only change mtime and ctime.\n+ auto fn = [&] {\nEXPECT_THAT(truncate(file.path().c_str(), 0), SyscallSucceeds());\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,\n+ CtimeEffect::Changed);\n+}\n- absl::Time atime2, mtime2, ctime2;\n- std::tie(atime2, mtime2, ctime2) = GetTime(file);\n- EXPECT_EQ(atime2, atime);\n- EXPECT_GT(mtime2, mtime);\n- EXPECT_GT(ctime2, ctime);\n+// Writing a file changes mtime and ctime.\n+TEST(StatTimesTest, FileWrite) {\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"yaaass\", 0666));\n- absl::SleepFor(absl::Seconds(1));\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 0));\n- // Write should only change mtime and ctime.\n- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR, 
0));\n+ auto fn = [&] {\nconst std::string contents = \"all the single dollars\";\n- EXPECT_THAT(write(fd.get(), contents.data(), contents.size()),\n+ EXPECT_THAT(WriteFd(fd.get(), contents.data(), contents.size()),\nSyscallSucceeds());\n-\n- std::tie(atime, mtime, ctime) = GetTime(file);\n- EXPECT_EQ(atime, atime2);\n- EXPECT_GT(mtime, mtime2);\n- EXPECT_GT(ctime, ctime2);\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,\n+ CtimeEffect::Changed);\n}\n-TEST_F(StatTimesTest, FileAtimeChanges) {\n+// Reading a file changes atime.\n+TEST(StatTimesTest, FileRead) {\nconst std::string contents = \"bills bills bills\";\n- const auto file = ASSERT_NO_ERRNO_AND_VALUE(\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(GetAbsoluteTestTmpdir(), contents, 0666));\n- MaybeSave(); // FIXME(b/69865927): ctime is inconsistent.\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY, 0));\n- absl::Time atime, mtime, ctime;\n- std::tie(atime, mtime, ctime) = GetTime(file);\n-\n- absl::SleepFor(absl::Seconds(1));\n-\n- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDONLY, 0));\n-\n- // Read should only change atime.\n+ auto fn = [&] {\nchar buf[20];\n- const absl::Time before = absl::Now() - absl::Seconds(1);\n- int read_result;\n- ASSERT_THAT(read_result = read(fd.get(), buf, sizeof(buf)),\n- SyscallSucceeds());\n- const absl::Time after = absl::Now() + absl::Seconds(1);\n-\n- EXPECT_EQ(std::string(buf, read_result), contents);\n-\n- absl::Time atime2, mtime2, ctime2;\n- std::tie(atime2, mtime2, ctime2) = GetTime(file);\n-\n- EXPECT_LE(before, atime2);\n- EXPECT_GE(after, atime2);\n- EXPECT_GT(atime2, atime);\n- EXPECT_EQ(mtime2, mtime);\n- EXPECT_EQ(ctime2, ctime);\n+ ASSERT_THAT(ReadFd(fd.get(), buf, sizeof(buf)),\n+ SyscallSucceedsWithValue(contents.size()));\n+ };\n+ CheckTimes(file, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,\n+ CtimeEffect::Unchanged);\n}\n-TEST_F(StatTimesTest, DirAtimeChanges) {\n- const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n- const auto file =\n+// Listing files in a directory changes atime.\n+TEST(StatTimesTest, DirList) {\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const TempPath file =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n- MaybeSave(); // FIXME(b/69865927): ctime is inconsistent.\n-\n- absl::Time atime, mtime, ctime;\n- std::tie(atime, mtime, ctime) = GetTime(dir);\n-\n- absl::SleepFor(absl::Seconds(1));\n-\n- const absl::Time before = absl::Now() - absl::Seconds(1);\n-\n- // NOTE(b/37756234): Keep an fd open. 
This ensures that the inode backing the\n- // directory won't be destroyed before the final GetTime to avoid writing out\n- // timestamps and causing side effects.\n- const auto fd = ASSERT_NO_ERRNO_AND_VALUE(Open(dir.path(), O_RDONLY, 0));\n-\n- // Listing the directory contents should only change atime.\n- auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), false));\n+ auto fn = [&] {\n+ const auto contents = ASSERT_NO_ERRNO_AND_VALUE(ListDir(dir.path(), false));\nEXPECT_THAT(contents, Not(IsEmpty()));\n-\n- const absl::Time after = absl::Now() + absl::Seconds(1);\n-\n- absl::Time atime2, mtime2, ctime2;\n- std::tie(atime2, mtime2, ctime2) = GetTime(dir);\n-\n- EXPECT_LE(before, atime2);\n- EXPECT_GE(after, atime2);\n- EXPECT_GT(atime2, atime);\n- EXPECT_EQ(mtime2, mtime);\n- EXPECT_EQ(ctime2, ctime);\n+ };\n+ CheckTimes(dir, fn, AtimeEffect::Changed, MtimeEffect::Unchanged,\n+ CtimeEffect::Unchanged);\n}\n} // namespace\n" } ]
Go
Apache License 2.0
google/gvisor
Fix gofer rename ctime and cleanup stat_times test There is a lot of redundancy that we can simplify in the stat_times test. This will make it easier to add new tests. However, the simplification reveals that cached uattrs on goferfs don't properly update ctime on rename. PiperOrigin-RevId: 248773425 Change-Id: I52662728e1e9920981555881f9a85f9ce04041cf
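The functional part of the change above is small: when attributes are cached, a rename must also bump the cached ctime, since rename always updates ctime. A stripped-down sketch of that bookkeeping, with stand-in types and names rather than the real inode operations:

```go
package main

import (
	"fmt"
	"time"
)

// cachedAttrs is a simplified stand-in for cached unstable attributes; only
// the status-change time matters for this sketch.
type cachedAttrs struct {
	statusChangeTime time.Time
}

// touchStatusChangeTime records that the inode's metadata changed now.
func (c *cachedAttrs) touchStatusChangeTime(now time.Time) {
	c.statusChangeTime = now
}

// renameCached performs a (pretend) rename and, because rename always updates
// ctime, refreshes the cached value so later stat() calls observe the change.
func renameCached(c *cachedAttrs) {
	// ... issue the rename to the backing filesystem here ...
	c.touchStatusChangeTime(time.Now())
}

func main() {
	c := &cachedAttrs{}
	renameCached(c)
	fmt.Println("cached ctime after rename:", c.statusChangeTime)
}
```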
259,881
17.05.2019 13:46:18
25,200
4a842836e560322bb3944b59ff43b9d60cc0f867
Return EPERM for mknod. This more directly matches what Linux does with unsupported nodes.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/path.go", "new_path": "pkg/sentry/fs/gofer/path.go", "diff": "@@ -281,9 +281,9 @@ func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string,\n}\n// CreateFifo implements fs.InodeOperations.CreateFifo. Gofer nodes do not support the\n-// creation of fifos and always returns EOPNOTSUPP.\n+// creation of fifos and always returns EPERM.\nfunc (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {\n- return syscall.EOPNOTSUPP\n+ return syscall.EPERM\n}\n// Remove implements InodeOperations.Remove.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/inode.go", "new_path": "pkg/sentry/fs/host/inode.go", "diff": "@@ -287,7 +287,7 @@ func (*inodeOperations) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, st\n// CreateFifo implements fs.InodeOperations.CreateFifo.\nfunc (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {\n- return syserror.EOPNOTSUPP\n+ return syserror.EPERM\n}\n// Remove implements fs.InodeOperations.Remove.\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -860,7 +860,10 @@ func (l *localFile) Link(target p9.File, newName string) error {\n//\n// Not implemented.\nfunc (*localFile) Mknod(_ string, _ p9.FileMode, _ uint32, _ uint32, _ p9.UID, _ p9.GID) (p9.QID, error) {\n- return p9.QID{}, syscall.ENOSYS\n+ // From mknod(2) man page:\n+ // \"EPERM: [...] if the filesystem containing pathname does not support\n+ // the type of node requested.\"\n+ return p9.QID{}, syscall.EPERM\n}\n// UnlinkAt implements p9.File.\n" } ]
Go
Apache License 2.0
google/gvisor
Return EPERM for mknod This more directly matches what Linux does with unsupported nodes. PiperOrigin-RevId: 248780425 Change-Id: I17f3dd0b244f6dc4eb00e2e42344851b8367fbec
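The rationale quoted in the fsgofer change comes straight from the mknod(2) man page. A minimal sketch of the resulting convention, with a hypothetical function standing in for the real InodeOperations/p9.File methods:

```go
package main

import (
	"fmt"
	"syscall"
)

// mknodUnsupported is what a filesystem that cannot create the requested node
// type returns. Per mknod(2): "EPERM: [...] the filesystem containing
// pathname does not support the type of node requested."
func mknodUnsupported(name string) error {
	return syscall.EPERM
}

func main() {
	fmt.Println(mknodUnsupported("my-fifo")) // "operation not permitted"
}
```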
259,881
20.05.2019 13:34:06
25,200
6588427451c605ee00c8b1a9b6cba06724627ccb
Fix incorrect tmpfs timestamp updates. * Creation of files, directories (and other fs objects) in a directory should always update ctime. * Same for removal. * atime should not be updated on lookup, only readdir. I've also renamed some misleading functions that update mtime and ctime.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode.go", "new_path": "pkg/sentry/fs/fsutil/inode.go", "diff": "@@ -192,6 +192,16 @@ func (i *InodeSimpleAttributes) NotifyStatusChange(ctx context.Context) {\ni.mu.Unlock()\n}\n+// NotifyModificationAndStatusChange updates the modification and status change\n+// times.\n+func (i *InodeSimpleAttributes) NotifyModificationAndStatusChange(ctx context.Context) {\n+ i.mu.Lock()\n+ now := ktime.NowFromContext(ctx)\n+ i.unstable.ModificationTime = now\n+ i.unstable.StatusChangeTime = now\n+ i.mu.Unlock()\n+}\n+\n// InodeSimpleExtendedAttributes implements\n// fs.InodeOperations.{Get,Set,List}xattr.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/fsutil/inode_cached.go", "new_path": "pkg/sentry/fs/fsutil/inode_cached.go", "diff": "@@ -299,7 +299,7 @@ func (c *CachingInodeOperations) Truncate(ctx context.Context, inode *fs.Inode,\n}\noldSize := c.attr.Size\nc.attr.Size = size\n- c.touchModificationTimeLocked(now)\n+ c.touchModificationAndStatusChangeTimeLocked(now)\n// We drop c.dataMu here so that we can lock c.mapsMu and invalidate\n// mappings below. This allows concurrent calls to Read/Translate/etc.\n@@ -360,7 +360,7 @@ func (c *CachingInodeOperations) Allocate(ctx context.Context, offset, length in\n}\nc.attr.Size = newSize\n- c.touchModificationTimeLocked(now)\n+ c.touchModificationAndStatusChangeTimeLocked(now)\nreturn nil\n}\n@@ -394,19 +394,19 @@ func (c *CachingInodeOperations) WriteOut(ctx context.Context, inode *fs.Inode)\nreturn c.backingFile.Sync(ctx)\n}\n-// IncLinks increases the link count and updates cached access time.\n+// IncLinks increases the link count and updates cached modification time.\nfunc (c *CachingInodeOperations) IncLinks(ctx context.Context) {\nc.attrMu.Lock()\nc.attr.Links++\n- c.touchModificationTimeLocked(ktime.NowFromContext(ctx))\n+ c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))\nc.attrMu.Unlock()\n}\n-// DecLinks decreases the link count and updates cached access time.\n+// DecLinks decreases the link count and updates cached modification time.\nfunc (c *CachingInodeOperations) DecLinks(ctx context.Context) {\nc.attrMu.Lock()\nc.attr.Links--\n- c.touchModificationTimeLocked(ktime.NowFromContext(ctx))\n+ c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))\nc.attrMu.Unlock()\n}\n@@ -432,19 +432,19 @@ func (c *CachingInodeOperations) touchAccessTimeLocked(now time.Time) {\nc.dirtyAttr.AccessTime = true\n}\n-// TouchModificationTime updates the cached modification and status change time\n-// in-place to the current time.\n-func (c *CachingInodeOperations) TouchModificationTime(ctx context.Context) {\n+// TouchModificationAndStatusChangeTime updates the cached modification and\n+// status change times in-place to the current time.\n+func (c *CachingInodeOperations) TouchModificationAndStatusChangeTime(ctx context.Context) {\nc.attrMu.Lock()\n- c.touchModificationTimeLocked(ktime.NowFromContext(ctx))\n+ c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))\nc.attrMu.Unlock()\n}\n-// touchModificationTimeLocked updates the cached modification and status\n-// change time in-place to the current time.\n+// touchModificationAndStatusChangeTimeLocked updates the cached modification\n+// and status change times in-place to the current time.\n//\n// Preconditions: c.attrMu is locked for writing.\n-func (c *CachingInodeOperations) touchModificationTimeLocked(now time.Time) {\n+func (c *CachingInodeOperations) 
touchModificationAndStatusChangeTimeLocked(now time.Time) {\nc.attr.ModificationTime = now\nc.dirtyAttr.ModificationTime = true\nc.attr.StatusChangeTime = now\n@@ -554,7 +554,7 @@ func (c *CachingInodeOperations) Write(ctx context.Context, src usermem.IOSequen\nc.attrMu.Lock()\n// Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time().\n- c.touchModificationTimeLocked(ktime.NowFromContext(ctx))\n+ c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))\nn, err := src.CopyInTo(ctx, &inodeReadWriter{ctx, c, offset})\nc.attrMu.Unlock()\nreturn n, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/path.go", "new_path": "pkg/sentry/fs/gofer/path.go", "diff": "@@ -113,7 +113,7 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string\nreturn nil, err\n}\n- i.touchModificationTime(ctx, dir)\n+ i.touchModificationAndStatusChangeTime(ctx, dir)\n// Get an unopened p9.File for the file we created so that it can be cloned\n// and re-opened multiple times after creation, while also getting its\n@@ -167,7 +167,7 @@ func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname\nif _, err := i.fileState.file.symlink(ctx, oldname, newname, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {\nreturn err\n}\n- i.touchModificationTime(ctx, dir)\n+ i.touchModificationAndStatusChangeTime(ctx, dir)\nreturn nil\n}\n@@ -189,7 +189,7 @@ func (i *inodeOperations) CreateHardLink(ctx context.Context, inode *fs.Inode, t\n// Increase link count.\ntargetOpts.cachingInodeOps.IncLinks(ctx)\n}\n- i.touchModificationTime(ctx, inode)\n+ i.touchModificationAndStatusChangeTime(ctx, inode)\nreturn nil\n}\n@@ -205,6 +205,8 @@ func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s\n}\nif i.session().cachePolicy.cacheUAttrs(dir) {\n// Increase link count.\n+ //\n+ // N.B. 
This will update the modification time.\ni.cachingInodeOps.IncLinks(ctx)\n}\nif i.session().cachePolicy.cacheReaddir() {\n@@ -246,7 +248,7 @@ func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string,\n// We're not going to use this file.\nhostFile.Close()\n- i.touchModificationTime(ctx, dir)\n+ i.touchModificationAndStatusChangeTime(ctx, dir)\n// Get the attributes of the file to create inode key.\nqid, mask, attr, err := getattr(ctx, newFile)\n@@ -317,7 +319,7 @@ func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string\nif removeSocket {\ni.session().endpoints.remove(key)\n}\n- i.touchModificationTime(ctx, dir)\n+ i.touchModificationAndStatusChangeTime(ctx, dir)\nreturn nil\n}\n@@ -397,9 +399,9 @@ func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent\nreturn nil\n}\n-func (i *inodeOperations) touchModificationTime(ctx context.Context, inode *fs.Inode) {\n+func (i *inodeOperations) touchModificationAndStatusChangeTime(ctx context.Context, inode *fs.Inode) {\nif i.session().cachePolicy.cacheUAttrs(inode) {\n- i.cachingInodeOps.TouchModificationTime(ctx)\n+ i.cachingInodeOps.TouchModificationAndStatusChangeTime(ctx)\n}\nif i.session().cachePolicy.cacheReaddir() {\n// Invalidate readdir cache.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/ramfs/dir.go", "new_path": "pkg/sentry/fs/ramfs/dir.go", "diff": "@@ -112,7 +112,7 @@ func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwn\n}\n// addChildLocked add the child inode, inheriting its reference.\n-func (d *Dir) addChildLocked(name string, inode *fs.Inode) {\n+func (d *Dir) addChildLocked(ctx context.Context, name string, inode *fs.Inode) {\nd.children[name] = inode\nd.dentryMap.Add(name, fs.DentAttr{\nType: inode.StableAttr.Type,\n@@ -123,18 +123,25 @@ func (d *Dir) addChildLocked(name string, inode *fs.Inode) {\n// corresponding to '..' from the subdirectory.\nif fs.IsDir(inode.StableAttr) {\nd.AddLink()\n+ // ctime updated below.\n}\n// Given we're now adding this inode to the directory we must also\n- // increase its link count. Similarly we decremented it in removeChildLocked.\n+ // increase its link count. Similarly we decrement it in removeChildLocked.\n+ //\n+ // Changing link count updates ctime.\ninode.AddLink()\n+ inode.InodeOperations.NotifyStatusChange(ctx)\n+\n+ // We've change the directory. This always updates our mtime and ctime.\n+ d.NotifyModificationAndStatusChange(ctx)\n}\n// AddChild adds a child to this dir.\nfunc (d *Dir) AddChild(ctx context.Context, name string, inode *fs.Inode) {\nd.mu.Lock()\ndefer d.mu.Unlock()\n- d.addChildLocked(name, inode)\n+ d.addChildLocked(ctx, name, inode)\n}\n// FindChild returns (child, true) if the directory contains name.\n@@ -179,14 +186,18 @@ func (d *Dir) removeChildLocked(ctx context.Context, name string) (*fs.Inode, er\n// link count which was the child's \"..\" directory entry.\nif fs.IsDir(inode.StableAttr) {\nd.DropLink()\n+ // ctime changed below.\n}\n- // Update ctime.\n- inode.InodeOperations.NotifyStatusChange(ctx)\n-\n// Given we're now removing this inode to the directory we must also\n// decrease its link count. Similarly it is increased in addChildLocked.\n+ //\n+ // Changing link count updates ctime.\ninode.DropLink()\n+ inode.InodeOperations.NotifyStatusChange(ctx)\n+\n+ // We've change the directory. 
This always updates our mtime and ctime.\n+ d.NotifyModificationAndStatusChange(ctx)\nreturn inode, nil\n}\n@@ -263,8 +274,6 @@ func (d *Dir) Lookup(ctx context.Context, _ *fs.Inode, p string) (*fs.Dirent, er\n// walkLocked must be called with d.mu held.\nfunc (d *Dir) walkLocked(ctx context.Context, p string) (*fs.Inode, error) {\n- d.NotifyAccess(ctx)\n-\n// Lookup a child node.\nif inode, ok := d.children[p]; ok {\nreturn inode, nil\n@@ -290,8 +299,7 @@ func (d *Dir) createInodeOperationsCommon(ctx context.Context, name string, make\nreturn nil, err\n}\n- d.addChildLocked(name, inode)\n- d.NotifyModification(ctx)\n+ d.addChildLocked(ctx, name, inode)\nreturn inode, nil\n}\n@@ -342,11 +350,7 @@ func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inod\ntarget.IncRef()\n// The link count will be incremented in addChildLocked.\n- d.addChildLocked(name, target)\n- d.NotifyModification(ctx)\n-\n- // Update ctime.\n- target.InodeOperations.NotifyStatusChange(ctx)\n+ d.addChildLocked(ctx, name, target)\nreturn nil\n}\n@@ -359,8 +363,6 @@ func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, p\n_, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) {\nreturn d.NewDir(ctx, dir, perms)\n})\n- // TODO(nlacasse): Support updating status times, as those should be\n- // updated by links.\nreturn err\n}\n@@ -526,10 +528,7 @@ func Rename(ctx context.Context, oldParent fs.InodeOperations, oldName string, n\n// Do the swap.\nn := op.children[oldName]\nop.removeChildLocked(ctx, oldName)\n- np.addChildLocked(newName, n)\n-\n- // Update ctime.\n- n.InodeOperations.NotifyStatusChange(ctx)\n+ np.addChildLocked(ctx, newName, n)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/stat_times.cc", "new_path": "test/syscalls/linux/stat_times.cc", "diff": "@@ -263,6 +263,40 @@ TEST(StatTimesTest, DirList) {\nCtimeEffect::Unchanged);\n}\n+// Creating a file in a directory changes mtime and ctime.\n+TEST(StatTimesTest, DirCreateFile) {\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ TempPath file;\n+ auto fn = [&] {\n+ file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+ };\n+ CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,\n+ CtimeEffect::Changed);\n+}\n+\n+// Creating a directory in a directory changes mtime and ctime.\n+TEST(StatTimesTest, DirCreateDir) {\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ TempPath dir2;\n+ auto fn = [&] {\n+ dir2 = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(dir.path()));\n+ };\n+ CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,\n+ CtimeEffect::Changed);\n+}\n+\n+// Removing a file from a directory changes mtime and ctime.\n+TEST(StatTimesTest, DirRemoveFile) {\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+ auto fn = [&] { file.reset(); };\n+ CheckTimes(dir, fn, AtimeEffect::Unchanged, MtimeEffect::Changed,\n+ CtimeEffect::Changed);\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Fix incorrect tmpfs timestamp updates * Creation of files, directories (and other fs objects) in a directory should always update ctime. * Same for removal. * atime should not be updated on lookup, only readdir. I've also renamed some misleading functions that update mtime and ctime. PiperOrigin-RevId: 249115063 Change-Id: I30fa275fa7db96d01aa759ed64628c18bb3a7dc7
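The rules in the message above (create/remove in a directory updates that directory's mtime and ctime; lookup leaves atime alone) mirror standard Linux behavior, which the new DirCreateFile/DirCreateDir/DirRemoveFile tests assert. A standalone Go sketch that observes the create rule on a host Linux filesystem, timestamp resolution permitting; it is an illustration, not part of the test suite:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// dirTimes returns a directory's mtime and ctime (Linux-specific Stat_t
// fields, which is fine here since gVisor targets Linux).
func dirTimes(path string) (mtime, ctime syscall.Timespec) {
	var st syscall.Stat_t
	if err := syscall.Stat(path, &st); err != nil {
		panic(err)
	}
	return st.Mtim, st.Ctim
}

func main() {
	dir, err := os.MkdirTemp("", "ts")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	m1, c1 := dirTimes(dir)
	// Creating a file in dir should bump dir's mtime and ctime.
	if err := os.WriteFile(filepath.Join(dir, "child"), nil, 0644); err != nil {
		panic(err)
	}
	m2, c2 := dirTimes(dir)

	fmt.Println("mtime changed:", m2 != m1, "ctime changed:", c2 != c1)
}
```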
259,881
21.05.2019 17:04:58
25,200
c8857f72696c1097a427b75f4340969e20cc0e95
Fix inconsistencies in ELF anonymous mappings. * A segment with filesz == 0, memsz > 0 should be an anonymous-only mapping. We were failing to load such an ELF. * Anonymous pages are always mapped RW, regardless of the segment protections.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/loader/elf.go", "new_path": "pkg/sentry/loader/elf.go", "diff": "@@ -223,13 +223,8 @@ func parseHeader(ctx context.Context, f *fs.File) (elfInfo, error) {\n// mapSegment maps a phdr into the Task. offset is the offset to apply to\n// phdr.Vaddr.\nfunc mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.ProgHeader, offset usermem.Addr) error {\n- // Alignment of vaddr and offset must match. We'll need to map on the\n- // page boundary.\n+ // We must make a page-aligned mapping.\nadjust := usermem.Addr(phdr.Vaddr).PageOffset()\n- if adjust != usermem.Addr(phdr.Off).PageOffset() {\n- ctx.Infof(\"Alignment of vaddr %#x != off %#x\", phdr.Vaddr, phdr.Off)\n- return syserror.ENOEXEC\n- }\naddr, ok := offset.AddLength(phdr.Vaddr)\nif !ok {\n@@ -239,17 +234,11 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.\n}\naddr -= usermem.Addr(adjust)\n- fileOffset := phdr.Off - adjust\nfileSize := phdr.Filesz + adjust\nif fileSize < phdr.Filesz {\nctx.Infof(\"Computed segment file size overflows: %#x + %#x\", phdr.Filesz, adjust)\nreturn syserror.ENOEXEC\n}\n- memSize := phdr.Memsz + adjust\n- if memSize < phdr.Memsz {\n- ctx.Infof(\"Computed segment mem size overflows: %#x + %#x\", phdr.Memsz, adjust)\n- return syserror.ENOEXEC\n- }\nms, ok := usermem.Addr(fileSize).RoundUp()\nif !ok {\nctx.Infof(\"fileSize %#x too large\", fileSize)\n@@ -257,6 +246,12 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.\n}\nmapSize := uint64(ms)\n+ if mapSize > 0 {\n+ // This must result in a page-aligned offset. i.e., the original\n+ // phdr.Off must have the same alignment as phdr.Vaddr. If that is not\n+ // true, MMap will reject the mapping.\n+ fileOffset := phdr.Off - adjust\n+\nprot := progFlagsAsPerms(phdr.Flags)\nmopts := memmap.MMapOpts{\nLength: mapSize,\n@@ -303,6 +298,13 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.\nreturn err\n}\n}\n+ }\n+\n+ memSize := phdr.Memsz + adjust\n+ if memSize < phdr.Memsz {\n+ ctx.Infof(\"Computed segment mem size overflows: %#x + %#x\", phdr.Memsz, adjust)\n+ return syserror.ENOEXEC\n+ }\n// Allocate more anonymous pages if necessary.\nif mapSize < memSize {\n@@ -323,7 +325,11 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.\n// already at addr.\nFixed: true,\nPrivate: true,\n- Perms: progFlagsAsPerms(phdr.Flags),\n+ // N.B. Linux uses vm_brk to map these pages, ignoring\n+ // the segment protections, instead always mapping RW.\n+ // These pages are not included in the final brk\n+ // region.\n+ Perms: usermem.ReadWrite,\nMaxPerms: usermem.AnyAccess,\n}); err != nil {\nctx.Infof(\"Error mapping PT_LOAD segment %v anonymous memory: %v\", phdr, err)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/exec_binary.cc", "new_path": "test/syscalls/linux/exec_binary.cc", "diff": "@@ -401,6 +401,141 @@ TEST(ElfTest, DataSegment) {\n})));\n}\n+// Additonal pages beyond filesz are always RW.\n+//\n+// N.B. Linux uses set_brk -> vm_brk to additional pages beyond filesz (even\n+// though start_brk itself will always be beyond memsz). As a result, the\n+// segment permissions don't apply; the mapping is always RW.\n+TEST(ElfTest, ExtraMemPages) {\n+ ElfBinary<64> elf = StandardElf();\n+\n+ // Create a standard ELF, but extend to 1.5 pages. 
The second page will be the\n+ // beginning of a multi-page data + bss segment.\n+ elf.data.resize(kPageSize + kPageSize / 2);\n+\n+ decltype(elf)::ElfPhdr phdr = {};\n+ phdr.p_type = PT_LOAD;\n+ // RWX segment. The extra anon page will be RW anyways.\n+ //\n+ // N.B. Linux uses clear_user to clear the end of the file-mapped page, which\n+ // respects the mapping protections. Thus if we map this RO with memsz >\n+ // (unaligned) filesz, then execve will fail with EFAULT. See padzero(elf_bss)\n+ // in fs/binfmt_elf.c:load_elf_binary.\n+ //\n+ // N.N.B.B. The above only applies to the last segment. For earlier segments,\n+ // the clear_user error is ignored.\n+ phdr.p_flags = PF_R | PF_W | PF_X;\n+ phdr.p_offset = kPageSize;\n+ phdr.p_vaddr = 0x41000;\n+ phdr.p_filesz = kPageSize / 2;\n+ // The header is going to push vaddr up by a few hundred bytes. Keep p_memsz a\n+ // bit less than 2 pages so this mapping doesn't extend beyond 0x43000.\n+ phdr.p_memsz = 2 * kPageSize - kPageSize / 2;\n+ elf.phdrs.push_back(phdr);\n+\n+ elf.UpdateOffsets();\n+\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));\n+\n+ pid_t child;\n+ int execve_errno;\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(\n+ ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));\n+ ASSERT_EQ(execve_errno, 0);\n+\n+ ASSERT_NO_ERRNO(WaitStopped(child));\n+\n+ EXPECT_THAT(child,\n+ ContainsMappings(std::vector<ProcMapsEntry>({\n+ // text page.\n+ {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,\n+ file.path().c_str()},\n+ // data + bss page from file.\n+ {0x41000, 0x42000, true, true, true, true, kPageSize, 0, 0, 0,\n+ file.path().c_str()},\n+ // extra page from anon.\n+ {0x42000, 0x43000, true, true, false, true, 0, 0, 0, 0, \"\"},\n+ })));\n+}\n+\n+// An aligned segment with filesz == 0, memsz > 0 is anon-only.\n+TEST(ElfTest, AnonOnlySegment) {\n+ ElfBinary<64> elf = StandardElf();\n+\n+ decltype(elf)::ElfPhdr phdr = {};\n+ phdr.p_type = PT_LOAD;\n+ // RO segment. The extra anon page will be RW anyways.\n+ phdr.p_flags = PF_R;\n+ phdr.p_offset = 0;\n+ phdr.p_vaddr = 0x41000;\n+ phdr.p_filesz = 0;\n+ phdr.p_memsz = kPageSize - 0xe8;\n+ elf.phdrs.push_back(phdr);\n+\n+ elf.UpdateOffsets();\n+\n+ // UpdateOffsets adjusts p_vaddr and p_offset by the header size, but we need\n+ // a page-aligned p_vaddr to get a truly anon-only page.\n+ elf.phdrs[2].p_vaddr = 0x41000;\n+ // N.B. 
p_offset is now unaligned, but Linux doesn't care since this is\n+ // anon-only.\n+\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));\n+\n+ pid_t child;\n+ int execve_errno;\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(\n+ ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));\n+ ASSERT_EQ(execve_errno, 0);\n+\n+ ASSERT_NO_ERRNO(WaitStopped(child));\n+\n+ EXPECT_THAT(child,\n+ ContainsMappings(std::vector<ProcMapsEntry>({\n+ // text page.\n+ {0x40000, 0x41000, true, false, true, true, 0, 0, 0, 0,\n+ file.path().c_str()},\n+ // anon page.\n+ {0x41000, 0x42000, true, true, false, true, 0, 0, 0, 0, \"\"},\n+ })));\n+}\n+\n+// p_offset must have the same alignment as p_vaddr.\n+TEST(ElfTest, UnalignedOffset) {\n+ ElfBinary<64> elf = StandardElf();\n+\n+ // Unaligned offset.\n+ elf.phdrs[1].p_offset += 1;\n+\n+ elf.UpdateOffsets();\n+\n+ TempPath file = ASSERT_NO_ERRNO_AND_VALUE(CreateElfWith(elf));\n+\n+ pid_t child;\n+ int execve_errno;\n+ auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(\n+ ForkAndExec(file.path(), {file.path()}, {}, &child, &execve_errno));\n+\n+ // execve(2) return EINVAL, but behavior varies between Linux and gVisor.\n+ //\n+ // On Linux, the new mm is committed before attempting to map into it. By the\n+ // time we hit EINVAL in the segment mmap, the old mm is gone. Linux returns\n+ // to an empty mm, which immediately segfaults.\n+ //\n+ // OTOH, gVisor maps into the new mm before committing it. Thus when it hits\n+ // failure, the caller is still intact to receive the error.\n+ if (IsRunningOnGvisor()) {\n+ ASSERT_EQ(execve_errno, EINVAL);\n+ } else {\n+ ASSERT_EQ(execve_errno, 0);\n+\n+ int status;\n+ ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0),\n+ SyscallSucceedsWithValue(child));\n+ EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV) << status;\n+ }\n+}\n+\n// Linux will allow PT_LOAD segments to overlap.\nTEST(ElfTest, DirectlyOverlappingSegments) {\n// NOTE(b/37289926): see PIEOutOfOrderSegments.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix inconsistencies in ELF anonymous mappings * A segment with filesz == 0, memsz > 0 should be an anonymous only mapping. We were failing to load such an ELF. * Anonymous pages are always mapped RW, regardless of the segment protections. PiperOrigin-RevId: 249355239 Change-Id: I251e5c0ce8848cf8420c3aadf337b0d77b1ad991
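The two rules in the message boil down to how a PT_LOAD segment is split: bytes up to p_filesz come from the file, anything beyond that up to p_memsz is zero-filled anonymous memory, and Linux maps those extra pages RW regardless of p_flags. A back-of-the-envelope sketch of that split, ignoring the vaddr page-offset adjustment the loader also applies:

```go
package main

import "fmt"

const pageSize = 4096

func roundUp(x uint64) uint64 { return (x + pageSize - 1) &^ (pageSize - 1) }

func main() {
	// Values chosen to echo the ExtraMemPages test above: half a page of
	// file data, just under two pages of total memory image.
	var filesz, memsz uint64 = pageSize / 2, 2*pageSize - pageSize/2

	fileMapped := roundUp(filesz) // pages backed by the ELF file
	var anon uint64
	if memsz > fileMapped {
		// Remainder is zero-filled anonymous memory; Linux maps it RW
		// regardless of the segment's p_flags.
		anon = roundUp(memsz) - fileMapped
	}
	fmt.Printf("file-backed: %d bytes, anonymous: %d bytes\n", fileMapped, anon)
}
```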
259,881
22.05.2019 11:14:29
25,200
69eac1198f3dae9a41ddf1903e9dda7972ed5d77
Move wait constants to abi/linux package. Updates #214
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/BUILD", "new_path": "pkg/abi/linux/BUILD", "diff": "@@ -52,6 +52,7 @@ go_library(\n\"tty.go\",\n\"uio.go\",\n\"utsname.go\",\n+ \"wait.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/abi/linux\",\nvisibility = [\"//visibility:public\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/abi/linux/wait.go", "diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// Options for waitpid(2), wait4(2), and/or waitid(2), from\n+// include/uapi/linux/wait.h.\n+const (\n+ WNOHANG = 0x00000001\n+ WUNTRACED = 0x00000002\n+ WSTOPPED = WUNTRACED\n+ WEXITED = 0x00000004\n+ WCONTINUED = 0x00000008\n+ WNOWAIT = 0x01000000\n+ WNOTHREAD = 0x20000000\n+ WALL = 0x40000000\n+ WCLONE = 0x80000000\n+)\n+\n+// ID types for waitid(2), from include/uapi/linux/wait.h.\n+const (\n+ P_ALL = 0x0\n+ P_PID = 0x1\n+ P_PGID = 0x2\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -42,14 +42,6 @@ const (\nexitSignalMask = 0xff\n)\n-// Possible values for the idtype argument to waitid(2), defined in Linux's\n-// include/uapi/linux/wait.h.\n-const (\n- _P_ALL = 0\n- _P_PID = 1\n- _P_PGID = 2\n-)\n-\n// Getppid implements linux syscall getppid(2).\nfunc Getppid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\nparent := t.Parent()\n@@ -191,7 +183,7 @@ func Vfork(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// wait4 waits for the given child process to exit.\nfunc wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {\n- if options&^(syscall.WNOHANG|syscall.WUNTRACED|syscall.WCONTINUED|syscall.WALL|syscall.WCLONE) != 0 {\n+ if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WALL|linux.WCLONE) != 0 {\nreturn 0, syscall.EINVAL\n}\nwopts := kernel.WaitOptions{\n@@ -215,24 +207,24 @@ func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusage\nwopts.SpecificTID = kernel.ThreadID(pid)\n}\n- switch options & (syscall.WCLONE | syscall.WALL) {\n+ switch options & (linux.WCLONE | linux.WALL) {\ncase 0:\nwopts.NonCloneTasks = true\n- case syscall.WCLONE:\n+ case linux.WCLONE:\nwopts.CloneTasks = true\n- case syscall.WALL:\n+ case linux.WALL:\nwopts.NonCloneTasks = true\nwopts.CloneTasks = true\ndefault:\nreturn 0, syscall.EINVAL\n}\n- if options&syscall.WUNTRACED != 0 {\n+ if options&linux.WUNTRACED != 0 {\nwopts.Events |= kernel.EventChildGroupStop\n}\n- if options&syscall.WCONTINUED != 0 {\n+ if options&linux.WCONTINUED != 0 {\nwopts.Events |= kernel.EventGroupContinue\n}\n- if options&syscall.WNOHANG == 0 {\n+ if options&linux.WNOHANG == 0 {\nwopts.BlockInterruptErr = kernel.ERESTARTSYS\n}\n@@ -286,36 +278,36 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\noptions := 
int(args[3].Uint())\nrusageAddr := args[4].Pointer()\n- if options&^(syscall.WNOHANG|syscall.WEXITED|syscall.WSTOPPED|syscall.WCONTINUED|syscall.WNOWAIT) != 0 {\n+ if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\n- if options&(syscall.WEXITED|syscall.WSTOPPED|syscall.WCONTINUED) == 0 {\n+ if options&(linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED) == 0 {\nreturn 0, nil, syscall.EINVAL\n}\nwopts := kernel.WaitOptions{\nNonCloneTasks: true,\nEvents: kernel.EventTraceeStop,\n- ConsumeEvent: options&syscall.WNOWAIT == 0,\n+ ConsumeEvent: options&linux.WNOWAIT == 0,\n}\nswitch idtype {\n- case _P_ALL:\n- case _P_PID:\n+ case linux.P_ALL:\n+ case linux.P_PID:\nwopts.SpecificTID = kernel.ThreadID(id)\n- case _P_PGID:\n+ case linux.P_PGID:\nwopts.SpecificPGID = kernel.ProcessGroupID(id)\ndefault:\nreturn 0, nil, syscall.EINVAL\n}\n- if options&syscall.WEXITED != 0 {\n+ if options&linux.WEXITED != 0 {\nwopts.Events |= kernel.EventExit\n}\n- if options&syscall.WSTOPPED != 0 {\n+ if options&linux.WSTOPPED != 0 {\nwopts.Events |= kernel.EventChildGroupStop\n}\n- if options&syscall.WCONTINUED != 0 {\n+ if options&linux.WCONTINUED != 0 {\nwopts.Events |= kernel.EventGroupContinue\n}\n- if options&syscall.WNOHANG == 0 {\n+ if options&linux.WNOHANG == 0 {\nwopts.BlockInterruptErr = kernel.ERESTARTSYS\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Move wait constants to abi/linux package Updates #214 PiperOrigin-RevId: 249483756 Change-Id: I0d3cf4112bed75a863d5eb08c2063fbc506cd875
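With the constants now in abi/linux, callers such as sys_thread.go validate the options word against an allow-mask built from them. A standalone sketch of that check; the constant values are copied from the new wait.go, and the mask matches this commit (before WNOTHREAD support was added).

```go
package main

import "fmt"

// Constant values restated from abi/linux/wait.go so the snippet runs
// standalone; in-tree code imports them from the linux package instead.
const (
	WNOHANG    = 0x00000001
	WUNTRACED  = 0x00000002
	WCONTINUED = 0x00000008
	WALL       = 0x40000000
	WCLONE     = 0x80000000
)

// validWait4Options mirrors the handler's check: reject any bit outside the
// supported set.
func validWait4Options(options uint64) bool {
	return options&^uint64(WNOHANG|WUNTRACED|WCONTINUED|WALL|WCLONE) == 0
}

func main() {
	fmt.Println(validWait4Options(WNOHANG | WUNTRACED)) // true
	fmt.Println(validWait4Options(0x10000000))          // false: unsupported bit
}
```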
259,881
22.05.2019 15:53:13
25,200
711290a7f6c434ddbfe401e46002afd30df26aa5
Add support for wait(WNOTHREAD)
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_exit.go", "new_path": "pkg/sentry/kernel/task_exit.go", "diff": "@@ -782,6 +782,10 @@ type WaitOptions struct {\n// for.\nCloneTasks bool\n+ // If SiblingChildren is true, events from children tasks of any task\n+ // in the thread group of the waiter are eligible to be waited for.\n+ SiblingChildren bool\n+\n// Events is a bitwise combination of the events defined above that specify\n// what events are of interest to the call to Wait.\nEvents waiter.EventMask\n@@ -869,9 +873,35 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {\nt.tg.pidns.owner.mu.Lock()\ndefer t.tg.pidns.owner.mu.Unlock()\n- // Without the (unimplemented) __WNOTHREAD flag, a task can wait on the\n- // children and tracees of any task in the same thread group.\n+ if opts.SiblingChildren {\n+ // We can wait on the children and tracees of any task in the\n+ // same thread group.\nfor parent := t.tg.tasks.Front(); parent != nil; parent = parent.Next() {\n+ wr, any := t.waitParentLocked(opts, parent)\n+ if wr != nil {\n+ return wr, nil\n+ }\n+ anyWaitableTasks = anyWaitableTasks || any\n+ }\n+ } else {\n+ // We can only wait on this task.\n+ var wr *WaitResult\n+ wr, anyWaitableTasks = t.waitParentLocked(opts, t)\n+ if wr != nil {\n+ return wr, nil\n+ }\n+ }\n+\n+ if anyWaitableTasks {\n+ return nil, ErrNoWaitableEvent\n+ }\n+ return nil, syserror.ECHILD\n+}\n+\n+// Preconditions: The TaskSet mutex must be locked for writing.\n+func (t *Task) waitParentLocked(opts *WaitOptions, parent *Task) (*WaitResult, bool) {\n+ anyWaitableTasks := false\n+\nfor child := range parent.children {\nif !opts.matchesTask(child, parent.tg.pidns) {\ncontinue\n@@ -881,7 +911,7 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {\nif opts.Events&EventExit != 0 && child == child.tg.leader && !child.exitParentAcked {\nanyWaitableTasks = true\nif wr := t.waitCollectZombieLocked(child, opts, false); wr != nil {\n- return wr, nil\n+ return wr, anyWaitableTasks\n}\n}\n// Check for group stops and continues. 
Tasks that have passed\n@@ -906,12 +936,12 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {\nanyWaitableTasks = true\nif opts.Events&EventChildGroupStop != 0 {\nif wr := t.waitCollectChildGroupStopLocked(child, opts); wr != nil {\n- return wr, nil\n+ return wr, anyWaitableTasks\n}\n}\nif opts.Events&EventGroupContinue != 0 {\nif wr := t.waitCollectGroupContinueLocked(child, opts); wr != nil {\n- return wr, nil\n+ return wr, anyWaitableTasks\n}\n}\n}\n@@ -923,7 +953,7 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {\nif opts.Events&EventExit != 0 && !tracee.exitTracerAcked {\nanyWaitableTasks = true\nif wr := t.waitCollectZombieLocked(tracee, opts, true); wr != nil {\n- return wr, nil\n+ return wr, anyWaitableTasks\n}\n}\nif opts.Events&(EventTraceeStop|EventGroupContinue) == 0 {\n@@ -935,21 +965,17 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {\nanyWaitableTasks = true\nif opts.Events&EventTraceeStop != 0 {\nif wr := t.waitCollectTraceeStopLocked(tracee, opts); wr != nil {\n- return wr, nil\n+ return wr, anyWaitableTasks\n}\n}\nif opts.Events&EventGroupContinue != 0 {\nif wr := t.waitCollectGroupContinueLocked(tracee, opts); wr != nil {\n- return wr, nil\n- }\n+ return wr, anyWaitableTasks\n}\n}\n}\n- if anyWaitableTasks {\n- return nil, ErrNoWaitableEvent\n- }\n- return nil, syserror.ECHILD\n+ return nil, anyWaitableTasks\n}\n// Preconditions: The TaskSet mutex must be locked for writing.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -183,7 +183,7 @@ func Vfork(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// wait4 waits for the given child process to exit.\nfunc wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {\n- if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WALL|linux.WCLONE) != 0 {\n+ if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {\nreturn 0, syscall.EINVAL\n}\nwopts := kernel.WaitOptions{\n@@ -227,6 +227,9 @@ func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusage\nif options&linux.WNOHANG == 0 {\nwopts.BlockInterruptErr = kernel.ERESTARTSYS\n}\n+ if options&linux.WNOTHREAD == 0 {\n+ wopts.SiblingChildren = true\n+ }\nwr, err := t.Wait(&wopts)\nif err != nil {\n@@ -278,7 +281,7 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\noptions := int(args[3].Uint())\nrusageAddr := args[4].Pointer()\n- if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT) != 0 {\n+ if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT|linux.WNOTHREAD) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\nif options&(linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED) == 0 {\n@@ -310,6 +313,9 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nif options&linux.WNOHANG == 0 {\nwopts.BlockInterruptErr = kernel.ERESTARTSYS\n}\n+ if options&linux.WNOTHREAD == 0 {\n+ wopts.SiblingChildren = true\n+ }\nwr, err := t.Wait(&wopts)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -3179,7 +3179,9 @@ cc_binary(\n\"//test/util:signal_util\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n+ 
\"//test/util:thread_util\",\n\"@com_google_absl//absl/strings\",\n+ \"@com_google_absl//absl/synchronization\",\n\"@com_google_absl//absl/time\",\n\"@com_google_googletest//:gtest\",\n],\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/wait.cc", "new_path": "test/syscalls/linux/wait.cc", "diff": "#include <unistd.h>\n#include <functional>\n+#include <tuple>\n#include <vector>\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/strings/str_cat.h\"\n+#include \"absl/synchronization/mutex.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"test/util/cleanup.h\"\n#include \"test/util/posix_error.h\"\n#include \"test/util/signal_util.h\"\n#include \"test/util/test_util.h\"\n+#include \"test/util/thread_util.h\"\nusing ::testing::UnorderedElementsAre;\n@@ -42,10 +45,8 @@ using ::testing::UnorderedElementsAre;\n//\n// NOTE(b/22640830,b/27680907,b/29049891): Some functionality is not tested as\n// it is not currently supported by gVisor:\n-// * UID in waitid(2) siginfo.\n// * Process groups.\n// * Core dump status (WCOREDUMP).\n-// * Linux only option __WNOTHREAD.\n//\n// Tests for waiting on stopped/continued children are in sigstop.cc.\n@@ -357,13 +358,22 @@ INSTANTIATE_TEST_SUITE_P(\nreturn static_cast<pid_t>(si.si_pid);\n}));\n-// Fixture for tests parameterized by a function that takes the PID of a\n-// specific child to wait for, waits for it to exit, and checks that it exits\n-// with the given code.\n+// Fixture for tests parameterized by a (sysno, function) tuple. The function\n+// takes the PID of a specific child to wait for, waits for it to exit, and\n+// checks that it exits with the given code.\nclass WaitSpecificChildTest\n- : public ::testing::TestWithParam<std::function<PosixError(pid_t, int)>> {\n+ : public ::testing::TestWithParam<\n+ std::tuple<int, std::function<PosixError(pid_t, int, int)>>> {\nprotected:\n- PosixError WaitFor(pid_t pid, int code) { return GetParam()(pid, code); }\n+ int Sysno() { return std::get<0>(GetParam()); }\n+\n+ PosixError WaitForWithOptions(pid_t pid, int options, int code) {\n+ return std::get<1>(GetParam())(pid, options, code);\n+ }\n+\n+ PosixError WaitFor(pid_t pid, int code) {\n+ return std::get<1>(GetParam())(pid, 0, code);\n+ }\n};\n// Wait for specific child to exit.\n@@ -432,6 +442,75 @@ TEST_P(WaitSpecificChildTest, AfterExit) {\nEXPECT_NO_ERRNO(WaitFor(child, 0));\n}\n+// Wait for child of sibling thread.\n+TEST_P(WaitSpecificChildTest, SiblingChildren) {\n+ absl::Mutex mu;\n+ pid_t child;\n+ bool ready = false;\n+ bool stop = false;\n+\n+ ScopedThread t([&] {\n+ absl::MutexLock ml(&mu);\n+ EXPECT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());\n+ ready = true;\n+ mu.Await(absl::Condition(&stop));\n+ });\n+\n+ // N.B. This must be declared after ScopedThread, so it is destructed first,\n+ // thus waking the thread.\n+ absl::MutexLock ml(&mu);\n+ mu.Await(absl::Condition(&ready));\n+\n+ EXPECT_NO_ERRNO(WaitFor(child, 0));\n+\n+ // Keep the sibling alive until after we've waited so the child isn't\n+ // reparented.\n+ stop = true;\n+}\n+\n+// Waiting for child of sibling thread not allowed with WNOTHREAD.\n+TEST_P(WaitSpecificChildTest, SiblingChildrenWNOTHREAD) {\n+ // Linux added WNOTHREAD support to waitid(2) in\n+ // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba (\"wait: allow sys_waitid() to\n+ // accept __WNOTHREAD/__WCLONE/__WALL\"). 
i.e., Linux 4.7.\n+ //\n+ // Skip the test if it isn't supported yet.\n+ if (Sysno() == SYS_waitid) {\n+ int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WNOTHREAD);\n+ SKIP_IF(ret < 0 && errno == EINVAL);\n+ }\n+\n+ absl::Mutex mu;\n+ pid_t child;\n+ bool ready = false;\n+ bool stop = false;\n+\n+ ScopedThread t([&] {\n+ absl::MutexLock ml(&mu);\n+ EXPECT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());\n+ ready = true;\n+ mu.Await(absl::Condition(&stop));\n+\n+ // This thread can wait on child.\n+ EXPECT_NO_ERRNO(WaitForWithOptions(child, __WNOTHREAD, 0));\n+ });\n+\n+ // N.B. This must be declared after ScopedThread, so it is destructed first,\n+ // thus waking the thread.\n+ absl::MutexLock ml(&mu);\n+ mu.Await(absl::Condition(&ready));\n+\n+ // This thread can't wait on child.\n+ EXPECT_THAT(\n+ WaitForWithOptions(child, __WNOTHREAD, 0),\n+ PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n+ ::testing::StrEq(\"wait4\"))));\n+\n+ // Keep the sibling alive until after we've waited so the child isn't\n+ // reparented.\n+ stop = true;\n+}\n+\n// Wait for specific child to exit.\n// A non-CLONE_THREAD child which sends SIGCHLD upon exit behaves much like\n// a forked process.\n@@ -551,55 +630,53 @@ TEST_P(WaitSpecificChildTest, AfterChildExecve) {\nEXPECT_NO_ERRNO(WaitFor(child, 0));\n}\n-INSTANTIATE_TEST_SUITE_P(\n- Waiters, WaitSpecificChildTest,\n- ::testing::Values(\n- [](pid_t pid, int code) -> PosixError {\n+PosixError CheckWait4(pid_t pid, int options, int code) {\nint status;\n- auto const rv = Wait4(pid, &status, 0, nullptr);\n+ auto const rv = Wait4(pid, &status, options, nullptr);\nMaybeSave();\nif (rv < 0) {\nreturn PosixError(errno, \"wait4\");\n} else if (rv != pid) {\n- return PosixError(EINVAL, absl::StrCat(\"unexpected pid: got \", rv,\n- \", wanted \", pid));\n+ return PosixError(\n+ EINVAL, absl::StrCat(\"unexpected pid: got \", rv, \", wanted \", pid));\n}\nif (!WIFEXITED(status) || WEXITSTATUS(status) != code) {\n- return PosixError(\n- EINVAL, absl::StrCat(\"unexpected wait status: got \", status,\n- \", wanted \", code));\n+ return PosixError(EINVAL, absl::StrCat(\"unexpected wait status: got \",\n+ status, \", wanted \", code));\n}\nreturn NoError();\n- },\n- [](pid_t pid, int code) -> PosixError {\n+};\n+\n+PosixError CheckWaitid(pid_t pid, int options, int code) {\nsiginfo_t si;\n- auto const rv = Waitid(P_PID, pid, &si, WEXITED);\n+ auto const rv = Waitid(P_PID, pid, &si, options | WEXITED);\nMaybeSave();\nif (rv < 0) {\nreturn PosixError(errno, \"waitid\");\n}\nif (si.si_pid != pid) {\n- return PosixError(EINVAL,\n- absl::StrCat(\"unexpected pid: got \", si.si_pid,\n+ return PosixError(EINVAL, absl::StrCat(\"unexpected pid: got \", si.si_pid,\n\", wanted \", pid));\n}\nif (si.si_signo != SIGCHLD) {\n- return PosixError(\n- EINVAL, absl::StrCat(\"unexpected signo: got \", si.si_signo,\n- \", wanted \", SIGCHLD));\n+ return PosixError(EINVAL, absl::StrCat(\"unexpected signo: got \",\n+ si.si_signo, \", wanted \", SIGCHLD));\n}\nif (si.si_status != code) {\n- return PosixError(\n- EINVAL, absl::StrCat(\"unexpected status: got \", si.si_status,\n- \", wanted \", code));\n+ return PosixError(EINVAL, absl::StrCat(\"unexpected status: got \",\n+ si.si_status, \", wanted \", code));\n}\nif (si.si_code != CLD_EXITED) {\n- return PosixError(EINVAL,\n- absl::StrCat(\"unexpected code: got \", si.si_code,\n+ return PosixError(EINVAL, absl::StrCat(\"unexpected code: got \", si.si_code,\n\", wanted \", CLD_EXITED));\n}\nreturn NoError();\n- 
}));\n+}\n+\n+INSTANTIATE_TEST_SUITE_P(\n+ Waiters, WaitSpecificChildTest,\n+ ::testing::Values(std::make_tuple(SYS_wait4, CheckWait4),\n+ std::make_tuple(SYS_waitid, CheckWaitid)));\n// WIFEXITED, WIFSIGNALED, WTERMSIG indicate signal exit.\nTEST(WaitTest, SignalExit) {\n" } ]
Go
Apache License 2.0
google/gvisor
Add support for wait(WNOTHREAD) PiperOrigin-RevId: 249537694 Change-Id: Iaa4bca73a2d8341e03064d59a2eb490afc3f80da
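The change above maps the absence of __WNOTHREAD onto a new SiblingChildren field in kernel.WaitOptions. A simplified sketch of that mapping (the struct and function here are illustrative stand-ins, not the sentry's types; only the flag value comes from the diff):

```go
package main

import "fmt"

// waitOptions is a reduced stand-in for kernel.WaitOptions, carrying
// only the field added by the change above.
type waitOptions struct {
	SiblingChildren bool
}

// Value from pkg/abi/linux/wait.go.
const wNOTHREAD = 0x20000000

// applyWNOTHREAD mirrors the logic added to wait4 and waitid: unless
// __WNOTHREAD is passed, children of sibling threads in the waiter's
// thread group are also eligible to be waited on.
func applyWNOTHREAD(opts *waitOptions, options int) {
	if options&wNOTHREAD == 0 {
		opts.SiblingChildren = true
	}
}

func main() {
	var withFlag, withoutFlag waitOptions
	applyWNOTHREAD(&withFlag, wNOTHREAD)
	applyWNOTHREAD(&withoutFlag, 0)
	fmt.Println(withFlag.SiblingChildren, withoutFlag.SiblingChildren) // false true
}
```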
259,858
22.05.2019 16:59:21
25,200
21915eb58b875809b60c0a43e53a97ea0560c299
Remove obsolete TODO. There is no obvious reason to require that BlockSize and StatFS are MountSource operations. Today they are in INodeOperations, and they can be moved elsewhere in the future as part of a normal refactor process.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/mount.go", "new_path": "pkg/sentry/fs/mount.go", "diff": "@@ -42,10 +42,6 @@ type DirentOperations interface {\n// MountSourceOperations contains filesystem specific operations.\ntype MountSourceOperations interface {\n- // TODO(b/67778729): Add:\n- // BlockSize() int64\n- // FS() Filesystem\n-\n// DirentOperations provide optional extra management of Dirents.\nDirentOperations\n" } ]
Go
Apache License 2.0
google/gvisor
Remove obsolete TODO. There no obvious reason to require that BlockSize and StatFS are MountSource operations. Today they are in INodeOperations, and they can be moved elsewhere in the future as part of a normal refactor process. PiperOrigin-RevId: 249549982 Change-Id: Ib832e02faeaf8253674475df4e385bcc53d780f3
259,881
22.05.2019 18:10:54
25,200
f65dfec09650768626a9af916b0487afa557a930
Add WCLONE / WALL support to waitid. The previous commit adds WNOTHREAD support to waitid, so we may as well complete the upstream change. Linux added WCLONE, WALL, WNOTHREAD support to waitid(2) in ("wait: allow sys_waitid() to accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_thread.go", "new_path": "pkg/sentry/syscalls/linux/sys_thread.go", "diff": "@@ -181,6 +181,32 @@ func Vfork(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn clone(t, syscall.CLONE_VM|syscall.CLONE_VFORK|int(syscall.SIGCHLD), 0, 0, 0, 0)\n}\n+// parseCommonWaitOptions applies the options common to wait4 and waitid to\n+// wopts.\n+func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error {\n+ switch options & (linux.WCLONE | linux.WALL) {\n+ case 0:\n+ wopts.NonCloneTasks = true\n+ case linux.WCLONE:\n+ wopts.CloneTasks = true\n+ case linux.WALL:\n+ wopts.NonCloneTasks = true\n+ wopts.CloneTasks = true\n+ default:\n+ return syscall.EINVAL\n+ }\n+ if options&linux.WCONTINUED != 0 {\n+ wopts.Events |= kernel.EventGroupContinue\n+ }\n+ if options&linux.WNOHANG == 0 {\n+ wopts.BlockInterruptErr = kernel.ERESTARTSYS\n+ }\n+ if options&linux.WNOTHREAD == 0 {\n+ wopts.SiblingChildren = true\n+ }\n+ return nil\n+}\n+\n// wait4 waits for the given child process to exit.\nfunc wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {\nif options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {\n@@ -207,29 +233,12 @@ func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusage\nwopts.SpecificTID = kernel.ThreadID(pid)\n}\n- switch options & (linux.WCLONE | linux.WALL) {\n- case 0:\n- wopts.NonCloneTasks = true\n- case linux.WCLONE:\n- wopts.CloneTasks = true\n- case linux.WALL:\n- wopts.NonCloneTasks = true\n- wopts.CloneTasks = true\n- default:\n- return 0, syscall.EINVAL\n+ if err := parseCommonWaitOptions(&wopts, options); err != nil {\n+ return 0, err\n}\nif options&linux.WUNTRACED != 0 {\nwopts.Events |= kernel.EventChildGroupStop\n}\n- if options&linux.WCONTINUED != 0 {\n- wopts.Events |= kernel.EventGroupContinue\n- }\n- if options&linux.WNOHANG == 0 {\n- wopts.BlockInterruptErr = kernel.ERESTARTSYS\n- }\n- if options&linux.WNOTHREAD == 0 {\n- wopts.SiblingChildren = true\n- }\nwr, err := t.Wait(&wopts)\nif err != nil {\n@@ -281,14 +290,13 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\noptions := int(args[3].Uint())\nrusageAddr := args[4].Pointer()\n- if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT|linux.WNOTHREAD) != 0 {\n+ if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\nif options&(linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED) == 0 {\nreturn 0, nil, syscall.EINVAL\n}\nwopts := kernel.WaitOptions{\n- NonCloneTasks: true,\nEvents: kernel.EventTraceeStop,\nConsumeEvent: options&linux.WNOWAIT == 0,\n}\n@@ -301,21 +309,16 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\ndefault:\nreturn 0, nil, syscall.EINVAL\n}\n+\n+ if err := parseCommonWaitOptions(&wopts, options); err != nil {\n+ return 0, nil, err\n+ }\nif options&linux.WEXITED != 0 {\nwopts.Events |= kernel.EventExit\n}\nif options&linux.WSTOPPED != 0 {\nwopts.Events |= kernel.EventChildGroupStop\n}\n- if options&linux.WCONTINUED != 0 {\n- wopts.Events |= kernel.EventGroupContinue\n- }\n- if options&linux.WNOHANG == 0 {\n- wopts.BlockInterruptErr = kernel.ERESTARTSYS\n- }\n- if options&linux.WNOTHREAD == 0 {\n- wopts.SiblingChildren = true\n- }\nwr, err := t.Wait(&wopts)\nif 
err != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/wait.cc", "new_path": "test/syscalls/linux/wait.cc", "diff": "@@ -233,18 +233,14 @@ TEST_P(WaitAnyChildTest, ForkAndClone) {\n// Return immediately if no child has exited.\nTEST_P(WaitAnyChildTest, WaitWNOHANG) {\n- EXPECT_THAT(\n- WaitAnyWithOptions(0, WNOHANG),\n- PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitAnyWithOptions(0, WNOHANG),\n+ PosixErrorIs(ECHILD, ::testing::_));\n}\n// Bad options passed\nTEST_P(WaitAnyChildTest, BadOption) {\n- EXPECT_THAT(\n- WaitAnyWithOptions(0, 123456),\n- PosixErrorIs(EINVAL, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitAnyWithOptions(0, 123456),\n+ PosixErrorIs(EINVAL, ::testing::_));\n}\nTEST_P(WaitAnyChildTest, WaitedChildRusage) {\n@@ -295,9 +291,7 @@ TEST_P(WaitAnyChildTest, IgnoredChildRusage) {\npid_t child;\nASSERT_THAT(child = ForkSpinAndExit(0, absl::ToInt64Seconds(kSpin)),\nSyscallSucceeds());\n- ASSERT_THAT(WaitAny(0), PosixErrorIs(ECHILD, ::testing::AnyOf(\n- ::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ ASSERT_THAT(WaitAny(0), PosixErrorIs(ECHILD, ::testing::_));\nconst absl::Duration end =\nabsl::Nanoseconds(clock_gettime_nsecs(CLOCK_MONOTONIC));\nEXPECT_GE(end - start, kSpin - kSpinGrace);\n@@ -501,10 +495,8 @@ TEST_P(WaitSpecificChildTest, SiblingChildrenWNOTHREAD) {\nmu.Await(absl::Condition(&ready));\n// This thread can't wait on child.\n- EXPECT_THAT(\n- WaitForWithOptions(child, __WNOTHREAD, 0),\n- PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitForWithOptions(child, __WNOTHREAD, 0),\n+ PosixErrorIs(ECHILD, ::testing::_));\n// Keep the sibling alive until after we've waited so the child isn't\n// reparented.\n@@ -538,10 +530,7 @@ TEST_P(WaitSpecificChildTest, CloneNoSIGCHLD) {\nint child;\nASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());\n- EXPECT_THAT(\n- WaitFor(child, 0),\n- PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitFor(child, 0), PosixErrorIs(ECHILD, ::testing::_));\n}\n// Waiting after the child has already exited returns immediately.\n@@ -571,10 +560,7 @@ TEST_P(WaitSpecificChildTest, CloneThread) {\nASSERT_THAT(child = CloneAndExit(15, stack, CLONE_THREAD), SyscallSucceeds());\nauto start = absl::Now();\n- EXPECT_THAT(\n- WaitFor(child, 0),\n- PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitFor(child, 0), PosixErrorIs(ECHILD, ::testing::_));\n// Ensure wait4 didn't block.\nEXPECT_LE(absl::Now() - start, absl::Seconds(10));\n@@ -584,12 +570,81 @@ TEST_P(WaitSpecificChildTest, CloneThread) {\nabsl::SleepFor(absl::Seconds(5));\n}\n+// A child that does not send a SIGCHLD on exit may be waited on with\n+// the __WCLONE flag.\n+TEST_P(WaitSpecificChildTest, CloneWCLONE) {\n+ // Linux added WCLONE support to waitid(2) in\n+ // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba (\"wait: allow sys_waitid() to\n+ // accept __WNOTHREAD/__WCLONE/__WALL\"). 
i.e., Linux 4.7.\n+ //\n+ // Skip the test if it isn't supported yet.\n+ if (Sysno() == SYS_waitid) {\n+ int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WCLONE);\n+ SKIP_IF(ret < 0 && errno == EINVAL);\n+ }\n+\n+ uintptr_t stack;\n+ ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());\n+ auto free =\n+ Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });\n+\n+ int child;\n+ ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());\n+\n+ EXPECT_NO_ERRNO(WaitForWithOptions(child, __WCLONE, 0));\n+}\n+\n+// A forked child cannot be waited on with WCLONE.\n+TEST_P(WaitSpecificChildTest, ForkWCLONE) {\n+ // Linux added WCLONE support to waitid(2) in\n+ // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba (\"wait: allow sys_waitid() to\n+ // accept __WNOTHREAD/__WCLONE/__WALL\"). i.e., Linux 4.7.\n+ //\n+ // Skip the test if it isn't supported yet.\n+ if (Sysno() == SYS_waitid) {\n+ int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WCLONE);\n+ SKIP_IF(ret < 0 && errno == EINVAL);\n+ }\n+\n+ pid_t child;\n+ ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());\n+\n+ EXPECT_THAT(WaitForWithOptions(child, WNOHANG | __WCLONE, 0),\n+ PosixErrorIs(ECHILD, ::testing::_));\n+\n+ EXPECT_NO_ERRNO(WaitFor(child, 0));\n+}\n+\n+// Any type of child can be waited on with WALL.\n+TEST_P(WaitSpecificChildTest, WALL) {\n+ // Linux added WALL support to waitid(2) in\n+ // 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba (\"wait: allow sys_waitid() to\n+ // accept __WNOTHREAD/__WCLONE/__WALL\"). i.e., Linux 4.7.\n+ //\n+ // Skip the test if it isn't supported yet.\n+ if (Sysno() == SYS_waitid) {\n+ int ret = waitid(P_ALL, 0, nullptr, WEXITED | WNOHANG | __WALL);\n+ SKIP_IF(ret < 0 && errno == EINVAL);\n+ }\n+\n+ pid_t child;\n+ ASSERT_THAT(child = ForkAndExit(0, 0), SyscallSucceeds());\n+\n+ EXPECT_NO_ERRNO(WaitForWithOptions(child, __WALL, 0));\n+\n+ uintptr_t stack;\n+ ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());\n+ auto free =\n+ Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });\n+\n+ ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());\n+\n+ EXPECT_NO_ERRNO(WaitForWithOptions(child, __WALL, 0));\n+}\n+\n// Return ECHILD for bad child.\nTEST_P(WaitSpecificChildTest, BadChild) {\n- EXPECT_THAT(\n- WaitFor(42, 0),\n- PosixErrorIs(ECHILD, ::testing::AnyOf(::testing::StrEq(\"waitid\"),\n- ::testing::StrEq(\"wait4\"))));\n+ EXPECT_THAT(WaitFor(42, 0), PosixErrorIs(ECHILD, ::testing::_));\n}\n// Wait for a child process that only exits after calling execve(2) from a\n@@ -694,21 +749,6 @@ TEST(WaitTest, SignalExit) {\nEXPECT_EQ(SIGKILL, WTERMSIG(status));\n}\n-// A child that does not send a SIGCHLD on exit may be waited on with\n-// the __WCLONE flag.\n-TEST(WaitTest, CloneWCLONE) {\n- uintptr_t stack;\n- ASSERT_THAT(stack = AllocStack(), SyscallSucceeds());\n- auto free =\n- Cleanup([stack] { ASSERT_THAT(FreeStack(stack), SyscallSucceeds()); });\n-\n- int child;\n- ASSERT_THAT(child = CloneAndExit(0, stack, 0), SyscallSucceeds());\n-\n- EXPECT_THAT(Wait4(child, nullptr, __WCLONE, nullptr),\n- SyscallSucceedsWithValue(child));\n-}\n-\n// waitid requires at least one option.\nTEST(WaitTest, WaitidOptions) {\nEXPECT_THAT(Waitid(P_ALL, 0, nullptr, 0), SyscallFailsWithErrno(EINVAL));\n" } ]
Go
Apache License 2.0
google/gvisor
Add WCLONE / WALL support to waitid The previous commit adds WNOTHREAD support to waitid, so we may as well complete the upstream change. Linux added WCLONE, WALL, WNOTHREAD support to waitid(2) in 91c4e8ea8f05916df0c8a6f383508ac7c9e10dba ("wait: allow sys_waitid() to accept __WNOTHREAD/__WCLONE/__WALL"). i.e., Linux 4.7. PiperOrigin-RevId: 249560587 Change-Id: Iff177b0848a3f7bae6cb5592e44500c5a942fbeb
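With this refactor, wait4 and waitid share their WCLONE/WALL handling through parseCommonWaitOptions. A self-contained sketch of that selection switch (types and names here are stand-ins; the flag values and the EINVAL-when-both-bits-are-set behavior mirror the diff):

```go
package main

import (
	"errors"
	"fmt"
)

// Values from pkg/abi/linux/wait.go.
const (
	wCLONE = 0x80000000
	wALL   = 0x40000000
)

// waitOptions is a reduced stand-in for kernel.WaitOptions.
type waitOptions struct {
	NonCloneTasks bool
	CloneTasks    bool
}

var errInvalid = errors.New("invalid argument") // stands in for EINVAL

// selectCloneTasks mirrors the WCLONE/WALL switch that the refactor
// above moves into parseCommonWaitOptions.
func selectCloneTasks(opts *waitOptions, options int) error {
	switch options & (wCLONE | wALL) {
	case 0:
		opts.NonCloneTasks = true
	case wCLONE:
		opts.CloneTasks = true
	case wALL:
		opts.NonCloneTasks = true
		opts.CloneTasks = true
	default: // passing both bits at once is rejected
		return errInvalid
	}
	return nil
}

func main() {
	var opts waitOptions
	err := selectCloneTasks(&opts, wALL)
	fmt.Println(err, opts.NonCloneTasks, opts.CloneTasks) // <nil> true true
}
```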
259,858
22.05.2019 18:18:01
25,200
79738d3958a027bcf449cf1bd608f3adec42b72c
Log unhandled faults only at DEBUG level.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_run.go", "new_path": "pkg/sentry/kernel/task_run.go", "diff": "@@ -26,7 +26,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/memmap\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n// A taskRunState is a reified state in the task state machine. See README.md\n@@ -267,13 +266,8 @@ func (*runApp) execute(t *Task) taskRunState {\n}\n}\n- // The JVM will trigger these errors constantly, so don't\n- // spam logs with this error.\n- if err == syserror.EFAULT || err == syserror.EPERM {\n+ // Faults are common, log only at debug level.\nt.Debugf(\"Unhandled user fault: addr=%x ip=%x access=%v err=%v\", addr, t.Arch().IP(), at, err)\n- } else {\n- t.Warningf(\"Unhandled user fault: addr=%x ip=%x access=%v err=%v\", addr, t.Arch().IP(), at, err)\n- }\nt.DebugDumpState()\n// Continue to signal handling.\n" } ]
Go
Apache License 2.0
google/gvisor
Log unhandled faults only at DEBUG level. PiperOrigin-RevId: 249561399 Change-Id: Ic73c68c8538bdca53068f38f82b7260939addac2
259,962
22.05.2019 18:56:18
25,200
022bd0fd1091a29a41fa4c065ac35e45e3d6c576
Fix the signature for gopark. gopark's signature was changed from having a string reason to a uint8. See: This broke execution tracing of the sentry. Switching to the right signature makes tracing work again. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sleep/sleep_unsafe.go", "new_path": "pkg/sleep/sleep_unsafe.go", "diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n+// +build go1.11\n+// +build !go1.13\n+\n// Package sleep allows goroutines to efficiently sleep on multiple sources of\n// notifications (wakers). It offers O(1) complexity, which is different from\n// multi-channel selects which have O(n) complexity (where n is the number of\n@@ -85,7 +88,7 @@ var (\n)\n//go:linkname gopark runtime.gopark\n-func gopark(unlockf func(uintptr, *uintptr) bool, wg *uintptr, reason string, traceEv byte, traceskip int)\n+func gopark(unlockf func(uintptr, *uintptr) bool, wg *uintptr, reason uint8, traceEv byte, traceskip int)\n//go:linkname goready runtime.goready\nfunc goready(g uintptr, traceskip int)\n@@ -179,7 +182,10 @@ func (s *Sleeper) nextWaker(block bool) *Waker {\n// commitSleep to decide whether to immediately\n// wake the caller up or to leave it sleeping.\nconst traceEvGoBlockSelect = 24\n- gopark(commitSleep, &s.waitingG, \"sleeper\", traceEvGoBlockSelect, 0)\n+ // See:runtime2.go in the go runtime package for\n+ // the values to pass as the waitReason here.\n+ const waitReasonSelect = 9\n+ gopark(commitSleep, &s.waitingG, waitReasonSelect, traceEvGoBlockSelect, 0)\n}\n// Pull the shared list out and reverse it in the local\n" } ]
Go
Apache License 2.0
google/gvisor
Fix the signature for gopark. gopark's signature was changed from having a string reason to a uint8. See: https://github.com/golang/go/commit/4d7cf3fedbc382215df5ff6167ee9782a9cc9375 This broke execution tracing of the sentry. Switching to the right signature makes tracing work again. Updates #220 PiperOrigin-RevId: 249565311 Change-Id: If77fd276cecb37d4003c8222f6de510b8031a074
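The underlying hazard here is that //go:linkname bypasses type checking, so a runtime signature change (string reason became a uint8 waitReason) breaks the call without any compile error — which is why the fix also pins Go versions with build tags. A hedged sketch of the general pattern, using runtime.nanotime as a commonly linknamed symbol; note that the package also needs an empty .s file so the compiler accepts the body-less declaration, and that such bindings can break on newer Go releases:

```go
package main

import (
	"fmt"
	_ "unsafe" // required for go:linkname
)

// nanotime is bound to the runtime's monotonic clock. Because the
// linker, not the compiler, resolves this binding, the local signature
// must be kept in sync with the runtime by hand -- the same failure
// mode the gopark fix above addresses.
//
//go:linkname nanotime runtime.nanotime
func nanotime() int64

func main() {
	fmt.Println("monotonic ns:", nanotime())
}
```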
259,992
23.05.2019 06:46:55
25,200
c091e6236922f1e9af7afbe811fd03ec297aae16
Set sticky bit on /tmp. This is generally done for '/tmp' to prevent accidental deletion of files. More details here:
[ { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -290,7 +290,7 @@ func mountSubmounts(ctx context.Context, conf *Config, mns *fs.MountNamespace, r\n}\n}\n- if err := mountTmp(ctx, conf, mns, root, fds, mounts); err != nil {\n+ if err := mountTmp(ctx, conf, mns, root, mounts); err != nil {\nreturn fmt.Errorf(\"mount submount %q: %v\", \"tmp\", err)\n}\n@@ -551,9 +551,8 @@ func subtargets(root string, mnts []specs.Mount) []string {\nfunc setupContainerFS(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, stdioFDs, goferFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\nctx := procArgs.NewContext(k)\n- // Create the FD map, which will set stdin, stdout, and stderr. If\n- // console is true, then ioctl calls will be passed through to the host\n- // fd.\n+ // Create the FD map, which will set stdin, stdout, and stderr. If console\n+ // is true, then ioctl calls will be passed through to the host fd.\nfdm, err := createFDMap(ctx, k, ls, console, stdioFDs)\nif err != nil {\nreturn fmt.Errorf(\"importing fds: %v\", err)\n@@ -725,7 +724,7 @@ func destroyContainerFS(ctx context.Context, cid string, k *kernel.Kernel) error\n//\n// Note that when there are submounts inside of '/tmp', directories for the\n// mount points must be present, making '/tmp' not empty anymore.\n-func mountTmp(ctx context.Context, conf *Config, mns *fs.MountNamespace, root *fs.Dirent, fds *fdDispenser, mounts []specs.Mount) error {\n+func mountTmp(ctx context.Context, conf *Config, mns *fs.MountNamespace, root *fs.Dirent, mounts []specs.Mount) error {\nfor _, m := range mounts {\nif filepath.Clean(m.Destination) == \"/tmp\" {\nlog.Debugf(\"Explict %q mount found, skipping internal tmpfs, mount: %+v\", \"/tmp\", m)\n@@ -763,8 +762,11 @@ func mountTmp(ctx context.Context, conf *Config, mns *fs.MountNamespace, root *f\ntmpMount := specs.Mount{\nType: tmpfs,\nDestination: \"/tmp\",\n+ // Sticky bit is added to prevent accidental deletion of files from\n+ // another user. This is normally done for /tmp.\n+ Options: []string{\"mode=1777\"},\n}\n- return mountSubmount(ctx, conf, mns, root, fds, tmpMount, mounts)\n+ return mountSubmount(ctx, conf, mns, root, nil, tmpMount, mounts)\ndefault:\nreturn err\n" } ]
Go
Apache License 2.0
google/gvisor
Set sticky bit to /tmp This is generally done for '/tmp' to prevent accidental deletion of files. More details here: http://man7.org/linux/man-pages/man1/chmod.1.html#RESTRICTED_DELETION_FLAG_OR_STICKY_BIT PiperOrigin-RevId: 249633207 Change-Id: I444a5b406fdef664f5677b2f20f374972613a02b
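The mode=1777 option gives the in-sandbox /tmp the conventional world-writable-plus-sticky permissions, so only a file's owner (or root) may remove entries. A small standalone example of setting the same bits from Go (the directory name is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// 0777 plus the sticky bit is the conventional mode for /tmp.
	const dir = "example-tmp" // illustrative scratch path

	if err := os.Mkdir(dir, 0777); err != nil && !os.IsExist(err) {
		panic(err)
	}
	// Mkdir is subject to the umask, so apply the final mode explicitly.
	if err := os.Chmod(dir, os.FileMode(0777)|os.ModeSticky); err != nil {
		panic(err)
	}

	info, err := os.Stat(dir)
	if err != nil {
		panic(err)
	}
	fmt.Println("mode:", info.Mode()) // dtrwxrwxrwx
}
```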
259,907
23.05.2019 16:54:38
25,200
6240abb205f9e5cdbad1c864dbed345d92f04b09
Added boilerplate code for ext4 fs. Initialized BUILD with license. Mount is still unimplemented and is not meant to be part of this CL. Rest of the fs interface is implemented. Referenced the Linux kernel appropriately when needed.
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fs/ext4/BUILD", "diff": "+package(licenses = [\"notice\"])\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+\n+go_library(\n+ name = \"ext4\",\n+ srcs = [\"fs.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ext4\",\n+ visibility = [\"//pkg/sentry:internal\"],\n+ deps = [\n+ \"//pkg/sentry/context\",\n+ \"//pkg/sentry/fs\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fs/ext4/fs.go", "diff": "+// Copyright 2019 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package ext4 implements the ext4 filesystem.\n+package ext4\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+)\n+\n+// filesystem implements fs.Filesystem for ext4.\n+//\n+// +stateify savable\n+type filesystem struct{}\n+\n+func init() {\n+ fs.RegisterFilesystem(&filesystem{})\n+}\n+\n+// FilesystemName is the name under which the filesystem is registered.\n+// Name matches fs/ext4/super.c:ext4_fs_type.name.\n+const FilesystemName = \"ext4\"\n+\n+// Name is the name of the file system.\n+func (*filesystem) Name() string {\n+ return FilesystemName\n+}\n+\n+// AllowUserMount prohibits users from using mount(2) with this file system.\n+func (*filesystem) AllowUserMount() bool {\n+ return false\n+}\n+\n+// AllowUserList prohibits this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return false\n+}\n+\n+// Flags returns properties of the filesystem.\n+//\n+// In Linux, ext4 returns FS_REQUIRES_DEV. See fs/ext4/super.c\n+func (*filesystem) Flags() fs.FilesystemFlags {\n+ return fs.FilesystemRequiresDev\n+}\n+\n+// Mount returns the root inode of the ext4 fs.\n+func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, cgroupsInt interface{}) (*fs.Inode, error) {\n+ panic(\"unimplemented\")\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Added boilerplate code for ext4 fs. Initialized BUILD with license Mount is still unimplemented and is not meant to be part of this CL. Rest of the fs interface is implemented. Referenced the Linux kernel appropriately when needed PiperOrigin-RevId: 249741997 Change-Id: Id1e4c7c9e68b3f6946da39896fc6a0c3dcd7f98c
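The ext4 stub registers itself with the VFS from its init function. A self-contained sketch of that registration pattern (a toy registry for illustration, not the sentry's fs package API):

```go
package main

import "fmt"

// Filesystem is a trimmed-down version of the interface implemented above.
type Filesystem interface {
	Name() string
	AllowUserMount() bool
}

var registry = map[string]Filesystem{}

// RegisterFilesystem records a filesystem under its Name, the way the
// ext4 package's init function does.
func RegisterFilesystem(f Filesystem) {
	registry[f.Name()] = f
}

// ext4 mirrors the stub in the diff: named "ext4", not user-mountable.
type ext4 struct{}

func (ext4) Name() string         { return "ext4" }
func (ext4) AllowUserMount() bool { return false }

func init() {
	RegisterFilesystem(ext4{})
}

func main() {
	f, ok := registry["ext4"]
	fmt.Println(ok, f.Name(), f.AllowUserMount()) // true ext4 false
}
```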
259,853
23.05.2019 22:27:36
25,200
409e8eea60f096b34c9005b302dc821f38ac19ed
runsc/do: do a proper cleanup if a command failed due to internal errors. Fatalf calls os.Exit and a process exits without calling defer callbacks. Should we do this for other runsc commands?
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/cmd.go", "new_path": "runsc/cmd/cmd.go", "diff": "@@ -22,19 +22,26 @@ import (\n\"strconv\"\n\"syscall\"\n+ \"github.com/google/subcommands\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n-// Fatalf logs to stderr and exits with a failure status code.\n-func Fatalf(s string, args ...interface{}) {\n+// Errorf logs to stderr and returns subcommands.ExitFailure.\n+func Errorf(s string, args ...interface{}) subcommands.ExitStatus {\n// If runsc is being invoked by docker or cri-o, then we might not have\n// access to stderr, so we log a serious-looking warning in addition to\n// writing to stderr.\nlog.Warningf(\"FATAL ERROR: \"+s, args...)\nfmt.Fprintf(os.Stderr, s+\"\\n\", args...)\n// Return an error that is unlikely to be used by the application.\n+ return subcommands.ExitFailure\n+}\n+\n+// Fatalf logs to stderr and exits with a failure status code.\n+func Fatalf(s string, args ...interface{}) {\n+ Errorf(s, args...)\nos.Exit(128)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cmd/do.go", "new_path": "runsc/cmd/do.go", "diff": "@@ -89,16 +89,16 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nhostname, err := os.Hostname()\nif err != nil {\n- Fatalf(\"Error to retrieve hostname: %v\", err)\n+ return Errorf(\"Error to retrieve hostname: %v\", err)\n}\nabsRoot, err := resolvePath(c.root)\nif err != nil {\n- Fatalf(\"Error resolving root: %v\", err)\n+ return Errorf(\"Error resolving root: %v\", err)\n}\nabsCwd, err := resolvePath(c.cwd)\nif err != nil {\n- Fatalf(\"Error resolving current directory: %v\", err)\n+ return Errorf(\"Error resolving current directory: %v\", err)\n}\nspec := &specs.Spec{\n@@ -121,18 +121,18 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nif conf.Network != boot.NetworkNone {\nclean, err := c.setupNet(cid, spec)\nif err != nil {\n- Fatalf(\"Error setting up network: %v\", err)\n+ return Errorf(\"Error setting up network: %v\", err)\n}\ndefer clean()\n}\nout, err := json.Marshal(spec)\nif err != nil {\n- Fatalf(\"Error to marshal spec: %v\", err)\n+ return Errorf(\"Error to marshal spec: %v\", err)\n}\ntmpDir, err := ioutil.TempDir(\"\", \"runsc-do\")\nif err != nil {\n- Fatalf(\"Error to create tmp dir: %v\", err)\n+ return Errorf(\"Error to create tmp dir: %v\", err)\n}\ndefer os.RemoveAll(tmpDir)\n@@ -141,12 +141,12 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\ncfgPath := filepath.Join(tmpDir, \"config.json\")\nif err := ioutil.WriteFile(cfgPath, out, 0755); err != nil {\n- Fatalf(\"Error write spec: %v\", err)\n+ return Errorf(\"Error write spec: %v\", err)\n}\nws, err := container.Run(cid, spec, conf, tmpDir, \"\", \"\", \"\", false)\nif err != nil {\n- Fatalf(\"running container: %v\", err)\n+ return Errorf(\"running container: %v\", err)\n}\n*waitStatus = ws\n" } ]
Go
Apache License 2.0
google/gvisor
runsc/do: do a proper cleanup if a command failed due to internal errors Fatalf calls os.Exit and a process exits without calling defer callbacks. Should we do this for other runsc commands? PiperOrigin-RevId: 249776310 Change-Id: If9d8b54d0ae37db443895906eb33bd9e9b600cc9
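The switch from Fatalf to the new Errorf matters because os.Exit terminates the process without running deferred functions, so cleanups like the deferred os.RemoveAll and network teardown in the diff would otherwise be skipped. A minimal standalone demonstration of the difference:

```go
package main

import (
	"fmt"
	"os"
)

// fail mimics a command that reports an error and returns a status, so
// its deferred cleanup still runs.
func fail() int {
	defer fmt.Println("cleanup ran")
	fmt.Println("command failed")
	return 1
}

func main() {
	status := fail() // prints "command failed" then "cleanup ran"

	// By contrast, os.Exit terminates immediately: no deferred function
	// in any goroutine runs after this point.
	defer fmt.Println("this never prints")
	os.Exit(status)
}
```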
259,853
23.05.2019 23:20:11
25,200
a949133c4b22a87c79310b2d825f2899028d6088
gvisor: interrupt the sendfile system call if a task has been interrupted. sendfile can be called for a big range and it can require a significant amount of time to process it, so we need to handle task interrupts in this system call.
[ { "change_type": "MODIFY", "old_path": "pkg/amutex/amutex.go", "new_path": "pkg/amutex/amutex.go", "diff": "@@ -33,6 +33,9 @@ type Sleeper interface {\n// SleepFinish is called by AbortableMutex.Lock() once a contended mutex\n// is acquired or the wait is aborted.\nSleepFinish(success bool)\n+\n+ // Interrupted returns true if the wait is aborted.\n+ Interrupted() bool\n}\n// NoopSleeper is a stateless no-op implementation of Sleeper for anonymous\n@@ -47,6 +50,9 @@ func (NoopSleeper) SleepStart() <-chan struct{} {\n// SleepFinish implements Sleeper.SleepFinish.\nfunc (NoopSleeper) SleepFinish(success bool) {}\n+// Interrupted implements Sleeper.Interrupted.\n+func (NoopSleeper) Interrupted() bool { return false }\n+\n// AbortableMutex is an abortable mutex. It allows Lock() to be aborted while it\n// waits to acquire the mutex.\ntype AbortableMutex struct {\n" }, { "change_type": "MODIFY", "old_path": "pkg/amutex/amutex_test.go", "new_path": "pkg/amutex/amutex_test.go", "diff": "@@ -31,6 +31,10 @@ func (s *sleeper) SleepStart() <-chan struct{} {\nfunc (*sleeper) SleepFinish(bool) {\n}\n+func (s *sleeper) Interrupted() bool {\n+ return len(s.ch) != 0\n+}\n+\nfunc TestMutualExclusion(t *testing.T) {\nvar m AbortableMutex\nm.Init()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/file.go", "new_path": "pkg/sentry/fs/file.go", "diff": "@@ -516,12 +516,18 @@ type lockedReader struct {\n// Read implements io.Reader.Read.\nfunc (r *lockedReader) Read(buf []byte) (int, error) {\n+ if r.Ctx.Interrupted() {\n+ return 0, syserror.ErrInterrupted\n+ }\nn, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), r.File.offset)\nreturn int(n), err\n}\n// ReadAt implements io.Reader.ReadAt.\nfunc (r *lockedReader) ReadAt(buf []byte, offset int64) (int, error) {\n+ if r.Ctx.Interrupted() {\n+ return 0, syserror.ErrInterrupted\n+ }\nn, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), offset)\nreturn int(n), err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/pipe/node_test.go", "new_path": "pkg/sentry/kernel/pipe/node_test.go", "diff": "@@ -48,6 +48,10 @@ func (s *sleeper) Cancel() {\ns.ch <- struct{}{}\n}\n+func (s *sleeper) Interrupted() bool {\n+ return len(s.ch) != 0\n+}\n+\ntype openResult struct {\n*fs.File\nerror\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_block.go", "new_path": "pkg/sentry/kernel/task_block.go", "diff": "@@ -158,6 +158,11 @@ func (t *Task) SleepFinish(success bool) {\nt.Activate()\n}\n+// Interrupted implements amutex.Sleeper.Interrupted\n+func (t *Task) Interrupted() bool {\n+ return len(t.interruptChan) != 0\n+}\n+\n// UninterruptibleSleepStart implements context.Context.UninterruptibleSleepStart.\nfunc (t *Task) UninterruptibleSleepStart(deactivate bool) {\nif deactivate {\n" } ]
Go
Apache License 2.0
google/gvisor
gvisor: interrupt the sendfile system call if a task has been interrupted sendfile can be called for a big range and it can require significant amount of time to process it, so we need to handle task interrupts in this system call. PiperOrigin-RevId: 249781023 Change-Id: Ifc2ec505d74c06f5ee76f93b8d30d518ec2d4015
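The new Interrupted method is a non-blocking peek at the task's buffered interrupt channel (len(t.interruptChan) != 0), the same trick the test sleeper uses. A standalone sketch of that pattern (names here are illustrative, not the sentry's):

```go
package main

import "fmt"

// sleeper mimics the pattern from the diff: an interrupt is delivered
// by sending on a buffered channel, and Interrupted checks whether one
// is pending without consuming it.
type sleeper struct {
	interrupt chan struct{}
}

func newSleeper() *sleeper {
	return &sleeper{interrupt: make(chan struct{}, 1)}
}

// Interrupt marks the sleeper as interrupted; a second call is a no-op
// while one is already pending.
func (s *sleeper) Interrupt() {
	select {
	case s.interrupt <- struct{}{}:
	default:
	}
}

// Interrupted reports whether an interrupt is pending, mirroring the
// `len(ch) != 0` check added in the diff.
func (s *sleeper) Interrupted() bool {
	return len(s.interrupt) != 0
}

func main() {
	s := newSleeper()
	fmt.Println(s.Interrupted()) // false
	s.Interrupt()
	fmt.Println(s.Interrupted()) // true
}
```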