author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, 5 to 490 chars) | mods (list) | language (20 classes) | license (3 classes) | repo (string, 5 to 68 chars) | original_message (string, 12 to 491 chars) |
---|---|---|---|---|---|---|---|---|---|
259,992 | 27.08.2018 20:35:00 | 25,200 | ae648bafda2d82a6641e4a28bed34dae40d426ec | Add command-line parameter to trigger panic on signal
This is to troubleshoot problems with a hung process that is
not responding to the 'runsc debug --stack' command. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/sighandling/sighandling.go",
"new_path": "pkg/sentry/sighandling/sighandling.go",
"diff": "@@ -103,7 +103,7 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop cha\n// PrepareForwarding ensures that synchronous signals are forwarded to k and\n// returns a callback that starts signal delivery, which itself returns a\n// callback that stops signal forwarding.\n-func PrepareForwarding(k *kernel.Kernel, enablePanicSignal bool) func() func() {\n+func PrepareForwarding(k *kernel.Kernel, skipSignal syscall.Signal) func() func() {\nstart := make(chan struct{})\nstop := make(chan struct{})\n@@ -119,8 +119,7 @@ func PrepareForwarding(k *kernel.Kernel, enablePanicSignal bool) func() func() {\nsigchan := make(chan os.Signal, 1)\nsigchans = append(sigchans, sigchan)\n- // SignalPanic is handled by Run.\n- if enablePanicSignal && linux.Signal(sig) == kernel.SignalPanic {\n+ if syscall.Signal(sig) == skipSignal {\ncontinue\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -204,7 +204,12 @@ type Config struct {\n// TODO: Remove this when multiple container is fully supported.\nMultiContainer bool\n+ // WatchdogAction sets what action the watchdog takes when triggered.\nWatchdogAction watchdog.Action\n+\n+ // PanicSignal register signal handling that panics. Usually set to\n+ // SIGUSR2(12) to troubleshoot hangs. -1 disables it.\n+ PanicSignal int\n}\n// ToFlags returns a slice of flags that correspond to the given Config.\n@@ -225,5 +230,6 @@ func (c *Config) ToFlags() []string {\n\"--strace-syscalls=\" + strings.Join(c.StraceSyscalls, \",\"),\n\"--strace-log-size=\" + strconv.Itoa(int(c.StraceLogSize)),\n\"--watchdog-action=\" + c.WatchdogAction.String(),\n+ \"--panic-signal=\" + strconv.Itoa(c.PanicSignal),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"fmt\"\n\"math/rand\"\n\"os\"\n+ \"os/signal\"\n\"runtime\"\n\"sync\"\n\"sync/atomic\"\n@@ -229,7 +230,18 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nreturn nil, fmt.Errorf(\"failed to ignore child stop signals: %v\", err)\n}\n// Ensure that signals received are forwarded to the emulated kernel.\n- stopSignalForwarding := sighandling.PrepareForwarding(k, false)()\n+ ps := syscall.Signal(conf.PanicSignal)\n+ stopSignalForwarding := sighandling.PrepareForwarding(k, ps)()\n+ if conf.PanicSignal != -1 {\n+ // Panics if the sentry receives 'conf.PanicSignal'.\n+ panicChan := make(chan os.Signal, 1)\n+ signal.Notify(panicChan, ps)\n+ go func() { // S/R-SAFE: causes sentry panic.\n+ <-panicChan\n+ panic(\"Signal-induced panic\")\n+ }()\n+ log.Infof(\"Panic signal set to %v(%d)\", ps, conf.PanicSignal)\n+ }\nprocArgs, err := newProcess(spec, creds, utsns, ipcns, k)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/debug.go",
"new_path": "runsc/cmd/debug.go",
"diff": "package cmd\nimport (\n+ \"syscall\"\n+\n\"context\"\n\"flag\"\n\"github.com/google/subcommands\"\n@@ -27,6 +29,7 @@ import (\ntype Debug struct {\npid int\nstacks bool\n+ signal int\n}\n// Name implements subcommands.Command.\n@@ -48,6 +51,7 @@ func (*Debug) Usage() string {\nfunc (d *Debug) SetFlags(f *flag.FlagSet) {\nf.IntVar(&d.pid, \"pid\", 0, \"sandbox process ID. Container ID is not necessary if this is set\")\nf.BoolVar(&d.stacks, \"stacks\", false, \"if true, dumps all sandbox stacks to the log\")\n+ f.IntVar(&d.signal, \"signal\", -1, \"sends signal to the sandbox\")\n}\n// Execute implements subcommands.Command.Execute.\n@@ -96,6 +100,12 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nFatalf(\"sandbox %q is not running\", c.Sandbox.ID)\n}\n+ if d.signal > 0 {\n+ log.Infof(\"Sending signal %d to process: %d\", d.signal, c.Sandbox.Pid)\n+ if err := syscall.Kill(c.Sandbox.Pid, syscall.Signal(d.signal)); err != nil {\n+ Fatalf(\"failed to send signal %d to processs %d\", d.signal, c.Sandbox.Pid)\n+ }\n+ }\nif d.stacks {\nlog.Infof(\"Retrieving sandbox stacks\")\nstacks, err := c.Sandbox.Stacks()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -61,6 +61,7 @@ var (\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\nmultiContainer = flag.Bool(\"multi-container\", false, \"enable *experimental* multi-container support.\")\nwatchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\n+ panicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\n)\nvar gitRevision = \"\"\n@@ -139,6 +140,7 @@ func main() {\nStraceLogSize: *straceLogSize,\nMultiContainer: *multiContainer,\nWatchdogAction: wa,\n+ PanicSignal: *panicSignal,\n}\nif len(*straceSyscalls) != 0 {\nconf.StraceSyscalls = strings.Split(*straceSyscalls, \",\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add command-line parameter to trigger panic on signal
This is to troubleshoot problems with a hung process that is
not responding to 'runsc debug --stack' command.
PiperOrigin-RevId: 210483513
Change-Id: I4377b210b4e51bc8a281ad34fd94f3df13d9187d |
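The mechanism added by this commit is small enough to show in isolation: register a channel for the chosen signal and panic from a goroutine when it fires, so even a hung process still produces a full stack dump. The sketch below is a minimal standard-library version of that pattern; the flag name and the SIGUSR2 suggestion mirror the commit, everything else is illustrative.

```go
// Minimal sketch of the "panic on signal" troubleshooting hook added in this
// commit, using only the standard library. Sending the configured signal
// (e.g. SIGUSR2, number 12) makes the process panic, which prints stack
// traces for all goroutines even if the process is otherwise hung.
package main

import (
	"flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	panicSignal := flag.Int("panic-signal", -1, "signal that triggers a panic; -1 disables it")
	flag.Parse()

	if *panicSignal != -1 {
		ps := syscall.Signal(*panicSignal)
		panicChan := make(chan os.Signal, 1)
		signal.Notify(panicChan, ps)
		go func() {
			<-panicChan
			panic("Signal-induced panic") // dumps all goroutine stacks
		}()
		fmt.Printf("Panic signal set to %v(%d)\n", ps, *panicSignal)
	}

	// Stand-in for the real workload; imagine this is the hung process.
	for {
		time.Sleep(time.Second)
	}
}
```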
259,948 | 28.08.2018 09:20:05 | 25,200 | d08ccdaaad0ff00622321957aa01cac149741005 | sentry: avoid double counting map objects in save / restore stats. | [
{
"change_type": "MODIFY",
"old_path": "pkg/state/decode.go",
"new_path": "pkg/state/decode.go",
"diff": "@@ -365,6 +365,12 @@ func (ds *decodeState) decodeObject(os *objectState, obj reflect.Value, object *\n// (For non-interfaces this is a no-op).\ndyntyp := reflect.TypeOf(obj.Interface())\nif dyntyp.Kind() == reflect.Map {\n+ // Remove the map object count here to avoid\n+ // double counting, as this object will be\n+ // counted again when it gets processed later.\n+ // We do not add a reference count as the\n+ // reference is artificial.\n+ ds.stats.Remove(obj)\nobj.Set(ds.register(id, dyntyp).obj)\n} else if dyntyp.Kind() == reflect.Ptr {\nds.push(true /* dereference */, \"\", nil)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/encode.go",
"new_path": "pkg/state/encode.go",
"diff": "@@ -335,6 +335,12 @@ func (es *encodeState) encodeObject(obj reflect.Value, mapAsValue bool, format s\nobject = &pb.Object{Value: &pb.Object_MapValue{es.encodeMap(obj)}}\n} else {\n// Encode a reference to the map.\n+ //\n+ // Remove the map object count here to avoid double\n+ // counting, as this object will be counted again when\n+ // it gets processed later. We do not add a reference\n+ // count as the reference is artificial.\n+ es.stats.Remove(obj)\nobject = &pb.Object{Value: &pb.Object_RefValue{es.register(obj)}}\n}\ndefault:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/state/stats.go",
"new_path": "pkg/state/stats.go",
"diff": "@@ -68,6 +68,17 @@ func (s *Stats) Add(obj reflect.Value) {\nentry.count++\n}\n+// Remove removes a sample count. It should only be called after a previous\n+// Add().\n+func (s *Stats) Remove(obj reflect.Value) {\n+ if s == nil {\n+ return\n+ }\n+ typ := obj.Type()\n+ entry := s.byType[typ]\n+ entry.count--\n+}\n+\n// Start starts a sample.\nfunc (s *Stats) Start(obj reflect.Value) {\nif s == nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: avoid double counting map objects in save / restore stats.
PiperOrigin-RevId: 210551929
Change-Id: Idd05935bffc63b39166cc3751139aff61b689faa |
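The fix compensates for an object that gets sampled twice: a map reached through an interface is counted once at the reference site and again when the registered object is processed later. A toy, self-contained illustration of that bookkeeping (not the real pkg/state API) is sketched below.

```go
// Illustration of the double-counting problem this commit fixes. A by-type
// counter is bumped every time the encoder visits a value; a map reached
// through an interface is visited at the reference site and again when the
// registered object is processed later, so the reference site must
// decrement the count to keep the stats accurate. Names are illustrative.
package main

import (
	"fmt"
	"reflect"
)

type stats struct {
	byType map[reflect.Type]int
}

func (s *stats) Add(obj reflect.Value)    { s.byType[obj.Type()]++ }
func (s *stats) Remove(obj reflect.Value) { s.byType[obj.Type()]-- }

func main() {
	s := &stats{byType: make(map[reflect.Type]int)}
	m := map[string]int{"a": 1}
	v := reflect.ValueOf(m)

	// First visit: the map is reached through an interface field and only a
	// reference to it is emitted, but Add has already been called.
	s.Add(v)
	// Compensate: the object will be counted again when processed for real.
	s.Remove(v)

	// Second visit: the registered map object is actually encoded.
	s.Add(v)

	fmt.Println(s.byType[v.Type()]) // 1, not 2
}
```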
259,881 | 28.08.2018 09:20:17 | 25,200 | 25a8e13a78ad6418a1798ec419a1b5ab2116a7f8 | Bump to Go 1.11
The procid offset is unchanged. | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "# Load go bazel rules and gazelle.\nhttp_archive(\nname = \"io_bazel_rules_go\",\n- url = \"https://github.com/bazelbuild/rules_go/releases/download/0.14.0/rules_go-0.14.0.tar.gz\",\n- sha256 = \"5756a4ad75b3703eb68249d50e23f5d64eaf1593e886b9aa931aa6e938c4e301\",\n+ url = \"https://github.com/bazelbuild/rules_go/releases/download/0.15.1/rules_go-0.15.1.tar.gz\",\n+ sha256 = \"5f3b0304cdf0c505ec9e5b3c4fc4a87b5ca21b13d8ecc780c97df3d1809b9ce6\",\n)\nhttp_archive(\nname = \"bazel_gazelle\",\n@@ -11,7 +11,7 @@ http_archive(\n)\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_rules_dependencies\", \"go_register_toolchains\")\ngo_rules_dependencies()\n-go_register_toolchains(go_version=\"1.10.3\")\n+go_register_toolchains(go_version=\"1.11\")\nload(\"@bazel_gazelle//:deps.bzl\", \"gazelle_dependencies\", \"go_repository\")\ngazelle_dependencies()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/procid/procid_amd64.s",
"new_path": "pkg/sentry/platform/procid/procid_amd64.s",
"diff": "// +build amd64\n// +build go1.8\n-// +build !go1.11\n+// +build !go1.12\n#include \"textflag.h\"\n"
}
] | Go | Apache License 2.0 | google/gvisor | Bump to Go 1.11
The procid offset is unchanged.
PiperOrigin-RevId: 210551969
Change-Id: I33ba1ce56c2f5631b712417d870aa65ef24e6022 |
259,992 | 28.08.2018 11:29:53 | 25,200 | f7366e4e6465530ecc1641312011fd82a94f55f8 | Consolidate image tests into a single file
This is to keep it consistent with other tests, and
it's easier to maintain them in a single file.
Also increase python test timeout to deflake it. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "@@ -7,8 +7,6 @@ go_test(\nsize = \"large\",\nsrcs = [\n\"image_test.go\",\n- \"python_test.go\",\n- \"tomcat_test.go\",\n],\ndata = [\n\"latin10k.txt\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -192,6 +192,70 @@ func TestMysql(t *testing.T) {\n}\n}\n+func TestPythonHello(t *testing.T) {\n+ if err := testutil.Pull(\"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := testutil.MakeDocker(\"python-hello-test\")\n+ if _, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.FindPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := testutil.WaitForHTTP(port, 20*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Ensure that content is being served.\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Errorf(\"Error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+}\n+\n+func TestTomcat(t *testing.T) {\n+ if err := testutil.Pull(\"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := testutil.MakeDocker(\"tomcat-test\")\n+ if _, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.FindPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Ensure that content is being served.\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Errorf(\"Error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+}\n+\nfunc MainTest(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/test/image/python_test.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package image\n-\n-import (\n- \"fmt\"\n- \"net/http\"\n- \"testing\"\n- \"time\"\n-\n- \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n-)\n-\n-func TestPythonHello(t *testing.T) {\n- if err := testutil.Pull(\"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\", err)\n- }\n- d := testutil.MakeDocker(\"python-hello-test\")\n- if _, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\", err)\n- }\n- defer d.CleanUp()\n-\n- // Find where port 8080 is mapped to.\n- port, err := d.FindPort(8080)\n- if err != nil {\n- t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n- }\n-\n- // Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\n- t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n- }\n-\n- // Ensure that content is being served.\n- url := fmt.Sprintf(\"http://localhost:%d\", port)\n- resp, err := http.Get(url)\n- if err != nil {\n- t.Errorf(\"Error reaching http server: %v\", err)\n- }\n- if want := http.StatusOK; resp.StatusCode != want {\n- t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n- }\n-}\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/test/image/tomcat_test.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package image\n-\n-import (\n- \"fmt\"\n- \"net/http\"\n- \"testing\"\n- \"time\"\n-\n- \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n-)\n-\n-func TestTomcat(t *testing.T) {\n- if err := testutil.Pull(\"tomcat:8.0\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\", err)\n- }\n- d := testutil.MakeDocker(\"tomcat-test\")\n- if _, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\", err)\n- }\n- defer d.CleanUp()\n-\n- // Find where port 8080 is mapped to.\n- port, err := d.FindPort(8080)\n- if err != nil {\n- t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n- }\n-\n- // Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\n- t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n- }\n-\n- // Ensure that content is being served.\n- url := fmt.Sprintf(\"http://localhost:%d\", port)\n- resp, err := http.Get(url)\n- if err != nil {\n- t.Errorf(\"Error reaching http server: %v\", err)\n- }\n- if want := http.StatusOK; resp.StatusCode != want {\n- t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n- }\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Consolidate image tests into a single file
This is to keep it consistent with other test, and
it's easier to maintain them in single file.
Also increase python test timeout to deflake it.
PiperOrigin-RevId: 210575042
Change-Id: I2ef5bcd5d97c08549f0c5f645c4b694253ef0b4d |
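Since the two HTTP-serving image tests now differ only in image name, container name, and startup timeout, they could also share a helper. The sketch below shows such a hypothetical testHTTPImage helper built from the testutil calls already used in the diff (Pull, MakeDocker, Run, FindPort, WaitForHTTP); it illustrates the consolidation and is not code from the commit.

```go
// Hypothetical helper showing how the two nearly identical HTTP image tests
// could share one code path; it reuses the testutil functions already
// called in the diff above. Function names are illustrative.
package image

import (
	"fmt"
	"net/http"
	"testing"
	"time"

	"gvisor.googlesource.com/gvisor/runsc/test/testutil"
)

func testHTTPImage(t *testing.T, image, name string, timeout time.Duration) {
	if err := testutil.Pull(image); err != nil {
		t.Fatalf("docker pull failed: %v", err)
	}
	d := testutil.MakeDocker(name)
	if _, err := d.Run("-p", "8080", image); err != nil {
		t.Fatalf("docker run failed: %v", err)
	}
	defer d.CleanUp()

	// Find where port 8080 is mapped to and wait for the server to come up.
	port, err := d.FindPort(8080)
	if err != nil {
		t.Fatalf("docker.FindPort(8080) failed: %v", err)
	}
	if err := testutil.WaitForHTTP(port, timeout); err != nil {
		t.Fatalf("WaitForHTTP() timeout: %v", err)
	}

	// Ensure that content is being served.
	resp, err := http.Get(fmt.Sprintf("http://localhost:%d", port))
	if err != nil {
		t.Errorf("Error reaching http server: %v", err)
	}
	if want := http.StatusOK; resp.StatusCode != want {
		t.Errorf("Wrong response code, got: %d, want: %d", resp.StatusCode, want)
	}
}

func TestPythonHelloConsolidated(t *testing.T) {
	testHTTPImage(t, "google/python-hello", "python-hello-test", 20*time.Second)
}

func TestTomcatConsolidated(t *testing.T) {
	testHTTPImage(t, "tomcat:8.0", "tomcat-test", 10*time.Second)
}
```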
259,881 | 28.08.2018 12:55:11 | 25,200 | ea113a4380543080f7ad92f536e71706e71d9285 | Drop support for Go 1.10 | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/BUILD",
"new_path": "runsc/boot/filter/BUILD",
"diff": "@@ -6,8 +6,6 @@ go_library(\nname = \"filter\",\nsrcs = [\n\"config.go\",\n- \"config_go110.go\",\n- \"config_go111.go\",\n\"extra_filters.go\",\n\"extra_filters_msan.go\",\n\"extra_filters_race.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -60,6 +60,7 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_MMAP: {},\nsyscall.SYS_MPROTECT: {},\nsyscall.SYS_MUNMAP: {},\n+ syscall.SYS_NANOSLEEP: {},\nsyscall.SYS_POLL: {},\nsyscall.SYS_PREAD64: {},\nsyscall.SYS_PWRITE64: {},\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/boot/filter/config_go110.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// +build !go1.11\n-\n-package filter\n-\n-import (\n- \"syscall\"\n-\n- \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n-)\n-\n-// TODO: Remove this file and merge config_go111.go back into\n-// config.go once we no longer build with Go 1.10.\n-\n-func init() {\n- allowedSyscalls[syscall.SYS_PSELECT6] = []seccomp.Rule{}\n-}\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/boot/filter/config_go111.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// +build go1.11\n-\n-package filter\n-\n-import (\n- \"syscall\"\n-\n- \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n-)\n-\n-func init() {\n- allowedSyscalls[syscall.SYS_NANOSLEEP] = []seccomp.Rule{}\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop support for Go 1.10
PiperOrigin-RevId: 210589588
Change-Id: Iba898bc3eb8f13e17c668ceea6dc820fc8180a70 |
259,948 | 28.08.2018 13:20:54 | 25,200 | d724863a313f5e08a043c8f2ccb4969e8ea23de1 | sentry: optimize dirent weakref map save / restore.
Weak reference save / restore involves multiple interface indirections
and causes material latency overhead when there are lots of dirents, each
containing a weak reference map. The nil entries in the map should also
be purged. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -201,7 +201,7 @@ type Dirent struct {\nmu sync.Mutex `state:\"nosave\"`\n// children are cached via weak references.\n- children map[string]*refs.WeakRef\n+ children map[string]*refs.WeakRef `state:\".(map[string]*Dirent)\"`\n}\n// NewDirent returns a new root Dirent, taking the caller's reference on inode. The caller\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent_state.go",
"new_path": "pkg/sentry/fs/dirent_state.go",
"diff": "@@ -17,6 +17,8 @@ package fs\nimport (\n\"fmt\"\n\"sync/atomic\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/refs\"\n)\n// beforeSave is invoked by stateify.\n@@ -36,6 +38,27 @@ func (d *Dirent) beforeSave() {\n}\n}\n+// saveChildren is invoked by stateify.\n+func (d *Dirent) saveChildren() map[string]*Dirent {\n+ c := make(map[string]*Dirent)\n+ for name, w := range d.children {\n+ if rc := w.Get(); rc != nil {\n+ // Drop the reference count obtain in w.Get()\n+ rc.DecRef()\n+ c[name] = rc.(*Dirent)\n+ }\n+ }\n+ return c\n+}\n+\n+// loadChildren is invoked by stateify.\n+func (d *Dirent) loadChildren(children map[string]*Dirent) {\n+ d.children = make(map[string]*refs.WeakRef)\n+ for name, c := range children {\n+ d.children[name] = refs.NewWeakRef(c, nil)\n+ }\n+}\n+\n// afterLoad is invoked by stateify.\nfunc (d *Dirent) afterLoad() {\nif d.userVisible {\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: optimize dirent weakref map save / restore.
Weak references save / restore involves multiple interface indirection
and cause material latency overhead when there are lots of dirents, each
containing a weak reference map. The nil entries in the map should also
be purged.
PiperOrigin-RevId: 210593727
Change-Id: Ied6f4c3c0726fcc53a24b983d9b3a79121b6b758 |
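The underlying pattern is general: flatten a weak-reference map into strong pointers at save time, dropping entries whose referent is already gone, and rebuild the weak references at load time. Below is a self-contained sketch of that idea with a toy weakRef type standing in for pkg/refs.WeakRef; it is illustrative only.

```go
// Self-contained sketch of the save/restore pattern used for the dirent
// children map: convert weak references to strong pointers when saving
// (purging entries whose referent is gone) and rebuild the weak references
// when loading. The weakRef type below is a stand-in for pkg/refs.WeakRef.
package main

import "fmt"

type dirent struct{ name string }

// weakRef is a toy weak reference: Get returns nil once the target is gone.
type weakRef struct{ target *dirent }

func (w *weakRef) Get() *dirent { return w.target }

type node struct {
	children map[string]*weakRef
}

// saveChildren flattens the weak-reference map into plain pointers,
// skipping entries whose referent has already been dropped.
func (n *node) saveChildren() map[string]*dirent {
	c := make(map[string]*dirent)
	for name, w := range n.children {
		if d := w.Get(); d != nil {
			c[name] = d
		}
	}
	return c
}

// loadChildren rebuilds the weak-reference map from the saved pointers.
func (n *node) loadChildren(children map[string]*dirent) {
	n.children = make(map[string]*weakRef)
	for name, d := range children {
		n.children[name] = &weakRef{target: d}
	}
}

func main() {
	n := &node{children: map[string]*weakRef{
		"live": {target: &dirent{name: "live"}},
		"dead": {target: nil}, // referent already collected
	}}
	saved := n.saveChildren()
	fmt.Println(len(saved)) // 1: the nil entry was purged

	var restored node
	restored.loadChildren(saved)
	fmt.Println(len(restored.children)) // 1
}
```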
259,992 | 28.08.2018 17:08:49 | 25,200 | 30c025f3efdf5b599d8fbd4172bb5b856cc269af | Add argument checks to seccomp
This is required to increase protection when running in GKE. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/BUILD",
"new_path": "runsc/boot/filter/BUILD",
"diff": "@@ -22,6 +22,7 @@ go_library(\n\"//pkg/sentry/platform\",\n\"//pkg/sentry/platform/kvm\",\n\"//pkg/sentry/platform/ptrace\",\n+ \"//pkg/tcpip/link/fdbased\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "package filter\nimport (\n+ \"os\"\n\"syscall\"\n\"golang.org/x/sys/unix\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/link/fdbased\"\n)\n-// allowedSyscalls is the set of syscalls executed by the Sentry\n-// to the host OS.\n+// allowedSyscalls is the set of syscalls executed by the Sentry to the host OS.\nvar allowedSyscalls = seccomp.SyscallRules{\n- syscall.SYS_ACCEPT: {},\n- syscall.SYS_ARCH_PRCTL: {},\n+ syscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n+ {seccomp.AllowValue(linux.ARCH_GET_FS)},\n+ {seccomp.AllowValue(linux.ARCH_SET_FS)},\n+ },\nsyscall.SYS_CLOCK_GETTIME: {},\n- syscall.SYS_CLONE: {},\n+ syscall.SYS_CLONE: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(\n+ syscall.CLONE_VM |\n+ syscall.CLONE_FS |\n+ syscall.CLONE_FILES |\n+ syscall.CLONE_SIGHAND |\n+ syscall.CLONE_SYSVSEM |\n+ syscall.CLONE_THREAD),\n+ },\n+ },\nsyscall.SYS_CLOSE: {},\nsyscall.SYS_DUP: {},\nsyscall.SYS_EPOLL_CREATE1: {},\nsyscall.SYS_EPOLL_CTL: {},\n- syscall.SYS_EPOLL_PWAIT: {},\n+ syscall.SYS_EPOLL_PWAIT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ },\nsyscall.SYS_EPOLL_WAIT: {},\n- syscall.SYS_EVENTFD2: {},\n+ syscall.SYS_EVENTFD2: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(0),\n+ seccomp.AllowValue(0),\n+ },\n+ },\nsyscall.SYS_EXIT: {},\nsyscall.SYS_EXIT_GROUP: {},\nsyscall.SYS_FALLOCATE: {},\n- syscall.SYS_FCNTL: {},\n+ syscall.SYS_FCHMOD: {},\n+ syscall.SYS_FCNTL: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_GETFL),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_SETFL),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_GETFD),\n+ },\n+ },\nsyscall.SYS_FSTAT: {},\nsyscall.SYS_FSYNC: {},\nsyscall.SYS_FTRUNCATE: {},\n- syscall.SYS_FUTEX: {},\n+ syscall.SYS_FUTEX: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG),\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ },\nsyscall.SYS_GETDENTS64: {},\nsyscall.SYS_GETPID: {},\nunix.SYS_GETRANDOM: {},\n- syscall.SYS_GETSOCKOPT: {},\n+ syscall.SYS_GETSOCKOPT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_DOMAIN),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_TYPE),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_ERROR),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_SNDBUF),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_RCVBUF),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_REUSEADDR),\n+ },\n+ },\nsyscall.SYS_GETTID: {},\nsyscall.SYS_GETTIMEOFDAY: {},\n- syscall.SYS_LISTEN: {},\n- syscall.SYS_LSEEK: {},\n- // TODO: Remove SYS_LSTAT when executable lookup moves\n- // into the gofer.\n- syscall.SYS_LSTAT: {},\n- syscall.SYS_MADVISE: {},\n- syscall.SYS_MINCORE: {},\n- syscall.SYS_MMAP: {},\n- syscall.SYS_MPROTECT: {},\n- syscall.SYS_MUNMAP: {},\n- 
syscall.SYS_NANOSLEEP: {},\n- syscall.SYS_POLL: {},\n- syscall.SYS_PREAD64: {},\n- syscall.SYS_PWRITE64: {},\n- syscall.SYS_READ: {},\n- syscall.SYS_READV: {},\n- syscall.SYS_RECVMSG: {},\n- syscall.SYS_RESTART_SYSCALL: {},\n- syscall.SYS_RT_SIGACTION: {},\n- syscall.SYS_RT_SIGPROCMASK: {},\n- syscall.SYS_RT_SIGRETURN: {},\n- syscall.SYS_SCHED_YIELD: {},\n- syscall.SYS_SENDMSG: {},\n- syscall.SYS_SETITIMER: {},\n- syscall.SYS_SHUTDOWN: {},\n- syscall.SYS_SIGALTSTACK: {},\n- syscall.SYS_SYNC_FILE_RANGE: {},\n- syscall.SYS_TGKILL: {},\n- syscall.SYS_WRITE: {},\n- syscall.SYS_WRITEV: {},\n-\n// SYS_IOCTL is needed for terminal support, but we only allow\n// setting/getting termios and winsize.\nsyscall.SYS_IOCTL: []seccomp.Rule{\n@@ -110,6 +165,107 @@ var allowedSyscalls = seccomp.SyscallRules{\nseccomp.AllowAny{}, /* winsize struct */\n},\n},\n+ syscall.SYS_LSEEK: {},\n+ // TODO: Remove SYS_LSTAT when executable lookup moves\n+ // into the gofer.\n+ syscall.SYS_LSTAT: {},\n+ syscall.SYS_MADVISE: {},\n+ syscall.SYS_MINCORE: {},\n+ syscall.SYS_MMAP: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_SHARED),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_STACK),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_NORESERVE),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.PROT_WRITE | syscall.PROT_READ),\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),\n+ },\n+ },\n+ syscall.SYS_MPROTECT: {},\n+ syscall.SYS_MUNMAP: {},\n+ syscall.SYS_NANOSLEEP: {},\n+ syscall.SYS_POLL: {},\n+ syscall.SYS_PREAD64: {},\n+ syscall.SYS_PWRITE64: {},\n+ syscall.SYS_READ: {},\n+ syscall.SYS_READV: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(len(fdbased.BufConfig)),\n+ },\n+ },\n+ syscall.SYS_RECVMSG: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),\n+ },\n+ },\n+ syscall.SYS_RESTART_SYSCALL: {},\n+ syscall.SYS_RT_SIGACTION: {},\n+ syscall.SYS_RT_SIGPROCMASK: {},\n+ syscall.SYS_RT_SIGRETURN: {},\n+ syscall.SYS_SCHED_YIELD: {},\n+ syscall.SYS_SENDMSG: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),\n+ },\n+ },\n+ syscall.SYS_SETITIMER: {},\n+ syscall.SYS_SHUTDOWN: []seccomp.Rule{\n+ {seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)},\n+ },\n+ syscall.SYS_SIGALTSTACK: {},\n+ syscall.SYS_SYNC_FILE_RANGE: {},\n+ syscall.SYS_TGKILL: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(uint64(os.Getpid())),\n+ },\n+ },\n+ syscall.SYS_WRITE: {},\n+ syscall.SYS_WRITEV: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(2),\n+ },\n+ },\n}\n// whitelistFSFilters returns syscalls made by whitelistFS. 
Using WhitelistFS\n@@ -154,22 +310,150 @@ func whitelistFSFilters() seccomp.SyscallRules {\n// hostInetFilters contains syscalls that are needed by sentry/socket/hostinet.\nfunc hostInetFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\n- syscall.SYS_ACCEPT4: {},\n+ syscall.SYS_ACCEPT4: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),\n+ },\n+ },\nsyscall.SYS_BIND: {},\nsyscall.SYS_CONNECT: {},\nsyscall.SYS_GETPEERNAME: {},\nsyscall.SYS_GETSOCKNAME: {},\n- syscall.SYS_GETSOCKOPT: {},\n- syscall.SYS_IOCTL: {},\n+ syscall.SYS_GETSOCKOPT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_IPV6),\n+ seccomp.AllowValue(syscall.IPV6_V6ONLY),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_ERROR),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_KEEPALIVE),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_SNDBUF),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_REUSEADDR),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_TYPE),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_LINGER),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_TCP),\n+ seccomp.AllowValue(syscall.TCP_NODELAY),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_TCP),\n+ seccomp.AllowValue(syscall.TCP_INFO),\n+ },\n+ },\n+ syscall.SYS_IOCTL: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.TIOCOUTQ),\n+ },\n+ },\nsyscall.SYS_LISTEN: {},\nsyscall.SYS_READV: {},\nsyscall.SYS_RECVFROM: {},\nsyscall.SYS_RECVMSG: {},\nsyscall.SYS_SENDMSG: {},\nsyscall.SYS_SENDTO: {},\n- syscall.SYS_SETSOCKOPT: {},\n- syscall.SYS_SHUTDOWN: {},\n- syscall.SYS_SOCKET: {},\n+ syscall.SYS_SETSOCKOPT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_IPV6),\n+ seccomp.AllowValue(syscall.IPV6_V6ONLY),\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(4),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_SNDBUF),\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(4),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_RCVBUF),\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(4),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_REUSEADDR),\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(4),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_TCP),\n+ seccomp.AllowValue(syscall.TCP_NODELAY),\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(4),\n+ },\n+ },\n+ syscall.SYS_SHUTDOWN: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SHUT_RD),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SHUT_WR),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SHUT_RDWR),\n+ },\n+ },\n+ syscall.SYS_SOCKET: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(syscall.AF_INET),\n+ seccomp.AllowValue(syscall.SOCK_STREAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),\n+ seccomp.AllowValue(0),\n+ },\n+ {\n+ seccomp.AllowValue(syscall.AF_INET),\n+ 
seccomp.AllowValue(syscall.SOCK_DGRAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),\n+ seccomp.AllowValue(0),\n+ },\n+ {\n+ seccomp.AllowValue(syscall.AF_INET6),\n+ seccomp.AllowValue(syscall.SOCK_STREAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),\n+ seccomp.AllowValue(0),\n+ },\n+ {\n+ seccomp.AllowValue(syscall.AF_INET6),\n+ seccomp.AllowValue(syscall.SOCK_DGRAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),\n+ seccomp.AllowValue(0),\n+ },\n+ },\nsyscall.SYS_WRITEV: {},\n}\n}\n@@ -177,19 +461,46 @@ func hostInetFilters() seccomp.SyscallRules {\n// ptraceFilters returns syscalls made exclusively by the ptrace platform.\nfunc ptraceFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\n- syscall.SYS_PTRACE: {},\n- syscall.SYS_WAIT4: {},\nunix.SYS_GETCPU: {},\nunix.SYS_SCHED_SETAFFINITY: {},\n+ syscall.SYS_PTRACE: {},\n+ syscall.SYS_TGKILL: {},\n+ syscall.SYS_WAIT4: {},\n}\n}\n// kvmFilters returns syscalls made exclusively by the KVM platform.\nfunc kvmFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\n+ syscall.SYS_ARCH_PRCTL: {},\n+ syscall.SYS_FUTEX: {},\nsyscall.SYS_IOCTL: {},\n+ syscall.SYS_MMAP: {},\nsyscall.SYS_RT_SIGSUSPEND: {},\nsyscall.SYS_RT_SIGTIMEDWAIT: {},\n0xffffffffffffffff: {}, // KVM uses syscall -1 to transition to host.\n}\n}\n+\n+func controlServerFilters(fd int) seccomp.SyscallRules {\n+ return seccomp.SyscallRules{\n+ syscall.SYS_ACCEPT: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(fd),\n+ },\n+ },\n+ syscall.SYS_LISTEN: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(fd),\n+ seccomp.AllowValue(16 /* unet.backlog */),\n+ },\n+ },\n+ syscall.SYS_GETSOCKOPT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.SOL_SOCKET),\n+ seccomp.AllowValue(syscall.SO_PEERCRED),\n+ },\n+ },\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/filter.go",
"new_path": "runsc/boot/filter/filter.go",
"diff": "@@ -27,24 +27,33 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ptrace\"\n)\n+// Options are seccomp filter related options.\n+type Options struct {\n+ Platform platform.Platform\n+ WhitelistFS bool\n+ HostNetwork bool\n+ ControllerFD int\n+}\n+\n// Install installs seccomp filters for based on the given platform.\n-func Install(p platform.Platform, whitelistFS, hostNetwork bool) error {\n+func Install(opt Options) error {\ns := allowedSyscalls\n+ s.Merge(controlServerFilters(opt.ControllerFD))\n// Set of additional filters used by -race and -msan. Returns empty\n// when not enabled.\ns.Merge(instrumentationFilters())\n- if whitelistFS {\n+ if opt.WhitelistFS {\nReport(\"direct file access allows unrestricted file access!\")\ns.Merge(whitelistFSFilters())\n}\n- if hostNetwork {\n+ if opt.HostNetwork {\nReport(\"host networking enabled: syscall filters less restrictive!\")\ns.Merge(hostInetFilters())\n}\n- switch p := p.(type) {\n+ switch p := opt.Platform.(type) {\ncase *ptrace.PTrace:\ns.Merge(ptraceFilters())\ncase *kvm.KVM:\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -351,9 +351,13 @@ func (l *Loader) run() error {\nif l.conf.DisableSeccomp {\nfilter.Report(\"syscall filter is DISABLED. Running in less secure mode.\")\n} else {\n- whitelistFS := l.conf.FileAccess == FileAccessDirect\n- hostNet := l.conf.Network == NetworkHost\n- if err := filter.Install(l.k.Platform, whitelistFS, hostNet); err != nil {\n+ opts := filter.Options{\n+ Platform: l.k.Platform,\n+ WhitelistFS: l.conf.FileAccess == FileAccessDirect,\n+ HostNetwork: l.conf.Network == NetworkHost,\n+ ControllerFD: l.ctrl.srv.FD(),\n+ }\n+ if err := filter.Install(opts); err != nil {\nreturn fmt.Errorf(\"Failed to install seccomp filters: %v\", err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add argument checks to seccomp
This is required to increase protection when running in GKE.
PiperOrigin-RevId: 210635123
Change-Id: Iaaa8be49e73f7a3a90805313885e75894416f0b5 |
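For reference, the seccomp package used here expresses per-argument constraints as seccomp.Rule values inside a SyscallRules map, and rule sets are combined with Merge before installation. The sketch below shows that usage in isolation, based only on the API as it appears in these diffs (AllowAny, AllowValue, Merge, Install); treat it as an illustration rather than the actual filter set.

```go
// Minimal sketch of how argument-checked seccomp rules like the ones in this
// commit are built and installed, using the seccomp API as it appears in the
// diffs in this table.
package main

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/seccomp"
)

func installFilters() error {
	rules := seccomp.SyscallRules{
		// No Rule values: any invocation of the syscall is allowed.
		syscall.SYS_CLOSE: {},
		// With Rule values: only invocations matching one of the rules are
		// allowed. Here shutdown(2) may only be called with SHUT_RDWR.
		syscall.SYS_SHUTDOWN: []seccomp.Rule{
			{seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)},
		},
	}

	// Additional rule sets (e.g. per-platform filters) are merged in before
	// installation, as the boot and gofer filters do.
	extra := seccomp.SyscallRules{
		syscall.SYS_EXIT_GROUP: {},
	}
	rules.Merge(extra)

	// The boolean mirrors the Install(s, false) calls in these diffs; per the
	// fsgofer TODO, kill=true awaits SECCOMP_RET_KILL_PROCESS support.
	return seccomp.Install(rules, false)
}

func main() {
	if err := installFilters(); err != nil {
		panic(err)
	}
}
```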
259,854 | 29.08.2018 14:14:32 | 25,200 | 313d4af52d774b07d4b98d770d38684c9c119f67 | ping: update comment about UDP | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/ping/protocol.go",
"new_path": "pkg/tcpip/transport/ping/protocol.go",
"diff": "@@ -85,7 +85,7 @@ func (p *protocol) MinimumPacketSize() int {\npanic(fmt.Sprint(\"unknown protocol number: \", p.number))\n}\n-// ParsePorts returns the source and destination ports stored in the given udp\n+// ParsePorts returns the source and destination ports stored in the given ping\n// packet.\nfunc (*protocol) ParsePorts(v buffer.View) (src, dst uint16, err *tcpip.Error) {\nreturn 0, binary.BigEndian.Uint16(v[header.ICMPv4MinimumSize:]), nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | ping: update comment about UDP
PiperOrigin-RevId: 210788012
Change-Id: I5ebdcf3d02bfab3484a1374fbccba870c9d68954 |
259,992 | 30.08.2018 17:29:14 | 25,200 | 3e493adf7adb6c8b920ae224fb68e2c317a16a56 | Add seccomp filter to fsgofer | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -42,6 +42,7 @@ go_library(\n\"//runsc/console\",\n\"//runsc/container\",\n\"//runsc/fsgofer\",\n+ \"//runsc/fsgofer/filter\",\n\"//runsc/specutils\",\n\"@com_github_google_subcommands//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -28,6 +28,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/p9\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n\"gvisor.googlesource.com/gvisor/runsc/fsgofer\"\n+ \"gvisor.googlesource.com/gvisor/runsc/fsgofer/filter\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n@@ -151,6 +152,10 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nFatalf(\"too many FDs passed for mounts. mounts: %d, FDs: %d\", mountIdx, len(g.ioFDs))\n}\n+ if err := filter.Install(); err != nil {\n+ Fatalf(\"Failed to install seccomp filters: %v\", err)\n+ }\n+\nrunServers(ats, g.ioFDs)\nreturn subcommands.ExitSuccess\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\n+\n+go_library(\n+ name = \"filter\",\n+ srcs = [\n+ \"config.go\",\n+ \"extra_filters.go\",\n+ \"extra_filters_msan.go\",\n+ \"extra_filters_race.go\",\n+ \"filter.go\",\n+ ],\n+ importpath = \"gvisor.googlesource.com/gvisor/runsc/fsgofer/filter\",\n+ visibility = [\n+ \"//runsc:__subpackages__\",\n+ ],\n+ deps = [\n+ \"//pkg/abi/linux\",\n+ \"//pkg/log\",\n+ \"//pkg/seccomp\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/config.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package filter\n+\n+import (\n+ \"os\"\n+ \"syscall\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// allowedSyscalls is the set of syscalls executed by the gofer.\n+var allowedSyscalls = seccomp.SyscallRules{\n+ syscall.SYS_ACCEPT: {},\n+ syscall.SYS_ARCH_PRCTL: []seccomp.Rule{\n+ {seccomp.AllowValue(linux.ARCH_GET_FS)},\n+ {seccomp.AllowValue(linux.ARCH_SET_FS)},\n+ },\n+ syscall.SYS_CLOCK_GETTIME: {},\n+ syscall.SYS_CLONE: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(\n+ syscall.CLONE_VM |\n+ syscall.CLONE_FS |\n+ syscall.CLONE_FILES |\n+ syscall.CLONE_SIGHAND |\n+ syscall.CLONE_SYSVSEM |\n+ syscall.CLONE_THREAD),\n+ },\n+ },\n+ syscall.SYS_CLOSE: {},\n+ syscall.SYS_DUP: {},\n+ syscall.SYS_EPOLL_CTL: {},\n+ syscall.SYS_EPOLL_PWAIT: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ },\n+ syscall.SYS_EVENTFD2: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(0),\n+ seccomp.AllowValue(0),\n+ },\n+ },\n+ syscall.SYS_EXIT: {},\n+ syscall.SYS_EXIT_GROUP: {},\n+ syscall.SYS_FCHMOD: {},\n+ syscall.SYS_FCHOWNAT: {},\n+ syscall.SYS_FCNTL: []seccomp.Rule{\n+ seccomp.Rule{\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_GETFL),\n+ },\n+ seccomp.Rule{\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_SETFL),\n+ },\n+ seccomp.Rule{\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.F_GETFD),\n+ },\n+ },\n+ syscall.SYS_FSTAT: {},\n+ syscall.SYS_FSTATFS: {},\n+ syscall.SYS_FSYNC: {},\n+ syscall.SYS_FTRUNCATE: {},\n+ syscall.SYS_FUTEX: {\n+ seccomp.Rule{\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ seccomp.Rule{\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG),\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(0),\n+ },\n+ },\n+ syscall.SYS_GETDENTS64: {},\n+ syscall.SYS_GETPID: {},\n+ unix.SYS_GETRANDOM: {},\n+ syscall.SYS_GETTID: {},\n+ syscall.SYS_GETTIMEOFDAY: {},\n+ syscall.SYS_LINKAT: {},\n+ syscall.SYS_LSEEK: {},\n+ syscall.SYS_MKDIRAT: {},\n+ syscall.SYS_MMAP: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_SHARED),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),\n+ },\n+ },\n+ syscall.SYS_MPROTECT: {},\n+ syscall.SYS_MUNMAP: {},\n+ syscall.SYS_NANOSLEEP: {},\n+ syscall.SYS_NEWFSTATAT: {},\n+ syscall.SYS_OPENAT: {},\n+ syscall.SYS_POLL: {},\n+ 
syscall.SYS_PREAD64: {},\n+ syscall.SYS_PWRITE64: {},\n+ syscall.SYS_READ: {},\n+ syscall.SYS_READLINKAT: {},\n+ syscall.SYS_RECVMSG: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),\n+ },\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),\n+ },\n+ },\n+ syscall.SYS_RENAMEAT: {},\n+ syscall.SYS_RESTART_SYSCALL: {},\n+ syscall.SYS_RT_SIGPROCMASK: {},\n+ syscall.SYS_SCHED_YIELD: {},\n+ syscall.SYS_SENDMSG: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),\n+ },\n+ },\n+ syscall.SYS_SHUTDOWN: []seccomp.Rule{\n+ {seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)},\n+ },\n+ syscall.SYS_SIGALTSTACK: {},\n+ syscall.SYS_SYMLINKAT: {},\n+ syscall.SYS_TGKILL: []seccomp.Rule{\n+ {\n+ seccomp.AllowValue(uint64(os.Getpid())),\n+ },\n+ },\n+ syscall.SYS_UNLINKAT: {},\n+ syscall.SYS_UTIMENSAT: {},\n+ syscall.SYS_WRITE: {},\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/extra_filters.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build !msan,!race\n+\n+package filter\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// instrumentationFilters returns additional filters for syscalls used by\n+// Go intrumentation tools, e.g. -race, -msan.\n+// Returns empty when disabled.\n+func instrumentationFilters() seccomp.SyscallRules {\n+ return nil\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/extra_filters_msan.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build msan\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// instrumentationFilters returns additional filters for syscalls used by MSAN.\n+func instrumentationFilters() seccomp.SyscallRules {\n+ log.Warningf(\"*** SECCOMP WARNING: MSAN is enabled: syscall filters less restrictive!\")\n+ return seccomp.SyscallRules{\n+ syscall.SYS_SCHED_GETAFFINITY: {},\n+ syscall.SYS_SET_ROBUST_LIST: {},\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/extra_filters_race.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build race\n+\n+package filter\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// instrumentationFilters returns additional filters for syscalls used by TSAN.\n+func instrumentationFilters() seccomp.SyscallRules {\n+ log.Warningf(\"*** SECCOMP WARNING: TSAN is enabled: syscall filters less restrictive!\")\n+ return seccomp.SyscallRules{\n+ syscall.SYS_BRK: {},\n+ syscall.SYS_CLONE: {},\n+ syscall.SYS_FUTEX: {},\n+ syscall.SYS_MADVISE: {},\n+ syscall.SYS_MMAP: {},\n+ syscall.SYS_MUNLOCK: {},\n+ syscall.SYS_NANOSLEEP: {},\n+ syscall.SYS_OPEN: {},\n+ syscall.SYS_SET_ROBUST_LIST: {},\n+ // Used within glibc's malloc.\n+ syscall.SYS_TIME: {},\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/fsgofer/filter/filter.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package filter defines all syscalls the gofer is allowed to make, and\n+// installs seccomp filters to prevent prohibited syscalls in case it's\n+// compromised.\n+package filter\n+\n+import (\n+ \"gvisor.googlesource.com/gvisor/pkg/seccomp\"\n+)\n+\n+// Install installs seccomp filters.\n+func Install() error {\n+ s := allowedSyscalls\n+\n+ // Set of additional filters used by -race and -msan. Returns empty\n+ // when not enabled.\n+ s.Merge(instrumentationFilters())\n+\n+ // TODO: Set kill=true when SECCOMP_RET_KILL_PROCESS is supported.\n+ return seccomp.Install(s, false)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add seccomp filter to fsgofer
PiperOrigin-RevId: 211011542
Change-Id: Ib5a83a00f8eb6401603c6fb5b59afc93bac52558 |
259,992 | 31.08.2018 09:44:31 | 25,200 | e669697241e9774f1a1e4ab609dde933a0563ba6 | Fix RunAsRoot arguments forwarding
It was including the path to the executable twice in the
arguments. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -254,7 +254,7 @@ func RunAsRoot(m *testing.M) {\nruntime.LockOSThread()\ndefer runtime.UnlockOSThread()\n- cmd := exec.Command(\"/proc/self/exe\", os.Args...)\n+ cmd := exec.Command(\"/proc/self/exe\", os.Args[1:]...)\ncmd.SysProcAttr = &syscall.SysProcAttr{\nCloneflags: syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS,\n// Set current user/group as root inside the namespace.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix RunAsRoot arguments forwarding
It was including the path to the executable twice in the
arguments.
PiperOrigin-RevId: 211098311
Change-Id: I5357c51c63f38dfab551b17bb0e04011a0575010 |
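The bug is easiest to see with a tiny re-exec example: exec.Command already supplies the program path as argv[0], so forwarding os.Args (instead of os.Args[1:]) makes the child see its own path twice. A minimal standalone illustration:

```go
// Minimal illustration of the argument-forwarding bug fixed here. The child
// process prints its arguments; forwarding os.Args would make the program
// path appear twice (once as argv[0] added by exec.Command, once forwarded),
// while os.Args[1:] forwards only the real flags.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if os.Getenv("REEXEC_CHILD") == "1" {
		fmt.Println("child argv:", os.Args)
		return
	}

	// Buggy: exec.Command("/proc/self/exe", os.Args...) yields
	//   [/proc/self/exe /path/to/prog -flag ...]
	// Fixed: forward os.Args[1:] instead.
	cmd := exec.Command("/proc/self/exe", os.Args[1:]...)
	cmd.Env = append(os.Environ(), "REEXEC_CHILD=1")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
		os.Exit(1)
	}
}
```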
259,885 | 31.08.2018 13:06:16 | 25,200 | b1c1afa3ccc499df3fd15814d2b6cf9005bc2ab1 | Delete the long-obsolete kernel.TaskMaybe interface. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -499,13 +499,6 @@ func (t *Task) afterLoad() {\n// struct.\nconst copyScratchBufferLen = 52\n-// TaskMaybe is the interface for extracting Tasks out of things which may be\n-// or contain Task objects.\n-type TaskMaybe interface {\n- // ExtractTask returns the Task.\n- ExtractTask() *Task\n-}\n-\n// CopyScratchBuffer returns a scratch buffer to be used in CopyIn/CopyOut\n// functions. It must only be used within those functions and can only be used\n// by the task goroutine; it exists to improve performance and thus\n@@ -525,11 +518,6 @@ func (t *Task) FutexWaiter() *futex.Waiter {\nreturn t.futexWaiter\n}\n-// ExtractTask implements TaskMaybe.ExtractTask.\n-func (t *Task) ExtractTask() *Task {\n- return t\n-}\n-\n// TaskContext returns t's TaskContext.\n//\n// Precondition: The caller must be running on the task goroutine, or t.mu must\n"
}
] | Go | Apache License 2.0 | google/gvisor | Delete the long-obsolete kernel.TaskMaybe interface.
PiperOrigin-RevId: 211131855
Change-Id: Ia7799561ccd65d16269e0ae6f408ab53749bca37 |
259,885 | 31.08.2018 13:57:02 | 25,200 | 098046ba193b839d69c059f7a0e68c89409b4237 | Disintegrate kernel.TaskResources.
This allows us to cleanly call kernel.FDMap.DecRef without holding
mutexes. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -95,7 +95,6 @@ go_library(\n\"task_list.go\",\n\"task_log.go\",\n\"task_net.go\",\n- \"task_resources.go\",\n\"task_run.go\",\n\"task_sched.go\",\n\"task_signals.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -332,7 +332,8 @@ func (ts *TaskSet) flushWritesToFiles(ctx context.Context) error {\nts.mu.RLock()\ndefer ts.mu.RUnlock()\nfor t := range ts.Root.tids {\n- if fdmap := t.FDMap(); fdmap != nil {\n+ // We can skip locking Task.mu here since the kernel is paused.\n+ if fdmap := t.fds; fdmap != nil {\nfor _, desc := range fdmap.files {\nif flags := desc.file.Flags(); !flags.Write {\ncontinue\n@@ -381,7 +382,8 @@ func (ts *TaskSet) unregisterEpollWaiters() {\nts.mu.RLock()\ndefer ts.mu.RUnlock()\nfor t := range ts.Root.tids {\n- if fdmap := t.FDMap(); fdmap != nil {\n+ // We can skip locking Task.mu here since the kernel is paused.\n+ if fdmap := t.fds; fdmap != nil {\nfor _, desc := range fdmap.files {\nif desc.file != nil {\nif e, ok := desc.file.FileOperations.(*epoll.EventPoll); ok {\n@@ -625,20 +627,23 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\nif err != nil {\nreturn nil, err\n}\n- tr := newTaskResources(args.FDMap, newFSContext(root, wd, args.Umask))\n- // NewTask unconditionally takes ownership of tr, so we never have to call\n- // tr.release.\n+\n+ // Take a reference on the FDMap, which will be transferred to\n+ // TaskSet.NewTask().\n+ args.FDMap.IncRef()\n// Create the task.\nconfig := &TaskConfig{\nKernel: k,\nThreadGroup: tg,\nTaskContext: tc,\n- TaskResources: tr,\n+ FSContext: newFSContext(root, wd, args.Umask),\n+ FDMap: args.FDMap,\nCredentials: args.Credentials,\n+ AllowedCPUMask: sched.NewFullCPUSet(k.applicationCores),\nUTSNamespace: args.UTSNamespace,\nIPCNamespace: args.IPCNamespace,\n- AllowedCPUMask: sched.NewFullCPUSet(k.applicationCores),\n+ AbstractSocketNamespace: NewAbstractSocketNamespace(), // FIXME\n}\nt, err := k.tasks.NewTask(config)\nif err != nil {\n@@ -714,7 +719,7 @@ func (k *Kernel) pauseTimeLocked() {\nfor _, it := range t.tg.timers {\nit.PauseTimer()\n}\n- if fdm := t.tr.FDMap; fdm != nil {\n+ if fdm := t.fds; fdm != nil {\nfor _, desc := range fdm.files {\nif tfd, ok := desc.file.FileOperations.(*timerfd.TimerOperations); ok {\ntfd.PauseTimer()\n@@ -744,7 +749,7 @@ func (k *Kernel) resumeTimeLocked() {\nfor _, it := range t.tg.timers {\nit.ResumeTimer()\n}\n- if fdm := t.tr.FDMap; fdm != nil {\n+ if fdm := t.fds; fdm != nil {\nfor _, desc := range fdm.files {\nif tfd, ok := desc.file.FileOperations.(*timerfd.TimerOperations); ok {\ntfd.ResumeTimer()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/ptrace.go",
"new_path": "pkg/sentry/kernel/ptrace.go",
"diff": "@@ -992,9 +992,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\nif addr != linux.SignalSetSize {\nreturn syserror.EINVAL\n}\n- target.mu.Lock()\n- defer target.mu.Unlock()\n- _, err := t.CopyOut(data, target.tr.SignalMask)\n+ _, err := t.CopyOut(data, target.SignalMask())\nreturn err\ncase linux.PTRACE_SETSIGMASK:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -99,6 +99,19 @@ type Task struct {\n// ThreadGroup.signalHandlers.\npendingSignals pendingSignals\n+ // signalMask is the set of signals whose delivery is currently blocked.\n+ //\n+ // signalMask is accessed using atomic memory operations, and is protected\n+ // by the signal mutex (such that reading signalMask is safe if either the\n+ // signal mutex is locked or if atomic memory operations are used, while\n+ // writing signalMask requires both). signalMask is owned by the task\n+ // goroutine.\n+ signalMask linux.SignalSet\n+\n+ // FIXME: An equivalent to task_struct::real_blocked is needed\n+ // to prevent signals that are ignored, but transiently unblocked by\n+ // sigtimedwait(2), from being dropped in Task.sendSignalTimerLocked.\n+\n// If haveSavedSignalMask is true, savedSignalMask is the signal mask that\n// should be applied after the task has either delivered one signal to a\n// user handler or is about to resume execution in the untrusted\n@@ -182,25 +195,30 @@ type Task struct {\n// syscallRestartBlock is exclusive to the task goroutine.\nsyscallRestartBlock SyscallRestartBlock\n+ // p provides the mechanism by which the task runs code in userspace. The p\n+ // interface object is immutable.\n+ p platform.Context `state:\"nosave\"`\n+\n+ // k is the Kernel that this task belongs to. The k pointer is immutable.\n+ k *Kernel\n+\n// mu protects some of the following fields.\nmu sync.Mutex `state:\"nosave\"`\n- // tc and tr form the majority of the task's data.\n+ // tc holds task data provided by the ELF loader.\n//\n- // tc and tr are protected by mu. tc and tr are owned by the task\n- // goroutine. tr.signalMask is protected by the signal mutex and must be\n- // written using atomic memory operations (such that reading tr.signalMask\n- // is safe if the signal mutex is locked or if atomic memory operations are\n- // used), but is also owned by the task goroutine.\n+ // tc is protected by mu, and is owned by the task goroutine.\ntc TaskContext\n- tr TaskResources\n- // p provides the mechanism by which the task runs code in userspace. The p\n- // interface object is immutable.\n- p platform.Context `state:\"nosave\"`\n+ // fsc is the task's filesystem context.\n+ //\n+ // fsc is protected by mu, and is owned by the task goroutine.\n+ fsc *FSContext\n- // k is the Kernel that this task belongs to. 
The k pointer is immutable.\n- k *Kernel\n+ // fds is the task's file descriptor table.\n+ //\n+ // fds is protected by mu, and is owned by the task goroutine.\n+ fds *FDMap\n// If vforkParent is not nil, it is the task that created this task with\n// vfork() or clone(CLONE_VFORK), and should have its vforkStop ended when\n@@ -351,6 +369,11 @@ type Task struct {\n// ipcns is protected by mu.\nipcns *IPCNamespace\n+ // abstractSockets tracks abstract sockets that are in use.\n+ //\n+ // abstractSockets is protected by mu.\n+ abstractSockets *AbstractSocketNamespace\n+\n// parentDeathSignal is sent to this task's thread group when its parent exits.\n//\n// parentDeathSignal is protected by mu.\n@@ -518,29 +541,6 @@ func (t *Task) FutexWaiter() *futex.Waiter {\nreturn t.futexWaiter\n}\n-// TaskContext returns t's TaskContext.\n-//\n-// Precondition: The caller must be running on the task goroutine, or t.mu must\n-// be locked.\n-func (t *Task) TaskContext() *TaskContext {\n- return &t.tc\n-}\n-\n-// TaskResources returns t's TaskResources.\n-//\n-// Precondition: The caller must be running on the task goroutine, or t.mu must\n-// be locked.\n-func (t *Task) TaskResources() *TaskResources {\n- return &t.tr\n-}\n-\n-// WithMuLocked executes f with t.mu locked.\n-func (t *Task) WithMuLocked(f func(*Task)) {\n- t.mu.Lock()\n- defer t.mu.Unlock()\n- f(t)\n-}\n-\n// Kernel returns the Kernel containing t.\nfunc (t *Task) Kernel() *Kernel {\nreturn t.k\n@@ -572,7 +572,7 @@ func (t *Task) Value(key interface{}) interface{} {\ncase context.CtxThreadGroupID:\nreturn int32(t.ThreadGroup().ID())\ncase fs.CtxRoot:\n- return t.FSContext().RootDirectory()\n+ return t.fsc.RootDirectory()\ncase inet.CtxStack:\nreturn t.NetworkContext()\ncase ktime.CtxRealtimeClock:\n@@ -619,3 +619,62 @@ func (t *Task) SyscallRestartBlock() SyscallRestartBlock {\nt.syscallRestartBlock = nil\nreturn r\n}\n+\n+// IsChrooted returns true if the root directory of t's FSContext is not the\n+// root directory of t's MountNamespace.\n+//\n+// Preconditions: The caller must be running on the task goroutine, or t.mu\n+// must be locked.\n+func (t *Task) IsChrooted() bool {\n+ realRoot := t.k.mounts.Root()\n+ defer realRoot.DecRef()\n+ root := t.fsc.RootDirectory()\n+ if root != nil {\n+ defer root.DecRef()\n+ }\n+ return root != realRoot\n+}\n+\n+// TaskContext returns t's TaskContext.\n+//\n+// Precondition: The caller must be running on the task goroutine, or t.mu must\n+// be locked.\n+func (t *Task) TaskContext() *TaskContext {\n+ return &t.tc\n+}\n+\n+// FSContext returns t's FSContext. FSContext does not take an additional\n+// reference on the returned FSContext.\n+//\n+// Precondition: The caller must be running on the task goroutine, or t.mu must\n+// be locked.\n+func (t *Task) FSContext() *FSContext {\n+ return t.fsc\n+}\n+\n+// FDMap returns t's FDMap. FDMap does not take an additional reference on the\n+// returned FDMap.\n+//\n+// Precondition: The caller must be running on the task goroutine, or t.mu must\n+// be locked.\n+func (t *Task) FDMap() *FDMap {\n+ return t.fds\n+}\n+\n+// WithMuLocked executes f with t.mu locked.\n+func (t *Task) WithMuLocked(f func(*Task)) {\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\n+ f(t)\n+}\n+\n+// MountNamespace returns t's MountNamespace. 
MountNamespace does not take an\n+// additional reference on the returned MountNamespace.\n+func (t *Task) MountNamespace() *fs.MountNamespace {\n+ return t.k.mounts\n+}\n+\n+// AbstractSockets returns t's AbstractSocketNamespace.\n+func (t *Task) AbstractSockets() *AbstractSocketNamespace {\n+ return t.abstractSockets\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -213,6 +213,22 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\ntc.Arch.StateData().Regs.Fs_base = uint64(opts.TLS)\n}\n+ var fsc *FSContext\n+ if opts.NewFSContext {\n+ fsc = t.fsc.Fork()\n+ } else {\n+ fsc = t.fsc\n+ fsc.IncRef()\n+ }\n+\n+ var fds *FDMap\n+ if opts.NewFiles {\n+ fds = t.fds.Fork()\n+ } else {\n+ fds = t.fds\n+ fds.IncRef()\n+ }\n+\npidns := t.tg.pidns\nif t.childPIDNamespace != nil {\npidns = t.childPIDNamespace\n@@ -227,17 +243,21 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n}\ntg = NewThreadGroup(pidns, sh, opts.TerminationSignal, tg.limits.GetCopy(), t.k.monotonicClock)\n}\n+\ncfg := &TaskConfig{\nKernel: t.k,\nThreadGroup: tg,\n+ SignalMask: t.SignalMask(),\nTaskContext: tc,\n- TaskResources: t.tr.Fork(!opts.NewFiles, !opts.NewFSContext),\n- Niceness: t.Niceness(),\n+ FSContext: fsc,\n+ FDMap: fds,\nCredentials: creds.Fork(),\n+ Niceness: t.Niceness(),\nNetworkNamespaced: t.netns,\nAllowedCPUMask: t.CPUMask(),\nUTSNamespace: utsns,\nIPCNamespace: ipcns,\n+ AbstractSocketNamespace: t.abstractSockets,\n}\nif opts.NewThreadGroup {\ncfg.Parent = t\n@@ -435,15 +455,17 @@ func (t *Task) Unshare(opts *SharingOptions) error {\nt.childPIDNamespace = t.tg.pidns.NewChild(t.UserNamespace())\n}\nt.mu.Lock()\n- defer t.mu.Unlock()\n+ // Can't defer unlock: DecRefs must occur without holding t.mu.\nif opts.NewNetworkNamespace {\nif !haveCapSysAdmin {\n+ t.mu.Unlock()\nreturn syserror.EPERM\n}\nt.netns = true\n}\nif opts.NewUTSNamespace {\nif !haveCapSysAdmin {\n+ t.mu.Unlock()\nreturn syserror.EPERM\n}\n// Note that this must happen after NewUserNamespace, so the\n@@ -452,21 +474,29 @@ func (t *Task) Unshare(opts *SharingOptions) error {\n}\nif opts.NewIPCNamespace {\nif !haveCapSysAdmin {\n+ t.mu.Unlock()\nreturn syserror.EPERM\n}\n// Note that \"If CLONE_NEWIPC is set, then create the process in a new IPC\n// namespace\"\nt.ipcns = NewIPCNamespace(t.creds.UserNamespace)\n}\n+ var oldfds *FDMap\nif opts.NewFiles {\n- oldFDMap := t.tr.FDMap\n- t.tr.FDMap = oldFDMap.Fork()\n- oldFDMap.DecRef()\n+ oldfds = t.fds\n+ t.fds = oldfds.Fork()\n}\n+ var oldfsc *FSContext\nif opts.NewFSContext {\n- oldFS := t.tr.FSContext\n- t.tr.FSContext = oldFS.Fork()\n- oldFS.DecRef()\n+ oldfsc = t.fsc\n+ t.fsc = oldfsc.Fork()\n+ }\n+ t.mu.Unlock()\n+ if oldfds != nil {\n+ oldfds.DecRef()\n+ }\n+ if oldfsc != nil {\n+ oldfsc.DecRef()\n}\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exec.go",
"new_path": "pkg/sentry/kernel/task_exec.go",
"diff": "@@ -194,7 +194,7 @@ func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState {\nt.tg.pidns.owner.mu.Unlock()\n// Remove FDs with the CloseOnExec flag set.\n- t.FDMap().RemoveIf(func(file *fs.File, flags FDFlags) bool {\n+ t.fds.RemoveIf(func(file *fs.File, flags FDFlags) bool {\nreturn flags.CloseOnExec\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_exit.go",
"new_path": "pkg/sentry/kernel/task_exit.go",
"diff": "@@ -253,21 +253,22 @@ func (*runExitMain) execute(t *Task) taskRunState {\n}\n}\n- // Deactivate the address space before releasing the MM.\n+ // Deactivate the address space and update max RSS before releasing the\n+ // task's MM.\nt.Deactivate()\n-\n- // Update the max resident set size before releasing t.tc.mm.\nt.tg.pidns.owner.mu.Lock()\nt.updateRSSLocked()\nt.tg.pidns.owner.mu.Unlock()\n-\n- // Release all of the task's resources.\nt.mu.Lock()\nt.tc.release()\n- t.tr.release()\nt.mu.Unlock()\n+\n+ // Releasing the MM unblocks a blocked CLONE_VFORK parent.\nt.unstopVforkParent()\n+ t.fsc.DecRef()\n+ t.fds.DecRef()\n+\n// If this is the last task to exit from the thread group, release the\n// thread group's resources.\nif lastExiter {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_log.go",
"new_path": "pkg/sentry/kernel/task_log.go",
"diff": "@@ -63,7 +63,7 @@ func (t *Task) DebugDumpState() {\nif mm := t.MemoryManager(); mm != nil {\nt.Debugf(\"Mappings:\\n%s\", mm)\n}\n- t.Debugf(\"FDMap:\\n%s\", t.FDMap())\n+ t.Debugf(\"FDMap:\\n%s\", t.fds)\n}\n// debugDumpRegisters logs register state at log level debug.\n"
},
{
"change_type": "DELETE",
"old_path": "pkg/sentry/kernel/task_resources.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package kernel\n-\n-import (\n- \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n-)\n-\n-// TaskResources is the subset of a task's data provided by its creator that is\n-// not provided by the loader.\n-//\n-// +stateify savable\n-type TaskResources struct {\n- // SignalMask is the set of signals whose delivery is currently blocked.\n- //\n- // FIXME: Determine if we also need RealSignalMask\n- SignalMask linux.SignalSet\n-\n- // FSContext is the filesystem context.\n- *FSContext\n-\n- // FDMap provides access to files to the task.\n- *FDMap\n-\n- // Tracks abstract sockets that are in use.\n- AbstractSockets *AbstractSocketNamespace\n-}\n-\n-// newTaskResources returns a new TaskResources, taking an additional reference\n-// on fdm.\n-func newTaskResources(fdm *FDMap, fc *FSContext) *TaskResources {\n- fdm.IncRef()\n- return &TaskResources{\n- FDMap: fdm,\n- FSContext: fc,\n- AbstractSockets: NewAbstractSocketNamespace(),\n- }\n-}\n-\n-// release releases all resources held by the TaskResources. release is called\n-// by the task when it exits.\n-func (tr *TaskResources) release() {\n- tr.FDMap.DecRef()\n- tr.FDMap = nil\n- tr.FSContext.DecRef()\n- tr.FSContext = nil\n- tr.AbstractSockets = nil\n-}\n-\n-// Fork returns a duplicate of tr.\n-//\n-// FIXME: Preconditions: When tr is owned by a Task, that task's\n-// signal mutex must be locked, or Fork must be called by the task's goroutine.\n-func (tr *TaskResources) Fork(shareFiles bool, shareFSContext bool) *TaskResources {\n- var fdmap *FDMap\n- if shareFiles {\n- fdmap = tr.FDMap\n- fdmap.IncRef()\n- } else {\n- fdmap = tr.FDMap.Fork()\n- }\n-\n- var fsc *FSContext\n- if shareFSContext {\n- fsc = tr.FSContext\n- fsc.IncRef()\n- } else {\n- fsc = tr.FSContext.Fork()\n- }\n-\n- return &TaskResources{\n- SignalMask: tr.SignalMask,\n- FDMap: fdmap,\n- FSContext: fsc,\n- AbstractSockets: tr.AbstractSockets,\n- }\n-}\n-\n-// FDMap returns t's FDMap.\n-//\n-// Preconditions: The caller must be running on the task goroutine, or t.mu\n-// must be locked.\n-func (t *Task) FDMap() *FDMap {\n- return t.tr.FDMap\n-}\n-\n-// FSContext returns t's FSContext.\n-//\n-// Preconditions: The caller must be running on the task goroutine, or t.mu\n-// must be locked.\n-func (t *Task) FSContext() *FSContext {\n- return t.tr.FSContext\n-}\n-\n-// MountNamespace returns t's MountNamespace. 
MountNamespace does not take an additional\n-// reference on the returned MountNamespace.\n-func (t *Task) MountNamespace() *fs.MountNamespace {\n- return t.k.mounts\n-}\n-\n-// AbstractSockets returns t's AbstractSocketNamespace.\n-func (t *Task) AbstractSockets() *AbstractSocketNamespace {\n- return t.tr.AbstractSockets\n-}\n-\n-// IsChrooted returns true if the root directory of t's FSContext is not the\n-// root directory of t's MountNamespace.\n-//\n-// Preconditions: The caller must be running on the task goroutine, or t.mu\n-// must be locked.\n-func (t *Task) IsChrooted() bool {\n- realRoot := t.k.mounts.Root()\n- defer realRoot.DecRef()\n- root := t.tr.FSContext.RootDirectory()\n- if root != nil {\n- defer root.DecRef()\n- }\n- return root != realRoot\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_signals.go",
"new_path": "pkg/sentry/kernel/task_signals.go",
"diff": "@@ -124,10 +124,10 @@ var StopSignals = linux.MakeSignalSet(linux.SIGSTOP, linux.SIGTSTP, linux.SIGTTI\n//\n// Preconditions: t.tg.signalHandlers.mu must be locked.\nfunc (t *Task) dequeueSignalLocked() *arch.SignalInfo {\n- if info := t.pendingSignals.dequeue(t.tr.SignalMask); info != nil {\n+ if info := t.pendingSignals.dequeue(t.signalMask); info != nil {\nreturn info\n}\n- return t.tg.pendingSignals.dequeue(t.tr.SignalMask)\n+ return t.tg.pendingSignals.dequeue(t.signalMask)\n}\n// TakeSignal returns a pending signal not blocked by mask. Signal handlers are\n@@ -252,7 +252,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct)\n// handler should run with the current mask, but sigreturn should restore\n// the saved one.\nst := &arch.Stack{t.Arch(), t.MemoryManager(), sp}\n- mask := t.tr.SignalMask\n+ mask := t.signalMask\nif t.haveSavedSignalMask {\nmask = t.savedSignalMask\n}\n@@ -262,7 +262,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct)\nt.haveSavedSignalMask = false\n// Add our signal mask.\n- newMask := t.tr.SignalMask | act.Mask\n+ newMask := t.signalMask | act.Mask\nif !act.IsNoDefer() {\nnewMask |= linux.SignalSetOf(linux.Signal(info.Signo))\n}\n@@ -431,7 +431,7 @@ func (t *Task) sendSignalTimerLocked(info *arch.SignalInfo, group bool, timer *I\n// Linux's kernel/signal.c:__send_signal() => prepare_signal() =>\n// sig_ignored().\nignored := computeAction(sig, t.tg.signalHandlers.actions[sig]) == SignalActionIgnore\n- if linux.SignalSetOf(sig)&t.tr.SignalMask == 0 && ignored && !t.hasTracer() {\n+ if linux.SignalSetOf(sig)&t.signalMask == 0 && ignored && !t.hasTracer() {\nt.Debugf(\"Discarding ignored signal %d\", sig)\nif timer != nil {\ntimer.signalRejectedLocked()\n@@ -515,7 +515,7 @@ func (tg *ThreadGroup) applySignalSideEffectsLocked(sig linux.Signal) {\n// Preconditions: The signal mutex must be locked.\nfunc (t *Task) canReceiveSignalLocked(sig linux.Signal) bool {\n// - Do not choose tasks that are blocking the signal.\n- if linux.SignalSetOf(sig)&t.tr.SignalMask != 0 {\n+ if linux.SignalSetOf(sig)&t.signalMask != 0 {\nreturn false\n}\n// - No need to check Task.exitState, as the exit path sets every bit in the\n@@ -564,21 +564,21 @@ func (t *Task) forceSignal(sig linux.Signal, unconditional bool) {\n}\nfunc (t *Task) forceSignalLocked(sig linux.Signal, unconditional bool) {\n- blocked := linux.SignalSetOf(sig)&t.tr.SignalMask != 0\n+ blocked := linux.SignalSetOf(sig)&t.signalMask != 0\nact := t.tg.signalHandlers.actions[sig]\nignored := act.Handler == arch.SignalActIgnore\nif blocked || ignored || unconditional {\nact.Handler = arch.SignalActDefault\nt.tg.signalHandlers.actions[sig] = act\nif blocked {\n- t.setSignalMaskLocked(t.tr.SignalMask &^ linux.SignalSetOf(sig))\n+ t.setSignalMaskLocked(t.signalMask &^ linux.SignalSetOf(sig))\n}\n}\n}\n// SignalMask returns a copy of t's signal mask.\nfunc (t *Task) SignalMask() linux.SignalSet {\n- return linux.SignalSet(atomic.LoadUint64((*uint64)(&t.tr.SignalMask)))\n+ return linux.SignalSet(atomic.LoadUint64((*uint64)(&t.signalMask)))\n}\n// SetSignalMask sets t's signal mask.\n@@ -595,8 +595,8 @@ func (t *Task) SetSignalMask(mask linux.SignalSet) {\n// Preconditions: The signal mutex must be locked.\nfunc (t *Task) setSignalMaskLocked(mask linux.SignalSet) {\n- oldMask := t.tr.SignalMask\n- atomic.StoreUint64((*uint64)(&t.tr.SignalMask), uint64(mask))\n+ oldMask := t.signalMask\n+ atomic.StoreUint64((*uint64)(&t.signalMask), uint64(mask))\n// If the 
new mask blocks any signals that were not blocked by the old\n// mask, and at least one such signal is pending in tg.pendingSignals, and\n@@ -1076,7 +1076,7 @@ func (*runInterruptAfterSignalDeliveryStop) execute(t *Task) taskRunState {\nt.tg.signalHandlers.mu.Lock()\nt.tg.pidns.owner.mu.Unlock()\n// If the signal is masked, re-queue it.\n- if linux.SignalSetOf(sig)&t.tr.SignalMask != 0 {\n+ if linux.SignalSetOf(sig)&t.signalMask != 0 {\nt.sendSignalLocked(info, false /* group */)\nt.tg.signalHandlers.mu.Unlock()\nreturn (*runInterrupt)(nil)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "package kernel\nimport (\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex\"\n@@ -26,7 +27,7 @@ import (\n// TaskConfig defines the configuration of a new Task (see below).\ntype TaskConfig struct {\n// Kernel is the owning Kernel.\n- *Kernel\n+ Kernel *Kernel\n// Parent is the new task's parent. Parent may be nil.\nParent *Task\n@@ -36,13 +37,24 @@ type TaskConfig struct {\nInheritParent *Task\n// ThreadGroup is the ThreadGroup the new task belongs to.\n- *ThreadGroup\n+ ThreadGroup *ThreadGroup\n- // TaskContext is the TaskContext of the new task.\n- *TaskContext\n+ // SignalMask is the new task's initial signal mask.\n+ SignalMask linux.SignalSet\n- // TaskResources is the TaskResources of the new task.\n- *TaskResources\n+ // TaskContext is the TaskContext of the new task. Ownership of the\n+ // TaskContext is transferred to TaskSet.NewTask, whether or not it\n+ // succeeds.\n+ TaskContext *TaskContext\n+\n+ // FSContext is the FSContext of the new task. A reference must be held on\n+ // FSContext, which is transferred to TaskSet.NewTask whether or not it\n+ // succeeds.\n+ FSContext *FSContext\n+\n+ // FDMap is the FDMap of the new task. A reference must be held on FDMap,\n+ // which is transferred to TaskSet.NewTask whether or not it succeeds.\n+ FDMap *FDMap\n// Credentials is the Credentials of the new task.\nCredentials *auth.Credentials\n@@ -62,25 +74,27 @@ type TaskConfig struct {\n// IPCNamespace is the IPCNamespace of the new task.\nIPCNamespace *IPCNamespace\n+\n+ // AbstractSocketNamespace is the AbstractSocketNamespace of the new task.\n+ AbstractSocketNamespace *AbstractSocketNamespace\n}\n-// NewTask creates a new task defined by TaskConfig.\n-// Whether or not NewTask is successful, it takes ownership of both TaskContext\n-// and TaskResources of the TaskConfig.\n+// NewTask creates a new task defined by cfg.\n//\n// NewTask does not start the returned task; the caller must call Task.Start.\nfunc (ts *TaskSet) NewTask(cfg *TaskConfig) (*Task, error) {\nt, err := ts.newTask(cfg)\nif err != nil {\ncfg.TaskContext.release()\n- cfg.TaskResources.release()\n+ cfg.FSContext.DecRef()\n+ cfg.FDMap.DecRef()\nreturn nil, err\n}\nreturn t, nil\n}\n-// newTask is a helper for TaskSet.NewTask that only takes ownership of TaskContext\n-// and TaskResources of the TaskConfig if it succeeds.\n+// newTask is a helper for TaskSet.NewTask that only takes ownership of parts\n+// of cfg if it succeeds.\nfunc (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) {\ntg := cfg.ThreadGroup\ntc := cfg.TaskContext\n@@ -92,9 +106,11 @@ func (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) {\n},\nrunState: (*runApp)(nil),\ninterruptChan: make(chan struct{}, 1),\n+ signalMask: cfg.SignalMask,\nsignalStack: arch.SignalStack{Flags: arch.SignalStackFlagDisable},\ntc: *tc,\n- tr: *cfg.TaskResources,\n+ fsc: cfg.FSContext,\n+ fds: cfg.FDMap,\np: cfg.Kernel.Platform.NewContext(),\nk: cfg.Kernel,\nptraceTracees: make(map[*Task]struct{}),\n@@ -105,6 +121,7 @@ func (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) {\nnetns: cfg.NetworkNamespaced,\nutsns: cfg.UTSNamespace,\nipcns: cfg.IPCNamespace,\n+ abstractSockets: cfg.AbstractSocketNamespace,\nrseqCPU: -1,\nfutexWaiter: futex.NewWaiter(),\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disintegrate kernel.TaskResources.
This allows us to cleanly call kernel.FDMap.DecRef without holding
mutexes.
PiperOrigin-RevId: 211139657
Change-Id: Ie59d5210fb9282e1950e2e40323df7264a01bcec |
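A minimal, hypothetical Go sketch of the locking pattern this change enables in Task.Unshare above. The type names below are stand-ins, not gVisor's real types: the reference-counted object is swapped out while the mutex is held, but DecRef only runs after the unlock, since the destructor may do arbitrary work.

```go
package main

import (
	"fmt"
	"sync"
)

// refCounted stands in for a reference-counted resource such as FDMap.
type refCounted struct {
	refs int32
}

// DecRef may run a destructor that must not execute under the task mutex.
func (r *refCounted) DecRef() { r.refs-- }

type task struct {
	mu  sync.Mutex
	fds *refCounted
}

// unshareFiles replaces t.fds with a fresh table. The old table is captured
// under t.mu, but its reference is dropped only after the unlock.
func (t *task) unshareFiles() {
	t.mu.Lock()
	old := t.fds
	t.fds = &refCounted{refs: 1}
	t.mu.Unlock()

	// Safe: no task lock is held when the destructor may run.
	old.DecRef()
}

func main() {
	t := &task{fds: &refCounted{refs: 1}}
	t.unshareFiles()
	fmt.Println("unshare done")
}
```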
259,885 | 31.08.2018 14:16:36 | 25,200 | b935311e2371abdbceba89294d0001905f2658d5 | Do not use fs.FileOwnerFromContext in fs/proc.file.UnstableAttr().
From //pkg/sentry/context/context.go:
// - It is *not safe* to retain a Context passed to a function beyond the scope
// of that function call.
Passing a stored kernel.Task as a context.Context to
fs.FileOwnerFromContext violates this requirement. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/file.go",
"new_path": "pkg/sentry/fs/proc/file.go",
"diff": "@@ -51,7 +51,8 @@ func (f *file) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAt\nreturn fs.UnstableAttr{}, err\n}\nif f.t != nil {\n- uattr.Owner = fs.FileOwnerFromContext(f.t)\n+ creds := f.t.Credentials()\n+ uattr.Owner = fs.FileOwner{creds.EffectiveKUID, creds.EffectiveKGID}\n}\nreturn uattr, nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Do not use fs.FileOwnerFromContext in fs/proc.file.UnstableAttr().
From //pkg/sentry/context/context.go:
// - It is *not safe* to retain a Context passed to a function beyond the scope
// of that function call.
Passing a stored kernel.Task as a context.Context to
fs.FileOwnerFromContext violates this requirement.
PiperOrigin-RevId: 211143021
Change-Id: I4c5b02bd941407be4c9cfdbcbdfe5a26acaec037 |
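A small, hypothetical sketch of the rule this change enforces (the types below are illustrative, not the sentry's): state captured at construction time should be read directly from the stored object rather than passed back in as a context.Context for later calls.

```go
package main

import (
	"context"
	"fmt"
)

// creds stands in for auth.Credentials.
type creds struct{ uid, gid int }

// procFile retains a reference to its owner at construction time, the way
// fs/proc.file retains a *kernel.Task.
type procFile struct {
	ownerCreds creds
}

// unstableAttr receives the caller's context but does not retain it, and it
// reads ownership from the stored credentials instead of treating the
// retained object as a Context.
func (f *procFile) unstableAttr(ctx context.Context) (uid, gid int) {
	_ = ctx // only valid for the duration of this call
	return f.ownerCreds.uid, f.ownerCreds.gid
}

func main() {
	f := &procFile{ownerCreds: creds{uid: 0, gid: 0}}
	uid, gid := f.unstableAttr(context.Background())
	fmt.Println(uid, gid)
}
```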
259,885 | 31.08.2018 15:43:32 | 25,200 | f8ccfbbed4875e65c78c849cd46afa882ba68ee3 | Document more task-goroutine-owned fields in kernel.Task.
Task.creds can only be changed by the task's own set*id and execve
syscalls, and Task namespaces can only be changed by the task's own
unshare/setns syscalls. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -354,19 +354,19 @@ type Task struct {\n// creds is the task's credentials.\n//\n- // creds is protected by mu, however the value itself is immutable and\n- // can only be changed by a copy. After reading the pointer, access\n- // will proceed outside the scope of mu.\n+ // creds is protected by mu, however the value itself is immutable and can\n+ // only be changed by a copy. After reading the pointer, access will\n+ // proceed outside the scope of mu. creds is owned by the task goroutine.\ncreds *auth.Credentials\n// utsns is the task's UTS namespace.\n//\n- // utsns is protected by mu.\n+ // utsns is protected by mu. utsns is owned by the task goroutine.\nutsns *UTSNamespace\n// ipcns is the task's IPC namespace.\n//\n- // ipcns is protected by mu.\n+ // ipcns is protected by mu. ipcns is owned by the task goroutine.\nipcns *IPCNamespace\n// abstractSockets tracks abstract sockets that are in use.\n@@ -547,6 +547,9 @@ func (t *Task) Kernel() *Kernel {\n}\n// Value implements context.Context.Value.\n+//\n+// Preconditions: The caller must be running on the task goroutine (as implied\n+// by the requirements of context.Context).\nfunc (t *Task) Value(key interface{}) interface{} {\nswitch key {\ncase CtxCanTrace:\n@@ -556,18 +559,12 @@ func (t *Task) Value(key interface{}) interface{} {\ncase CtxPIDNamespace:\nreturn t.tg.pidns\ncase CtxUTSNamespace:\n- t.mu.Lock()\n- defer t.mu.Unlock()\nreturn t.utsns\ncase CtxIPCNamespace:\n- t.mu.Lock()\n- defer t.mu.Unlock()\nreturn t.ipcns\ncase CtxTask:\nreturn t\ncase auth.CtxCredentials:\n- t.mu.Lock()\n- defer t.mu.Unlock()\nreturn t.creds\ncase context.CtxThreadGroupID:\nreturn int32(t.ThreadGroup().ID())\n"
}
] | Go | Apache License 2.0 | google/gvisor | Document more task-goroutine-owned fields in kernel.Task.
Task.creds can only be changed by the task's own set*id and execve
syscalls, and Task namespaces can only be changed by the task's own
unshare/setns syscalls.
PiperOrigin-RevId: 211156279
Change-Id: I94d57105d34e8739d964400995a8a5d76306b2a0 |
259,992 | 31.08.2018 16:11:07 | 25,200 | 66c03b3dd79c45014da19f36973a85290e9a4458 | Mounting over '/tmp' may fail | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -744,8 +744,8 @@ func TestUnixDomainSockets(t *testing.T) {\nspec.Mounts = []specs.Mount{\nspecs.Mount{\nType: \"bind\",\n- Destination: \"/tmp\",\n- Source: \"/tmp\",\n+ Destination: dir,\n+ Source: dir,\n},\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Mounting over '/tmp' may fail
PiperOrigin-RevId: 211160120
Change-Id: Ie5f280bdac17afd01cb16562ffff6222b3184c34 |
259,858 | 04.09.2018 09:18:00 | 25,200 | c09f9acd7c7a2e85472b1ee47bf26f7c89ded43e | Distinguish Element and Linker for ilist.
Furthermore, allow for the specification of an ElementMapper. This allows a
single "Element" type to exist on multiple inline lists, and work without
having to embed the entry type.
This is a requisite change for supporting a per-Inode list of Dirents. | [
{
"change_type": "MODIFY",
"old_path": "pkg/ilist/BUILD",
"new_path": "pkg/ilist/BUILD",
"diff": "@@ -28,6 +28,7 @@ go_template_instance(\nprefix = \"direct\",\ntemplate = \":generic_list\",\ntypes = {\n+ \"Element\": \"*direct\",\n\"Linker\": \"*direct\",\n},\n)\n@@ -47,6 +48,10 @@ go_template(\nsrcs = [\n\"list.go\",\n],\n- opt_types = [\"Linker\"],\n+ opt_types = [\n+ \"Element\",\n+ \"ElementMapper\",\n+ \"Linker\",\n+ ],\nvisibility = [\"//visibility:public\"],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/ilist/list.go",
"new_path": "pkg/ilist/list.go",
"diff": "@@ -21,12 +21,34 @@ package ilist\n// N.B. When substituted in a template instantiation, Linker doesn't need to\n// be an interface, and in most cases won't be.\ntype Linker interface {\n- Next() Linker\n- Prev() Linker\n- SetNext(Linker)\n- SetPrev(Linker)\n+ Next() Element\n+ Prev() Element\n+ SetNext(Element)\n+ SetPrev(Element)\n}\n+// Element the item that is used at the API level.\n+//\n+// N.B. Like Linker, this is unlikely to be an interface in most cases.\n+type Element interface {\n+ Linker\n+}\n+\n+// ElementMapper provides an identity mapping by default.\n+//\n+// This can be replaced to provide a struct that maps elements to linker\n+// objects, if they are not the same. An ElementMapper is not typically\n+// required if: Linker is left as is, Element is left as is, or Linker and\n+// Element are the same type.\n+type ElementMapper struct{}\n+\n+// linkerFor maps an Element to a Linker.\n+//\n+// This default implementation should be inlined.\n+//\n+//go:nosplit\n+func (ElementMapper) linkerFor(elem Element) Linker { return elem }\n+\n// List is an intrusive list. Entries can be added to or removed from the list\n// in O(1) time and with no additional memory allocations.\n//\n@@ -39,8 +61,8 @@ type Linker interface {\n//\n// +stateify savable\ntype List struct {\n- head Linker\n- tail Linker\n+ head Element\n+ tail Element\n}\n// Reset resets list l to the empty state.\n@@ -55,22 +77,22 @@ func (l *List) Empty() bool {\n}\n// Front returns the first element of list l or nil.\n-func (l *List) Front() Linker {\n+func (l *List) Front() Element {\nreturn l.head\n}\n// Back returns the last element of list l or nil.\n-func (l *List) Back() Linker {\n+func (l *List) Back() Element {\nreturn l.tail\n}\n// PushFront inserts the element e at the front of list l.\n-func (l *List) PushFront(e Linker) {\n- e.SetNext(l.head)\n- e.SetPrev(nil)\n+func (l *List) PushFront(e Element) {\n+ ElementMapper{}.linkerFor(e).SetNext(l.head)\n+ ElementMapper{}.linkerFor(e).SetPrev(nil)\nif l.head != nil {\n- l.head.SetPrev(e)\n+ ElementMapper{}.linkerFor(l.head).SetPrev(e)\n} else {\nl.tail = e\n}\n@@ -79,12 +101,12 @@ func (l *List) PushFront(e Linker) {\n}\n// PushBack inserts the element e at the back of list l.\n-func (l *List) PushBack(e Linker) {\n- e.SetNext(nil)\n- e.SetPrev(l.tail)\n+func (l *List) PushBack(e Element) {\n+ ElementMapper{}.linkerFor(e).SetNext(nil)\n+ ElementMapper{}.linkerFor(e).SetPrev(l.tail)\nif l.tail != nil {\n- l.tail.SetNext(e)\n+ ElementMapper{}.linkerFor(l.tail).SetNext(e)\n} else {\nl.head = e\n}\n@@ -98,8 +120,8 @@ func (l *List) PushBackList(m *List) {\nl.head = m.head\nl.tail = m.tail\n} else if m.head != nil {\n- l.tail.SetNext(m.head)\n- m.head.SetPrev(l.tail)\n+ ElementMapper{}.linkerFor(l.tail).SetNext(m.head)\n+ ElementMapper{}.linkerFor(m.head).SetPrev(l.tail)\nl.tail = m.tail\n}\n@@ -109,46 +131,46 @@ func (l *List) PushBackList(m *List) {\n}\n// InsertAfter inserts e after b.\n-func (l *List) InsertAfter(b, e Linker) {\n- a := b.Next()\n- e.SetNext(a)\n- e.SetPrev(b)\n- b.SetNext(e)\n+func (l *List) InsertAfter(b, e Element) {\n+ a := ElementMapper{}.linkerFor(b).Next()\n+ ElementMapper{}.linkerFor(e).SetNext(a)\n+ ElementMapper{}.linkerFor(e).SetPrev(b)\n+ ElementMapper{}.linkerFor(b).SetNext(e)\nif a != nil {\n- a.SetPrev(e)\n+ ElementMapper{}.linkerFor(a).SetPrev(e)\n} else {\nl.tail = e\n}\n}\n// InsertBefore inserts e before a.\n-func (l *List) InsertBefore(a, e Linker) {\n- b := a.Prev()\n- e.SetNext(a)\n- e.SetPrev(b)\n- a.SetPrev(e)\n+func 
(l *List) InsertBefore(a, e Element) {\n+ b := ElementMapper{}.linkerFor(a).Prev()\n+ ElementMapper{}.linkerFor(e).SetNext(a)\n+ ElementMapper{}.linkerFor(e).SetPrev(b)\n+ ElementMapper{}.linkerFor(a).SetPrev(e)\nif b != nil {\n- b.SetNext(e)\n+ ElementMapper{}.linkerFor(b).SetNext(e)\n} else {\nl.head = e\n}\n}\n// Remove removes e from l.\n-func (l *List) Remove(e Linker) {\n- prev := e.Prev()\n- next := e.Next()\n+func (l *List) Remove(e Element) {\n+ prev := ElementMapper{}.linkerFor(e).Prev()\n+ next := ElementMapper{}.linkerFor(e).Next()\nif prev != nil {\n- prev.SetNext(next)\n+ ElementMapper{}.linkerFor(prev).SetNext(next)\n} else {\nl.head = next\n}\nif next != nil {\n- next.SetPrev(prev)\n+ ElementMapper{}.linkerFor(next).SetPrev(prev)\n} else {\nl.tail = prev\n}\n@@ -160,26 +182,26 @@ func (l *List) Remove(e Linker) {\n//\n// +stateify savable\ntype Entry struct {\n- next Linker\n- prev Linker\n+ next Element\n+ prev Element\n}\n// Next returns the entry that follows e in the list.\n-func (e *Entry) Next() Linker {\n+func (e *Entry) Next() Element {\nreturn e.next\n}\n// Prev returns the entry that precedes e in the list.\n-func (e *Entry) Prev() Linker {\n+func (e *Entry) Prev() Element {\nreturn e.prev\n}\n// SetNext assigns 'entry' as the entry that follows e in the list.\n-func (e *Entry) SetNext(entry Linker) {\n- e.next = entry\n+func (e *Entry) SetNext(elem Element) {\n+ e.next = elem\n}\n// SetPrev assigns 'entry' as the entry that precedes e in the list.\n-func (e *Entry) SetPrev(entry Linker) {\n- e.prev = entry\n+func (e *Entry) SetPrev(elem Element) {\n+ e.prev = elem\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/refs/BUILD",
"new_path": "pkg/refs/BUILD",
"diff": "package(licenses = [\"notice\"]) # Apache 2.0\n+load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\nload(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n+go_template_instance(\n+ name = \"weak_ref_list\",\n+ out = \"weak_ref_list.go\",\n+ package = \"refs\",\n+ prefix = \"weakRef\",\n+ template = \"//pkg/ilist:generic_list\",\n+ types = {\n+ \"Element\": \"*WeakRef\",\n+ \"Linker\": \"*WeakRef\",\n+ },\n+)\n+\ngo_library(\nname = \"refs\",\nsrcs = [\n\"refcounter.go\",\n\"refcounter_state.go\",\n+ \"weak_ref_list.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/refs\",\nvisibility = [\"//:sandbox\"],\n- deps = [\"//pkg/ilist\"],\n)\ngo_test(\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/refs/refcounter.go",
"new_path": "pkg/refs/refcounter.go",
"diff": "@@ -20,8 +20,6 @@ import (\n\"reflect\"\n\"sync\"\n\"sync/atomic\"\n-\n- \"gvisor.googlesource.com/gvisor/pkg/ilist\"\n)\n// RefCounter is the interface to be implemented by objects that are reference\n@@ -61,7 +59,7 @@ type WeakRefUser interface {\n//\n// +stateify savable\ntype WeakRef struct {\n- ilist.Entry `state:\"nosave\"`\n+ weakRefEntry `state:\"nosave\"`\n// obj is an atomic value that points to the refCounter.\nobj atomic.Value `state:\".(savedReference)\"`\n@@ -195,7 +193,7 @@ type AtomicRefCount struct {\nmu sync.Mutex `state:\"nosave\"`\n// weakRefs is our collection of weak references.\n- weakRefs ilist.List `state:\"nosave\"`\n+ weakRefs weakRefList `state:\"nosave\"`\n}\n// ReadRefs returns the current number of references. The returned count is\n@@ -276,7 +274,7 @@ func (r *AtomicRefCount) DecRefWithDestructor(destroy func()) {\n// return false due to the reference count check.\nr.mu.Lock()\nfor !r.weakRefs.Empty() {\n- w := r.weakRefs.Front().(*WeakRef)\n+ w := r.weakRefs.Front()\n// Capture the callback because w cannot be touched\n// after it's zapped -- the owner is free it reuse it\n// after that.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/BUILD",
"new_path": "pkg/sentry/fs/BUILD",
"diff": "@@ -78,6 +78,7 @@ go_template_instance(\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n\"Linker\": \"*Dirent\",\n+ \"Element\": \"*Dirent\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"pendingSignal\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*pendingSignal\",\n\"Linker\": \"*pendingSignal\",\n},\n)\n@@ -21,6 +22,7 @@ go_template_instance(\nprefix = \"processGroup\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*ProcessGroup\",\n\"Linker\": \"*ProcessGroup\",\n},\n)\n@@ -43,6 +45,7 @@ go_template_instance(\nprefix = \"session\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*Session\",\n\"Linker\": \"*Session\",\n},\n)\n@@ -54,6 +57,7 @@ go_template_instance(\nprefix = \"task\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*Task\",\n\"Linker\": \"*Task\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/futex/BUILD",
"new_path": "pkg/sentry/kernel/futex/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"waiter\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*Waiter\",\n\"Linker\": \"*Waiter\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/semaphore/BUILD",
"new_path": "pkg/sentry/kernel/semaphore/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"waiter\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*waiter\",\n\"Linker\": \"*waiter\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/BUILD",
"new_path": "pkg/sentry/mm/BUILD",
"diff": "@@ -67,6 +67,7 @@ go_template_instance(\nprefix = \"io\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*ioResult\",\n\"Linker\": \"*ioResult\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/fragmentation/BUILD",
"new_path": "pkg/tcpip/network/fragmentation/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"reassembler\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*reassembler\",\n\"Linker\": \"*reassembler\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/ping/BUILD",
"new_path": "pkg/tcpip/transport/ping/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"pingPacket\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*pingPacket\",\n\"Linker\": \"*pingPacket\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"segment\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*segment\",\n\"Linker\": \"*segment\",\n},\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/BUILD",
"new_path": "pkg/tcpip/transport/udp/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_template_instance(\nprefix = \"udpPacket\",\ntemplate = \"//pkg/ilist:generic_list\",\ntypes = {\n+ \"Element\": \"*udpPacket\",\n\"Linker\": \"*udpPacket\",\n},\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Distinguish Element and Linker for ilist.
Furthermore, allow for the specification of an ElementMapper. This allows a
single "Element" type to exist on multiple inline lists, and work without
having to embed the entry type.
This is a requisite change for supporting a per-Inode list of Dirents.
PiperOrigin-RevId: 211467497
Change-Id: If2768999b43e03fdaecf8ed15f435fe37518d163 |
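A rough, self-contained sketch of the idea behind ElementMapper (simplified; the real gVisor lists are code-generated from the template above): one element type can sit on two intrusive lists at once because each list's mapper selects a different embedded entry as the linker.

```go
package main

import "fmt"

// entry holds the intrusive links for one list membership.
type entry struct {
	next, prev *node
}

// node participates in two lists simultaneously by embedding two entries.
type node struct {
	name         string
	readyEntry   entry
	expiredEntry entry
}

// readyMapper and expiredMapper play the ElementMapper role: they map an
// element (*node) to the linker (the embedded entry) used by one list.
type readyMapper struct{}

func (readyMapper) linkerFor(n *node) *entry { return &n.readyEntry }

type expiredMapper struct{}

func (expiredMapper) linkerFor(n *node) *entry { return &n.expiredEntry }

func main() {
	n := &node{name: "example"}
	// The two lists manipulate disjoint link fields of the same element.
	fmt.Println(readyMapper{}.linkerFor(n) != expiredMapper{}.linkerFor(n)) // true
}
```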
259,881 | 04.09.2018 13:28:37 | 25,200 | 3944cb41cbef64ac507e87f258441000a46424d5 | /proc/PID/mounts is not tab-delimited | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/mounts.go",
"new_path": "pkg/sentry/fs/proc/mounts.go",
"diff": "@@ -173,7 +173,7 @@ func (mf *mountsFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHan\nvar buf bytes.Buffer\nforEachMountSource(mf.t, func(mountPath string, m *fs.MountSource) {\n- // Format (tab-separated):\n+ // Format:\n// <special device or remote filesystem> <mount point> <filesystem type> <mount options> <needs dump> <fsck order>\n//\n// We use the filesystem name as the first field, since there\n@@ -191,7 +191,7 @@ func (mf *mountsFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHan\nif m.Filesystem != nil {\nname = m.Filesystem.Name()\n}\n- fmt.Fprintf(&buf, \"%s\\t%s\\t%s\\t%s\\t%d\\t%d\\n\", \"none\", mountPath, name, opts, 0, 0)\n+ fmt.Fprintf(&buf, \"%s %s %s %s %d %d\\n\", \"none\", mountPath, name, opts, 0, 0)\n})\nreturn []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*mountsFile)(nil)}}, 0\n"
}
] | Go | Apache License 2.0 | google/gvisor | /proc/PID/mounts is not tab-delimited
PiperOrigin-RevId: 211513847
Change-Id: Ib484dd2d921c3e5d70d0e410cd973d3bff4f6b73 |
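For illustration, a small host-side Go program (not sentry code) that parses the space-separated format the sentry now emits; each line reads `<device> <mount point> <fstype> <options> <dump> <fsck order>`.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/mounts")
	if err != nil {
		fmt.Fprintln(os.Stderr, "open:", err)
		return
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		// Fields are separated by single spaces, matching Linux's format.
		fields := strings.Fields(s.Text())
		if len(fields) >= 3 {
			fmt.Printf("%-10s at %-20s type %s\n", fields[0], fields[1], fields[2])
		}
	}
}
```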
260,013 | 05.09.2018 09:20:18 | 25,200 | 2b8dae0bc5594f7088dd028268efaedbb5a72507 | Open(2) isn't honoring O_NOFOLLOW | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/file.go",
"new_path": "pkg/abi/linux/file.go",
"diff": "@@ -37,6 +37,7 @@ const (\nO_DIRECT = 00040000\nO_LARGEFILE = 00100000\nO_DIRECTORY = 00200000\n+ O_NOFOLLOW = 00400000\nO_CLOEXEC = 02000000\nO_SYNC = 04010000\nO_PATH = 010000000\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -136,7 +136,8 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\nreturn 0, err\n}\n- err = fileOpOn(t, dirFD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {\n+ resolve := flags&linux.O_NOFOLLOW == 0\n+ err = fileOpOn(t, dirFD, path, resolve, func(root *fs.Dirent, d *fs.Dirent) error {\n// First check a few things about the filesystem before trying to get the file\n// reference.\n//\n@@ -147,6 +148,10 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\nreturn err\n}\n+ if fs.IsSymlink(d.Inode.StableAttr) && !resolve {\n+ return syserror.ELOOP\n+ }\n+\nfileFlags := linuxToFlags(flags)\n// Linux always adds the O_LARGEFILE flag when running in 64-bit mode.\nfileFlags.LargeFile = true\n"
}
] | Go | Apache License 2.0 | google/gvisor | Open(2) isn't honoring O_NOFOLLOW
PiperOrigin-RevId: 211644897
Change-Id: I882ed827a477d6c03576463ca5bf2d6351892b90 |
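A quick host-side demonstration (not sentry code) of the semantics this change implements: opening a symlink with O_NOFOLLOW must fail with ELOOP rather than follow the link.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

func main() {
	dir, err := os.MkdirTemp("", "nofollow")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target")
	link := filepath.Join(dir, "link")
	if err := os.WriteFile(target, []byte("data"), 0644); err != nil {
		panic(err)
	}
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}

	// O_NOFOLLOW refuses to dereference a symlink in the final component.
	_, err = syscall.Open(link, syscall.O_RDONLY|syscall.O_NOFOLLOW, 0)
	fmt.Println("got ELOOP:", err == syscall.ELOOP) // expected: true on Linux
}
```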
259,992 | 05.09.2018 13:12:15 | 25,200 | 1d22d87fdc464b0641eca69f730777c27984c2ff | Move multi-container test to a single file | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -38,6 +38,7 @@ go_test(\nsrcs = [\n\"container_test.go\",\n\"fs_test.go\",\n+ \"multi_container_test.go\",\n],\ndata = [\n\":uds_test_app\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -38,7 +38,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n- \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n@@ -1366,200 +1365,6 @@ func TestAbbreviatedIDs(t *testing.T) {\n}\n}\n-// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n-// containers in the same sandbox.\n-func TestMultiContainerSanity(t *testing.T) {\n- for _, conf := range configs(all...) {\n- t.Logf(\"Running test with conf: %+v\", conf)\n-\n- containerIDs := []string{\n- testutil.UniqueContainerID(),\n- testutil.UniqueContainerID(),\n- }\n- containerAnnotations := []map[string]string{\n- // The first container creates a sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n- },\n- // The second container creates a container within the first\n- // container's sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n- specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n- },\n- }\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n- // Setup the containers.\n- containers := make([]*Container, 0, len(containerIDs))\n- for i, annotations := range containerAnnotations {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- spec.Annotations = annotations\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n- }\n-\n- expectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- {\n- UID: 0,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- }\n-\n- // Check via ps that multiple processes are running.\n- if err := waitForProcessList(containers[0], expectedPL); err != nil {\n- t.Errorf(\"failed to wait for sleep to start: %v\", err)\n- }\n- }\n-}\n-\n-func TestMultiContainerWait(t *testing.T) {\n- t.Skip(\"Test is flakey.\") // TODO: Remove.\n- containerIDs := []string{\n- testutil.UniqueContainerID(),\n- testutil.UniqueContainerID(),\n- }\n- containerAnnotations := []map[string]string{\n- // The first container creates a sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n- },\n- // The second container creates a container within the first\n- // container's sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n- specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n- },\n- }\n- args := [][]string{\n- // The first container should run the entire duration of the\n- // test.\n- {\"sleep\", \"100\"},\n- // We'll wait on the second container, which is much shorter\n- // lived.\n- {\"sleep\", \"1\"},\n- }\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer 
os.RemoveAll(rootDir)\n-\n- // Setup the containers.\n- containers := make([]*Container, 0, len(containerIDs))\n- for i, annotations := range containerAnnotations {\n- spec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\n- spec.Annotations = annotations\n- conf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n- }\n-\n- expectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- {\n- UID: 0,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- }\n-\n- // Check via ps that multiple processes are running.\n- if err := waitForProcessList(containers[0], expectedPL); err != nil {\n- t.Errorf(\"failed to wait for sleep to start: %v\", err)\n- }\n-\n- // Wait on the short lived container from multiple goroutines.\n- wg := sync.WaitGroup{}\n- for i := 0; i < 3; i++ {\n- wg.Add(1)\n- go func() {\n- defer wg.Done()\n- if ws, err := containers[1].Wait(); err != nil {\n- t.Errorf(\"failed to wait for process %q: %v\", strings.Join(containers[1].Spec.Process.Args, \" \"), err)\n- } else if es := ws.ExitStatus(); es != 0 {\n- t.Errorf(\"process %q exited with non-zero status %d\", strings.Join(containers[1].Spec.Process.Args, \" \"), es)\n- }\n- if _, err := containers[1].Wait(); err == nil {\n- t.Errorf(\"wait for stopped process %q should fail\", strings.Join(containers[1].Spec.Process.Args, \" \"))\n- }\n-\n- // After Wait returns, ensure that the root container is running and\n- // the child has finished.\n- if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n- t.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n- }\n- }()\n- }\n-\n- // Also wait via PID.\n- for i := 0; i < 3; i++ {\n- wg.Add(1)\n- go func() {\n- defer wg.Done()\n- const pid = 2\n- if ws, err := containers[0].WaitPID(pid); err != nil {\n- t.Errorf(\"failed to wait for PID %d: %v\", pid, err)\n- } else if es := ws.ExitStatus(); es != 0 {\n- t.Errorf(\"PID %d exited with non-zero status %d\", pid, es)\n- }\n- if _, err := containers[0].WaitPID(pid); err == nil {\n- t.Errorf(\"wait for stopped PID %d should fail\", pid)\n- }\n- }()\n- }\n-\n- wg.Wait()\n-}\n-\n// Check that modifications to a volume mount are propigated into and out of\n// the sandbox.\nfunc TestContainerVolumeContentsShared(t *testing.T) {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/container/multi_container_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package container\n+\n+import (\n+ \"os\"\n+ \"strings\"\n+ \"sync\"\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n+// containers in the same sandbox.\n+func TestMultiContainerSanity(t *testing.T) {\n+ for _, conf := range configs(all...) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ containerIDs := []string{\n+ testutil.UniqueContainerID(),\n+ testutil.UniqueContainerID(),\n+ }\n+ containerAnnotations := []map[string]string{\n+ // The first container creates a sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n+ },\n+ // The second container creates a container within the first\n+ // container's sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n+ specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n+ },\n+ }\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Setup the containers.\n+ containers := make([]*Container, 0, len(containerIDs))\n+ for i, annotations := range containerAnnotations {\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec.Annotations = annotations\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ expectedPL := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ {\n+ UID: 0,\n+ PID: 2,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+ }\n+}\n+\n+func TestMultiContainerWait(t *testing.T) {\n+ t.Skip(\"Test is flakey.\") // TODO: Remove.\n+ containerIDs := []string{\n+ testutil.UniqueContainerID(),\n+ testutil.UniqueContainerID(),\n+ }\n+ containerAnnotations := []map[string]string{\n+ // The first container creates a sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n+ },\n+ // The second container creates a container within the first\n+ // container's sandbox.\n+ map[string]string{\n+ 
specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n+ specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n+ },\n+ }\n+ args := [][]string{\n+ // The first container should run the entire duration of the\n+ // test.\n+ {\"sleep\", \"100\"},\n+ // We'll wait on the second container, which is much shorter\n+ // lived.\n+ {\"sleep\", \"1\"},\n+ }\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Setup the containers.\n+ containers := make([]*Container, 0, len(containerIDs))\n+ for i, annotations := range containerAnnotations {\n+ spec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\n+ spec.Annotations = annotations\n+ conf := testutil.TestConfig()\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ expectedPL := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ {\n+ UID: 0,\n+ PID: 2,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Wait on the short lived container from multiple goroutines.\n+ wg := sync.WaitGroup{}\n+ for i := 0; i < 3; i++ {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ if ws, err := containers[1].Wait(); err != nil {\n+ t.Errorf(\"failed to wait for process %q: %v\", strings.Join(containers[1].Spec.Process.Args, \" \"), err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"process %q exited with non-zero status %d\", strings.Join(containers[1].Spec.Process.Args, \" \"), es)\n+ }\n+ if _, err := containers[1].Wait(); err == nil {\n+ t.Errorf(\"wait for stopped process %q should fail\", strings.Join(containers[1].Spec.Process.Args, \" \"))\n+ }\n+\n+ // After Wait returns, ensure that the root container is running and\n+ // the child has finished.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n+ }\n+ }()\n+ }\n+\n+ // Also wait via PID.\n+ for i := 0; i < 3; i++ {\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ const pid = 2\n+ if ws, err := containers[0].WaitPID(pid); err != nil {\n+ t.Errorf(\"failed to wait for PID %d: %v\", pid, err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Errorf(\"PID %d exited with non-zero status %d\", pid, es)\n+ }\n+ if _, err := containers[0].WaitPID(pid); err == nil {\n+ t.Errorf(\"wait for stopped PID %d should fail\", pid)\n+ }\n+ }()\n+ }\n+\n+ wg.Wait()\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move multi-container test to a single file
PiperOrigin-RevId: 211685288
Change-Id: I7872f2a83fcaaa54f385e6e567af6e72320c5aa0 |
259,992 | 05.09.2018 13:16:49 | 25,200 | 4b57fd920d2d9fe3c8351d5b73b496902c928d95 | Add MADVISE to fsgofer seccomp profile | [
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/filter/config.go",
"new_path": "runsc/fsgofer/filter/config.go",
"diff": "@@ -105,6 +105,7 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_GETTIMEOFDAY: {},\nsyscall.SYS_LINKAT: {},\nsyscall.SYS_LSEEK: {},\n+ syscall.SYS_MADVISE: {},\nsyscall.SYS_MKDIRAT: {},\nsyscall.SYS_MMAP: []seccomp.Rule{\n{\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add MADVISE to fsgofer seccomp profile
PiperOrigin-RevId: 211686037
Change-Id: I0e776ca760b65ba100e495f471b6e811dbd6590a |
259,992 | 05.09.2018 14:01:25 | 25,200 | 0c7cfca0da234ae34497c420a23fea91a47a566c | Running container should have a valid sandbox | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -517,6 +517,7 @@ func (c *Container) Destroy() error {\nlog.Warningf(\"Failed to destroy sandbox %q: %v\", c.Sandbox.ID, err)\n}\n}\n+ c.Status = Stopped\nc.Sandbox = nil\nif c.GoferPid != 0 {\n@@ -536,15 +537,11 @@ func (c *Container) Destroy() error {\nreturn fmt.Errorf(\"error deleting container root directory %q: %v\", c.Root, err)\n}\n- c.Status = Stopped\nreturn nil\n}\n// IsRunning returns true if the sandbox or gofer process is running.\nfunc (c *Container) IsRunning() bool {\n- if c.Status == Stopped {\n- return false\n- }\nif c.Sandbox != nil && c.Sandbox.IsRunning() {\nreturn true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -20,34 +20,42 @@ import (\n\"sync\"\n\"testing\"\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n+func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {\n+ var specs []*specs.Spec\n+ var ids []string\n+ rootID := testutil.UniqueContainerID()\n+\n+ for i, cmd := range cmds {\n+ spec := testutil.NewSpecWithArgs(cmd...)\n+ if i == 0 {\n+ spec.Annotations = map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n+ }\n+ ids = append(ids, rootID)\n+ } else {\n+ spec.Annotations = map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n+ specutils.ContainerdSandboxIDAnnotation: rootID,\n+ }\n+ ids = append(ids, testutil.UniqueContainerID())\n+ }\n+ specs = append(specs, spec)\n+ }\n+ return specs, ids\n+}\n+\n// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n// containers in the same sandbox.\nfunc TestMultiContainerSanity(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- containerIDs := []string{\n- testutil.UniqueContainerID(),\n- testutil.UniqueContainerID(),\n- }\n- containerAnnotations := []map[string]string{\n- // The first container creates a sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n- },\n- // The second container creates a container within the first\n- // container's sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n- specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n- },\n- }\n-\nrootDir, err := testutil.SetupRootDir()\nif err != nil {\nt.Fatalf(\"error creating root dir: %v\", err)\n@@ -55,16 +63,16 @@ func TestMultiContainerSanity(t *testing.T) {\ndefer os.RemoveAll(rootDir)\n// Setup the containers.\n- containers := make([]*Container, 0, len(containerIDs))\n- for i, annotations := range containerAnnotations {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- spec.Annotations = annotations\n+ sleep := []string{\"sleep\", \"100\"}\n+ specs, ids := createSpecs(sleep, sleep)\n+ var containers []*Container\n+ for i, spec := range specs {\nbundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\ndefer os.RemoveAll(bundleDir)\n- cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -75,24 +83,11 @@ func TestMultiContainerSanity(t *testing.T) {\ncontainers = append(containers, cont)\n}\n+ // Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- {\n- UID: 0,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n+ {PID: 1, Cmd: \"sleep\"},\n+ {PID: 2, Cmd: \"sleep\"},\n}\n-\n- // Check via ps that multiple processes are running.\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n@@ -100,50 +95,28 @@ func TestMultiContainerSanity(t *testing.T) {\n}\nfunc TestMultiContainerWait(t *testing.T) {\n- t.Skip(\"Test is 
flakey.\") // TODO: Remove.\n- containerIDs := []string{\n- testutil.UniqueContainerID(),\n- testutil.UniqueContainerID(),\n- }\n- containerAnnotations := []map[string]string{\n- // The first container creates a sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n- },\n- // The second container creates a container within the first\n- // container's sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n- specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n- },\n- }\n- args := [][]string{\n- // The first container should run the entire duration of the\n- // test.\n- {\"sleep\", \"100\"},\n- // We'll wait on the second container, which is much shorter\n- // lived.\n- {\"sleep\", \"1\"},\n- }\n-\nrootDir, err := testutil.SetupRootDir()\nif err != nil {\nt.Fatalf(\"error creating root dir: %v\", err)\n}\ndefer os.RemoveAll(rootDir)\n+ // The first container should run the entire duration of the test.\n+ cmd1 := []string{\"sleep\", \"100\"}\n+ // We'll wait on the second container, which is much shorter lived.\n+ cmd2 := []string{\"sleep\", \"1\"}\n+ specs, ids := createSpecs(cmd1, cmd2)\n+\n// Setup the containers.\n- containers := make([]*Container, 0, len(containerIDs))\n- for i, annotations := range containerAnnotations {\n- spec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\n- spec.Annotations = annotations\n+ var containers []*Container\n+ for i, spec := range specs {\nconf := testutil.TestConfig()\nbundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\ndefer os.RemoveAll(bundleDir)\n- cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -154,24 +127,11 @@ func TestMultiContainerWait(t *testing.T) {\ncontainers = append(containers, cont)\n}\n+ // Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- {\n- UID: 0,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n+ {PID: 1, Cmd: \"sleep\"},\n+ {PID: 2, Cmd: \"sleep\"},\n}\n-\n- // Check via ps that multiple processes are running.\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n@@ -180,41 +140,42 @@ func TestMultiContainerWait(t *testing.T) {\nwg := sync.WaitGroup{}\nfor i := 0; i < 3; i++ {\nwg.Add(1)\n- go func() {\n+ go func(c *Container) {\ndefer wg.Done()\n- if ws, err := containers[1].Wait(); err != nil {\n- t.Errorf(\"failed to wait for process %q: %v\", strings.Join(containers[1].Spec.Process.Args, \" \"), err)\n+ if ws, err := c.Wait(); err != nil {\n+ t.Errorf(\"failed to wait for process %s: %v\", c.Spec.Process.Args, err)\n} else if es := ws.ExitStatus(); es != 0 {\n- t.Errorf(\"process %q exited with non-zero status %d\", strings.Join(containers[1].Spec.Process.Args, \" \"), es)\n+ t.Errorf(\"process %s exited with non-zero status %d\", c.Spec.Process.Args, es)\n}\n- if _, err := containers[1].Wait(); err == nil {\n- t.Errorf(\"wait for stopped process %q should fail\", strings.Join(containers[1].Spec.Process.Args, \" \"))\n+ if _, err := c.Wait(); err == nil {\n+ t.Errorf(\"wait for stopped process %s should fail\", 
c.Spec.Process.Args)\n}\n-\n- // After Wait returns, ensure that the root container is running and\n- // the child has finished.\n- if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n- t.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n- }\n- }()\n+ }(containers[1])\n}\n// Also wait via PID.\nfor i := 0; i < 3; i++ {\nwg.Add(1)\n- go func() {\n+ go func(c *Container) {\ndefer wg.Done()\nconst pid = 2\n- if ws, err := containers[0].WaitPID(pid); err != nil {\n+ if ws, err := c.WaitPID(pid); err != nil {\nt.Errorf(\"failed to wait for PID %d: %v\", pid, err)\n} else if es := ws.ExitStatus(); es != 0 {\nt.Errorf(\"PID %d exited with non-zero status %d\", pid, es)\n}\n- if _, err := containers[0].WaitPID(pid); err == nil {\n+ if _, err := c.WaitPID(pid); err == nil {\nt.Errorf(\"wait for stopped PID %d should fail\", pid)\n}\n- }()\n+ // TODO: use 'container[1]' when PID namespace is supported.\n+ }(containers[0])\n}\nwg.Wait()\n+\n+ // After Wait returns, ensure that the root container is running and\n+ // the child has finished.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n+ }\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Running container should have a valid sandbox
PiperOrigin-RevId: 211693868
Change-Id: Iea340dd78bf26ae6409c310b63c17cc611c2055f |
259,992 | 05.09.2018 14:28:52 | 25,200 | 12aef686af3f37029e619602286f00a40144c52d | Enabled bind mounts in sub-containers
With multi-gofers, bind mounts in sub-containers should
just work. Removed restrictions and added test. There are
also a few cleanups along the way, e.g. retry unmounting
in case cleanup races with gofer teardown. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fds.go",
"new_path": "runsc/boot/fds.go",
"diff": "@@ -28,11 +28,6 @@ import (\n// createFDMap creates an fd map that contains stdin, stdout, and stderr. If\n// console is true, then ioctl calls will be passed through to the host fd.\n-//\n-// TODO: We currently arn't passing any FDs in to the sandbox, so\n-// there's not much else for this function to do. It will get more complicated\n-// when gofers enter the picture. Also the LISTEN_FDS environment variable\n-// allows passing arbitrary FDs to the sandbox, which we do not yet support.\nfunc createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool) (*kernel.FDMap, error) {\nfdm := k.NewFDMap()\ndefer fdm.DecRef()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -685,11 +685,6 @@ func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spe\n// Mount all submounts.\nmounts := compileMounts(spec)\nfor _, m := range mounts {\n- // TODO: Enable bind mounts in child containers.\n- if m.Type == bind {\n- log.Infof(\"Bind mounts in child containers are not yet supported: %+v\", m)\n- continue\n- }\ndest := filepath.Join(containerRoot, m.Destination)\nif err := mountSubmount(rootCtx, conf, k.RootMountNamespace(), fds, m, mounts, dest); err != nil {\nreturn fmt.Errorf(\"error mounting filesystem for container: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -520,24 +520,35 @@ func (c *Container) Destroy() error {\nc.Status = Stopped\nc.Sandbox = nil\n+ if err := c.destroyGofer(); err != nil {\n+ return fmt.Errorf(\"error destroying gofer: %v\", err)\n+ }\n+\n+ if err := os.RemoveAll(c.Root); err != nil && !os.IsNotExist(err) {\n+ return fmt.Errorf(\"error deleting container root directory %q: %v\", c.Root, err)\n+ }\n+\n+ return nil\n+}\n+\n+func (c *Container) destroyGofer() error {\nif c.GoferPid != 0 {\nlog.Debugf(\"Killing gofer for container %q, PID: %d\", c.ID, c.GoferPid)\nif err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {\nlog.Warningf(\"error sending signal %d to pid %d: %v\", syscall.SIGKILL, c.GoferPid, err)\n- } else {\n- c.GoferPid = 0\n}\n}\n- if err := destroyFS(c.Spec); err != nil {\n- return fmt.Errorf(\"error destroying container fs: %v\", err)\n- }\n-\n- if err := os.RemoveAll(c.Root); err != nil && !os.IsNotExist(err) {\n- return fmt.Errorf(\"error deleting container root directory %q: %v\", c.Root, err)\n+ // Gofer process may take some time to teardown. Retry in case of failure.\n+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)\n+ defer cancel()\n+ b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n+ err := backoff.Retry(func() error { return destroyFS(c.Spec) }, b)\n+ if err == nil {\n+ // Success!\n+ c.GoferPid = 0\n}\n-\n- return nil\n+ return err\n}\n// IsRunning returns true if the sandbox or gofer process is running.\n@@ -549,8 +560,9 @@ func (c *Container) IsRunning() bool {\n// Send a signal 0 to the gofer process.\nif err := syscall.Kill(c.GoferPid, 0); err == nil {\nlog.Warningf(\"Found orphan gofer process, pid: %d\", c.GoferPid)\n- // Attempt to kill gofer if it's orphan.\n- syscall.Kill(c.GoferPid, syscall.SIGKILL)\n+ if err := c.destroyGofer(); err != nil {\n+ log.Warningf(\"Error destroying gofer: %v\", err)\n+ }\n// Don't wait for gofer to die. Return 'running' and hope gofer is dead\n// next time around.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "package container\nimport (\n+ \"io/ioutil\"\n\"os\"\n+ \"path/filepath\"\n\"strings\"\n\"sync\"\n\"testing\"\n@@ -179,3 +181,59 @@ func TestMultiContainerWait(t *testing.T) {\nt.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n}\n}\n+\n+// TestMultiContainerMount tests that bind mounts can be used with multiple\n+// containers.\n+func TestMultiContainerMount(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ cmd1 := []string{\"sleep\", \"100\"}\n+\n+ // 'src != dst' ensures that 'dst' doesn't exist in the host and must be\n+ // properly mapped inside the container to work.\n+ src, err := ioutil.TempDir(testutil.TmpDir(), \"container\")\n+ if err != nil {\n+ t.Fatal(\"ioutil.TempDir failed:\", err)\n+ }\n+ dst := src + \".dst\"\n+ cmd2 := []string{\"touch\", filepath.Join(dst, \"file\")}\n+\n+ sps, ids := createSpecs(cmd1, cmd2)\n+ sps[1].Mounts = append(sps[1].Mounts, specs.Mount{\n+ Source: src,\n+ Destination: dst,\n+ Type: \"bind\",\n+ })\n+\n+ // Setup the containers.\n+ var containers []*Container\n+ for i, spec := range sps {\n+ conf := testutil.TestConfig()\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ ws, err := containers[1].Wait()\n+ if err != nil {\n+ t.Error(\"error waiting on container:\", err)\n+ }\n+ if !ws.Exited() || ws.ExitStatus() != 0 {\n+ t.Error(\"container failed, waitStatus:\", ws)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enabled bind mounts in sub-containers
With multi-gofers, bind mounts in sub-containers should
just work. Removed restrictions and added test. There are
also a few cleanups along the way, e.g. retry unmounting
in case cleanup races with gofer teardown.
PiperOrigin-RevId: 211699569
Change-Id: Ic0a69c29d7c31cd7e038909cc686c6ac98703374 |
259,942 | 05.09.2018 16:46:47 | 25,200 | b3b66dbd1f7f1d5411fd1f7ea76fc5ea7027ec35 | Enable constructing a Prependable from a View without allocating. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/buffer/prependable.go",
"new_path": "pkg/tcpip/buffer/prependable.go",
"diff": "@@ -32,6 +32,15 @@ func NewPrependable(size int) Prependable {\nreturn Prependable{buf: NewView(size), usedIdx: size}\n}\n+// NewPrependableFromView creates an entirely-used Prependable from a View.\n+//\n+// NewPrependableFromView takes ownership of v. Note that since the entire\n+// prependable is used, further attempts to call Prepend will note that size >\n+// p.usedIdx and return nil.\n+func NewPrependableFromView(v View) Prependable {\n+ return Prependable{buf: v, usedIdx: 0}\n+}\n+\n// Prepend reserves the requested space in front of the buffer, returning a\n// slice that represents the reserved space.\nfunc (p *Prependable) Prepend(size int) []byte {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable constructing a Prependable from a View without allocating.
PiperOrigin-RevId: 211722525
Change-Id: Ie73753fd09d67d6a2ce70cfe2d4ecf7275f09ce0 |
259,992 | 05.09.2018 18:05:59 | 25,200 | 41b56696c4923276c6269812bb3dfa7643dab65d | Imported FD in exec was leaking
Imported file needs to be closed after it's
been imported. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -119,6 +119,10 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\nreturn err\n}\ndefer file.DecRef()\n+\n+ // We're done with this file.\n+ f.Close()\n+\nif err := fdm.NewFDAt(kdefs.FD(appFD), file, kernel.FDFlags{}, l); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/BUILD",
"new_path": "pkg/sentry/fs/host/BUILD",
"diff": "@@ -55,6 +55,7 @@ go_test(\nname = \"host_test\",\nsize = \"small\",\nsrcs = [\n+ \"descriptor_test.go\",\n\"fs_test.go\",\n\"inode_test.go\",\n\"socket_test.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/descriptor.go",
"new_path": "pkg/sentry/fs/host/descriptor.go",
"diff": "@@ -31,9 +31,9 @@ type descriptor struct {\n// donated is true if the host fd was donated by another process.\ndonated bool\n- // If origFD >= 0, it is the host fd that this file was\n- // originally created from, which must be available at time\n- // of restore. Only valid if donated is true.\n+ // If origFD >= 0, it is the host fd that this file was originally created\n+ // from, which must be available at time of restore. The FD can be closed\n+ // after descriptor is created. Only set if donated is true.\norigFD int\n// wouldBlock is true if value (below) points to a file that can\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/host/descriptor_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package host\n+\n+import (\n+ \"io/ioutil\"\n+ \"path/filepath\"\n+ \"syscall\"\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter\"\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier\"\n+)\n+\n+func TestDescriptorRelease(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ saveable bool\n+ wouldBlock bool\n+ }{\n+ {name: \"all false\"},\n+ {name: \"saveable\", saveable: true},\n+ {name: \"wouldBlock\", wouldBlock: true},\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(\"\", \"descriptor_test\")\n+ if err != nil {\n+ t.Fatal(\"ioutil.TempDir() failed:\", err)\n+ }\n+\n+ fd, err := syscall.Open(filepath.Join(dir, \"file\"), syscall.O_RDWR|syscall.O_CREAT, 0666)\n+ if err != nil {\n+ t.Fatal(\"failed to open temp file:\", err)\n+ }\n+\n+ // FD ownership is transferred to the descritor.\n+ queue := &waiter.Queue{}\n+ d, err := newDescriptor(fd, false /* donated*/, tc.saveable, tc.wouldBlock, queue)\n+ if err != nil {\n+ syscall.Close(fd)\n+ t.Fatalf(\"newDescriptor(%d, %t, false, %t, queue) failed, err: %v\", fd, tc.saveable, tc.wouldBlock, err)\n+ }\n+ if tc.saveable {\n+ if d.origFD < 0 {\n+ t.Errorf(\"saveable descriptor must preserve origFD, desc: %+v\", d)\n+ }\n+ }\n+ if tc.wouldBlock {\n+ if !fdnotifier.HasFD(int32(d.value)) {\n+ t.Errorf(\"FD not registered with notifier, desc: %+v\", d)\n+ }\n+ }\n+\n+ oldVal := d.value\n+ d.Release()\n+ if d.value != -1 {\n+ t.Errorf(\"d.value want: -1, got: %d\", d.value)\n+ }\n+ if tc.wouldBlock {\n+ if fdnotifier.HasFD(int32(oldVal)) {\n+ t.Errorf(\"FD not unregistered with notifier, desc: %+v\", d)\n+ }\n+ }\n+ })\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Imported FD in exec was leaking
Imported file needs to be closed after it's
been imported.
PiperOrigin-RevId: 211732472
Change-Id: Ia9249210558b77be076bcce465b832a22eed301f |
259,992 | 05.09.2018 18:31:37 | 25,200 | 5f0002fc83a77a39d9a2ef1443bc6c18e22ea779 | Use container's capabilities in exec
When no capabilities are specified in exec, use the
container's capabilities to match runc's behavior. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/exec.go",
"new_path": "runsc/cmd/exec.go",
"diff": "@@ -115,16 +115,22 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nFatalf(\"error loading sandbox: %v\", err)\n}\n+ // Replace empty settings with defaults from container.\nif e.WorkingDirectory == \"\" {\ne.WorkingDirectory = c.Spec.Process.Cwd\n}\n-\nif e.Envv == nil {\ne.Envv, err = resolveEnvs(c.Spec.Process.Env, ex.env)\nif err != nil {\nFatalf(\"error getting environment variables: %v\", err)\n}\n}\n+ if e.Capabilities == nil {\n+ e.Capabilities, err = specutils.Capabilities(c.Spec.Process.Capabilities)\n+ if err != nil {\n+ Fatalf(\"error creating capabilities: %v\", err)\n+ }\n+ }\n// containerd expects an actual process to represent the container being\n// executed. If detach was specified, starts a child in non-detach mode,\n@@ -265,10 +271,14 @@ func (ex *Exec) argsFromCLI(argv []string) (*control.ExecArgs, error) {\nextraKGIDs = append(extraKGIDs, auth.KGID(kgid))\n}\n- caps, err := capabilities(ex.caps)\n+ var caps *auth.TaskCapabilities\n+ if len(ex.caps) > 0 {\n+ var err error\n+ caps, err = capabilities(ex.caps)\nif err != nil {\nreturn nil, fmt.Errorf(\"capabilities error: %v\", err)\n}\n+ }\nreturn &control.ExecArgs{\nArgv: argv,\n@@ -299,10 +309,14 @@ func (ex *Exec) argsFromProcessFile() (*control.ExecArgs, error) {\n// to ExecArgs.\nfunc argsFromProcess(p *specs.Process) (*control.ExecArgs, error) {\n// Create capabilities.\n- caps, err := specutils.Capabilities(p.Capabilities)\n+ var caps *auth.TaskCapabilities\n+ if p.Capabilities != nil {\n+ var err error\n+ caps, err = specutils.Capabilities(p.Capabilities)\nif err != nil {\nreturn nil, fmt.Errorf(\"error creating capabilities: %v\", err)\n}\n+ }\n// Convert the spec's additional GIDs to KGIDs.\nextraKGIDs := make([]auth.KGID, 0, len(p.User.AdditionalGids))\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -44,7 +44,7 @@ func TestHelloWorld(t *testing.T) {\n}\ndefer d.CleanUp()\n- if err := d.WaitForOutput(\"Hello from Docker!\", 5*time.Second); err != nil {\n+ if _, err := d.WaitForOutput(\"Hello from Docker!\", 5*time.Second); err != nil {\nt.Fatalf(\"docker didn't say hello: %v\", err)\n}\n}\n@@ -160,7 +160,7 @@ func TestMysql(t *testing.T) {\ndefer d.CleanUp()\n// Wait until it's up and running.\n- if err := d.WaitForOutput(\"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\n+ if _, err := d.WaitForOutput(\"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n@@ -184,10 +184,10 @@ func TestMysql(t *testing.T) {\ndefer client.CleanUp()\n// Ensure file executed to the end and shutdown mysql.\n- if err := client.WaitForOutput(\"--------------\\nshutdown\\n--------------\", 15*time.Second); err != nil {\n+ if _, err := client.WaitForOutput(\"--------------\\nshutdown\\n--------------\", 15*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n- if err := d.WaitForOutput(\"mysqld: Shutdown complete\", 30*time.Second); err != nil {\n+ if _, err := d.WaitForOutput(\"mysqld: Shutdown complete\", 30*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/BUILD",
"new_path": "runsc/test/integration/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_test(\nname = \"integration_test\",\nsize = \"large\",\nsrcs = [\n+ \"exec_test.go\",\n\"integration_test.go\",\n],\nembed = [\":integration\"],\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/integration/exec_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package image provides end-to-end integration tests for runsc. These tests require\n+// docker and runsc to be installed on the machine. To set it up, run:\n+//\n+// ./runsc/test/install.sh [--runtime <name>]\n+//\n+// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n+// environment variable (default: runsc-test).\n+//\n+// Each test calls docker commands to start up a container, and tests that it is\n+// behaving properly, with various runsc commands. The container is killed and deleted\n+// at the end.\n+\n+package integration\n+\n+import (\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+func TestExecCapabilities(t *testing.T) {\n+ if err := testutil.Pull(\"alpine\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := testutil.MakeDocker(\"exec-test\")\n+\n+ // Start the container.\n+ if _, err := d.Run(\"alpine\", \"sh\", \"-c\", \"cat /proc/self/status; sleep 100\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ want, err := d.WaitForOutput(\"CapEff:\\t[0-9a-f]+\\n\", 5*time.Second)\n+ if err != nil {\n+ t.Fatalf(\"WaitForOutput() timeout: %v\", err)\n+ }\n+ t.Log(\"Root capabilities:\", want)\n+\n+ // Now check that exec'd process capabilities match the root.\n+ got, err := d.Exec(\"grep\", \"CapEff:\", \"/proc/self/status\")\n+ if err != nil {\n+ t.Fatalf(\"docker exec failed: %v\", err)\n+ }\n+ if got != want {\n+ t.Errorf(\"wrong capabilities, got: %q, want: %q\", got, want)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "@@ -179,7 +179,7 @@ func TestConnectToSelf(t *testing.T) {\nif want := \"server\\n\"; reply != want {\nt.Errorf(\"Error on server, want: %q, got: %q\", want, reply)\n}\n- if err := d.WaitForOutput(\"^client\\n$\", 1*time.Second); err != nil {\n+ if _, err := d.WaitForOutput(\"^client\\n$\", 1*time.Second); err != nil {\nt.Fatal(\"docker.WaitForOutput(client) timeout:\", err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -218,20 +218,20 @@ func (d *Docker) FindPort(sandboxPort int) (int, error) {\n// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n// for the given pattern.\n-func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) error {\n+func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, error) {\nre := regexp.MustCompile(pattern)\nvar out string\nfor exp := time.Now().Add(timeout); time.Now().Before(exp); {\nvar err error\nout, err = do(\"logs\", d.Name)\nif err != nil {\n- return err\n+ return \"\", err\n}\n- if re.MatchString(out) {\n+ if match := re.FindString(out); match != \"\" {\n// Success!\n- return nil\n+ return match, nil\n}\ntime.Sleep(100 * time.Millisecond)\n}\n- return fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n+ return \"\", fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use container's capabilities in exec
When no capabilities are specified in exec, use the
container's capabilities to match runc's behavior.
PiperOrigin-RevId: 211735186
Change-Id: Icd372ed64410c81144eae94f432dffc9fe3a86ce |
259,891 | 05.09.2018 21:13:46 | 25,200 | 8f0b6e7fc02919df034dea9e9c9dbab1b80de2be | runsc: Support runsc kill multi-container.
Now, we can kill individual containers rather than the entire sandbox. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -22,7 +22,6 @@ import (\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/control/server\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n@@ -387,13 +386,5 @@ type SignalArgs struct {\n// Signal sends a signal to the init process of the container.\nfunc (cm *containerManager) Signal(args *SignalArgs, _ *struct{}) error {\nlog.Debugf(\"containerManager.Signal\")\n- // TODO: Use the cid and send the signal to the init\n- // process in theat container. Currently we just signal PID 1 in the\n- // sandbox.\n- si := arch.SignalInfo{Signo: args.Signo}\n- t := cm.l.k.TaskSet().Root.TaskWithID(1)\n- if t == nil {\n- return fmt.Errorf(\"cannot signal: no task with id 1\")\n- }\n- return t.SendSignal(&si)\n+ return cm.l.signal(args.CID, args.Signo)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/inet\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n@@ -576,3 +577,19 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\npanic(fmt.Sprintf(\"invalid network configuration: %v\", conf.Network))\n}\n}\n+\n+func (l *Loader) signal(cid string, signo int32) error {\n+ l.mu.Lock()\n+ tgid, ok := l.containerRootTGIDs[cid]\n+ l.mu.Unlock()\n+ if !ok {\n+ return fmt.Errorf(\"failed to signal container %q: no such container\", cid)\n+ }\n+\n+ // The thread group ID of a process is the leading task's thread ID.\n+ t := l.k.TaskSet().Root.TaskWithID(tgid)\n+ if t == nil {\n+ return fmt.Errorf(\"cannot signal: no task with ID %d\", tgid)\n+ }\n+ return t.SendSignal(&arch.SignalInfo{Signo: signo})\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/kill.go",
"new_path": "runsc/cmd/kill.go",
"diff": "@@ -81,6 +81,8 @@ func (*Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su\nif err != nil {\nFatalf(\"%v\", err)\n}\n+ // TODO: Distinguish between already-exited containers and\n+ // genuine errors.\nif err := c.Signal(sig); err != nil {\nFatalf(\"%v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -427,6 +427,7 @@ func (c *Container) Signal(sig syscall.Signal) error {\nlog.Warningf(\"container %q not running, not sending signal %v\", c.ID, sig)\nreturn nil\n}\n+ // TODO: Query the container for its state, then save it.\nreturn c.Sandbox.Signal(c.ID, sig)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -38,6 +38,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n@@ -336,8 +337,8 @@ func TestLifecycle(t *testing.T) {\nwg.Done()\n}()\n- // Wait a bit to ensure that we've started waiting on the container\n- // before we signal.\n+ // Wait a bit to ensure that we've started waiting on the\n+ // container before we signal.\n<-ch\ntime.Sleep(100 * time.Millisecond)\n// Send the container a SIGTERM which will cause it to stop.\n@@ -347,11 +348,11 @@ func TestLifecycle(t *testing.T) {\n// Wait for it to die.\nwg.Wait()\n- // The sandbox process should have exited by now, but it is a zombie.\n- // In normal runsc usage, it will be parented to init, and init will\n- // reap the sandbox. However, in this case the test runner is the\n- // parent and will not reap the sandbox process, so we must do it\n- // ourselves.\n+ // The sandbox process should have exited by now, but it is a\n+ // zombie. In normal runsc usage, it will be parented to init,\n+ // and init will reap the sandbox. However, in this case the\n+ // test runner is the parent and will not reap the sandbox\n+ // process, so we must do it ourselves.\np, _ := os.FindProcess(s.Sandbox.Pid)\np.Wait()\ng, _ := os.FindProcess(s.GoferPid)\n@@ -1547,6 +1548,133 @@ func TestGoferExits(t *testing.T) {\n}\n}\n+// TestMultiContainerSignal checks that it is possible to signal individual\n+// containers without killing the entire sandbox.\n+func TestMultiContainerSignal(t *testing.T) {\n+ for _, conf := range configs(all...) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ containerIDs := []string{\n+ testutil.UniqueContainerID(),\n+ testutil.UniqueContainerID(),\n+ }\n+ containerAnnotations := []map[string]string{\n+ // The first container creates a sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n+ },\n+ // The second container creates a container within the first\n+ // container's sandbox.\n+ map[string]string{\n+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n+ specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n+ },\n+ }\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Setup the containers.\n+ containers := make([]*Container, 0, len(containerIDs))\n+ for i, annotations := range containerAnnotations {\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ spec.Annotations = annotations\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ expectedPL := []*control.Process{\n+ {\n+ UID: 0,\n+ PID: 1,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ {\n+ UID: 0,\n+ PID: 2,\n+ PPID: 0,\n+ C: 0,\n+ Cmd: \"sleep\",\n+ },\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ if err := waitForProcessList(containers[0], 
expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Kill process 2.\n+ if err := containers[1].Signal(syscall.SIGKILL); err != nil {\n+ t.Errorf(\"failed to kill process 2: %v\", err)\n+ }\n+\n+ // Make sure process 1 is still running.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Now that process 2 is gone, ensure we get an error trying to\n+ // signal it again.\n+ if err := containers[1].Signal(syscall.SIGKILL); err == nil {\n+ t.Errorf(\"container %q shouldn't exist, but we were able to signal it\", containers[1].ID)\n+ }\n+\n+ // Kill process 1.\n+ if err := containers[0].Signal(syscall.SIGKILL); err != nil {\n+ t.Errorf(\"failed to kill process 1: %v\", err)\n+ }\n+\n+ if err := waitForSandboxExit(containers[0]); err != nil {\n+ t.Errorf(\"failed to exit sandbox: %v\", err)\n+ }\n+\n+ // The sentry should be gone, so signaling should yield an\n+ // error.\n+ if err := containers[0].Signal(syscall.SIGKILL); err == nil {\n+ t.Errorf(\"sandbox %q shouldn't exist, but we were able to signal it\", containers[0].Sandbox.ID)\n+ }\n+ }\n+}\n+\n+// waitForSandboxExit waits until both the sandbox and gofer processes of the\n+// container have exited.\n+func waitForSandboxExit(container *Container) error {\n+ goferProc, _ := os.FindProcess(container.GoferPid)\n+ state, err := goferProc.Wait()\n+ if err != nil {\n+ return err\n+ }\n+ if !state.Exited() {\n+ return fmt.Errorf(\"gofer with PID %d failed to exit\", container.GoferPid)\n+ }\n+ sandboxProc, _ := os.FindProcess(container.Sandbox.Pid)\n+ state, err = sandboxProc.Wait()\n+ if err != nil {\n+ return err\n+ }\n+ if !state.Exited() {\n+ return fmt.Errorf(\"sandbox with PID %d failed to exit\", container.Sandbox.Pid)\n+ }\n+ return nil\n+}\n+\nfunc TestMain(m *testing.M) {\ntestutil.RunAsRoot(m)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Support runsc kill multi-container.
Now, we can kill individual containers rather than the entire sandbox.
PiperOrigin-RevId: 211748106
Change-Id: Ic97e91db33d53782f838338c4a6d0aab7a313ead |
259,891 | 06.09.2018 10:40:53 | 25,200 | d95663a6b9831b56602c09f33a9679fa15175b97 | runsc testing: Move TestMultiContainerSignal to multi_container_test. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -38,7 +38,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n- \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n@@ -1548,133 +1547,6 @@ func TestGoferExits(t *testing.T) {\n}\n}\n-// TestMultiContainerSignal checks that it is possible to signal individual\n-// containers without killing the entire sandbox.\n-func TestMultiContainerSignal(t *testing.T) {\n- for _, conf := range configs(all...) {\n- t.Logf(\"Running test with conf: %+v\", conf)\n-\n- containerIDs := []string{\n- testutil.UniqueContainerID(),\n- testutil.UniqueContainerID(),\n- }\n- containerAnnotations := []map[string]string{\n- // The first container creates a sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,\n- },\n- // The second container creates a container within the first\n- // container's sandbox.\n- map[string]string{\n- specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,\n- specutils.ContainerdSandboxIDAnnotation: containerIDs[0],\n- },\n- }\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n- // Setup the containers.\n- containers := make([]*Container, 0, len(containerIDs))\n- for i, annotations := range containerAnnotations {\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- spec.Annotations = annotations\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n- }\n-\n- expectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- {\n- UID: 0,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- },\n- }\n-\n- // Check via ps that multiple processes are running.\n- if err := waitForProcessList(containers[0], expectedPL); err != nil {\n- t.Errorf(\"failed to wait for sleep to start: %v\", err)\n- }\n-\n- // Kill process 2.\n- if err := containers[1].Signal(syscall.SIGKILL); err != nil {\n- t.Errorf(\"failed to kill process 2: %v\", err)\n- }\n-\n- // Make sure process 1 is still running.\n- if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n- t.Errorf(\"failed to wait for sleep to start: %v\", err)\n- }\n-\n- // Now that process 2 is gone, ensure we get an error trying to\n- // signal it again.\n- if err := containers[1].Signal(syscall.SIGKILL); err == nil {\n- t.Errorf(\"container %q shouldn't exist, but we were able to signal it\", containers[1].ID)\n- }\n-\n- // Kill process 1.\n- if err := containers[0].Signal(syscall.SIGKILL); err != nil {\n- t.Errorf(\"failed to kill process 1: %v\", err)\n- }\n-\n- if err := waitForSandboxExit(containers[0]); err != nil {\n- t.Errorf(\"failed to exit sandbox: %v\", err)\n- }\n-\n- // The sentry should be gone, so signaling should yield an\n- // error.\n- if err := containers[0].Signal(syscall.SIGKILL); err == nil {\n- t.Errorf(\"sandbox %q shouldn't exist, but we were 
able to signal it\", containers[0].Sandbox.ID)\n- }\n- }\n-}\n-\n-// waitForSandboxExit waits until both the sandbox and gofer processes of the\n-// container have exited.\n-func waitForSandboxExit(container *Container) error {\n- goferProc, _ := os.FindProcess(container.GoferPid)\n- state, err := goferProc.Wait()\n- if err != nil {\n- return err\n- }\n- if !state.Exited() {\n- return fmt.Errorf(\"gofer with PID %d failed to exit\", container.GoferPid)\n- }\n- sandboxProc, _ := os.FindProcess(container.Sandbox.Pid)\n- state, err = sandboxProc.Wait()\n- if err != nil {\n- return err\n- }\n- if !state.Exited() {\n- return fmt.Errorf(\"sandbox with PID %d failed to exit\", container.Sandbox.Pid)\n- }\n- return nil\n-}\n-\nfunc TestMain(m *testing.M) {\ntestutil.RunAsRoot(m)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "package container\nimport (\n+ \"fmt\"\n\"io/ioutil\"\n\"os\"\n\"path/filepath\"\n\"strings\"\n\"sync\"\n+ \"syscall\"\n\"testing\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n@@ -237,3 +239,101 @@ func TestMultiContainerMount(t *testing.T) {\nt.Error(\"container failed, waitStatus:\", ws)\n}\n}\n+\n+// TestMultiContainerSignal checks that it is possible to signal individual\n+// containers without killing the entire sandbox.\n+func TestMultiContainerSignal(t *testing.T) {\n+ for _, conf := range configs(all...) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // Setup the containers.\n+ sleep := []string{\"sleep\", \"100\"}\n+ specs, ids := createSpecs(sleep, sleep)\n+ var containers []*Container\n+ for i, spec := range specs {\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ expectedPL := []*control.Process{\n+ {PID: 1, Cmd: \"sleep\"},\n+ {PID: 2, Cmd: \"sleep\"},\n+ }\n+\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Kill process 2.\n+ if err := containers[1].Signal(syscall.SIGKILL); err != nil {\n+ t.Errorf(\"failed to kill process 2: %v\", err)\n+ }\n+\n+ // Make sure process 1 is still running.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Now that process 2 is gone, ensure we get an error trying to\n+ // signal it again.\n+ if err := containers[1].Signal(syscall.SIGKILL); err == nil {\n+ t.Errorf(\"container %q shouldn't exist, but we were able to signal it\", containers[1].ID)\n+ }\n+\n+ // Kill process 1.\n+ if err := containers[0].Signal(syscall.SIGKILL); err != nil {\n+ t.Errorf(\"failed to kill process 1: %v\", err)\n+ }\n+\n+ if err := waitForSandboxExit(containers[0]); err != nil {\n+ t.Errorf(\"failed to exit sandbox: %v\", err)\n+ }\n+\n+ // The sentry should be gone, so signaling should yield an\n+ // error.\n+ if err := containers[0].Signal(syscall.SIGKILL); err == nil {\n+ t.Errorf(\"sandbox %q shouldn't exist, but we were able to signal it\", containers[0].Sandbox.ID)\n+ }\n+ }\n+}\n+\n+// waitForSandboxExit waits until both the sandbox and gofer processes of the\n+// container have exited.\n+func waitForSandboxExit(container *Container) error {\n+ goferProc, _ := os.FindProcess(container.GoferPid)\n+ state, err := goferProc.Wait()\n+ if err != nil {\n+ return err\n+ }\n+ if !state.Exited() {\n+ return fmt.Errorf(\"gofer with PID %d failed to exit\", container.GoferPid)\n+ }\n+ sandboxProc, _ := os.FindProcess(container.Sandbox.Pid)\n+ state, err = sandboxProc.Wait()\n+ if err != nil {\n+ return err\n+ }\n+ if !state.Exited() {\n+ return fmt.Errorf(\"sandbox with PID %d failed to exit\", container.Sandbox.Pid)\n+ }\n+ return nil\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc testing: Move TestMultiContainerSignal to multi_container_test.
PiperOrigin-RevId: 211831396
Change-Id: Id67f182cb43dccb696180ec967f5b96176f252e0 |
259,992 | 06.09.2018 10:58:58 | 25,200 | efac28976c6dbf40627d02753fee1467c8272b45 | Enable network for multi-container | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -57,6 +57,7 @@ const (\nfunc setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *boot.Config) error {\nlog.Infof(\"Setting up network\")\n+ if !conf.MultiContainer {\n// HACK!\n//\n// When kubernetes starts a pod, it first creates a sandbox with an\n@@ -83,6 +84,7 @@ func setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *boot.Confi\nlog.Warningf(\"HACK: Disabling network\")\nconf.Network = boot.NetworkNone\n}\n+ }\nswitch conf.Network {\ncase boot.NetworkNone:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable network for multi-container
PiperOrigin-RevId: 211834411
Change-Id: I52311a6c5407f984e5069359d9444027084e4d2a |
259,992 | 07.09.2018 10:44:50 | 25,200 | f895cb4d8b4b37a563b7a5b9dc92eae552084b44 | Use root abstract socket namespace for exec | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -106,6 +106,7 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\nMaxSymlinkTraversals: linux.MaxSymlinkTraversals,\nUTSNamespace: proc.Kernel.RootUTSNamespace(),\nIPCNamespace: proc.Kernel.RootIPCNamespace(),\n+ AbstractSocketNamespace: proc.Kernel.RootAbstractSocketNamespace(),\n}\nctx := initArgs.NewContext(proc.Kernel)\nmounter := fs.FileOwnerFromContext(ctx)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -101,6 +101,7 @@ type Kernel struct {\nvdso *loader.VDSO\nrootUTSNamespace *UTSNamespace\nrootIPCNamespace *IPCNamespace\n+ rootAbstractSocketNamespace *AbstractSocketNamespace\n// mounts holds the state of the virtual filesystem. mounts is initially\n// nil, and must be set by calling Kernel.SetRootMountNamespace before\n@@ -201,11 +202,14 @@ type InitKernelArgs struct {\n// Vdso holds the VDSO and its parameter page.\nVdso *loader.VDSO\n- // RootUTSNamespace is the root UTS namepsace.\n+ // RootUTSNamespace is the root UTS namespace.\nRootUTSNamespace *UTSNamespace\n- // RootIPCNamespace is the root IPC namepsace.\n+ // RootIPCNamespace is the root IPC namespace.\nRootIPCNamespace *IPCNamespace\n+\n+ // RootAbstractSocketNamespace is the root Abstract Socket namespace.\n+ RootAbstractSocketNamespace *AbstractSocketNamespace\n}\n// Init initialize the Kernel with no tasks.\n@@ -231,6 +235,7 @@ func (k *Kernel) Init(args InitKernelArgs) error {\nk.rootUserNamespace = args.RootUserNamespace\nk.rootUTSNamespace = args.RootUTSNamespace\nk.rootIPCNamespace = args.RootIPCNamespace\n+ k.rootAbstractSocketNamespace = args.RootAbstractSocketNamespace\nk.networkStack = args.NetworkStack\nk.applicationCores = args.ApplicationCores\nif args.UseHostCores {\n@@ -509,6 +514,9 @@ type CreateProcessArgs struct {\n// IPCNamespace is the initial IPC namespace.\nIPCNamespace *IPCNamespace\n+ // AbstractSocketNamespace is the initial Abstract Socket namespace.\n+ AbstractSocketNamespace *AbstractSocketNamespace\n+\n// Root optionally contains the dirent that serves as the root for the\n// process. If nil, the mount namespace's root is used as the process'\n// root.\n@@ -651,7 +659,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\nAllowedCPUMask: sched.NewFullCPUSet(k.applicationCores),\nUTSNamespace: args.UTSNamespace,\nIPCNamespace: args.IPCNamespace,\n- AbstractSocketNamespace: NewAbstractSocketNamespace(), // FIXME\n+ AbstractSocketNamespace: args.AbstractSocketNamespace,\n}\nt, err := k.tasks.NewTask(config)\nif err != nil {\n@@ -839,6 +847,11 @@ func (k *Kernel) RootIPCNamespace() *IPCNamespace {\nreturn k.rootIPCNamespace\n}\n+// RootAbstractSocketNamespace returns the root AbstractSocketNamespace.\n+func (k *Kernel) RootAbstractSocketNamespace() *AbstractSocketNamespace {\n+ return k.rootAbstractSocketNamespace\n+}\n+\n// RootMountNamespace returns the MountNamespace.\nfunc (k *Kernel) RootMountNamespace() *fs.MountNamespace {\nk.extMu.Lock()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -143,6 +143,19 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n}\ntk.SetClocks(time.NewCalibratedClocks())\n+ if err := enableStrace(conf); err != nil {\n+ return nil, fmt.Errorf(\"failed to enable strace: %v\", err)\n+ }\n+\n+ // Create an empty network stack because the network namespace may be empty at\n+ // this point. Netns is configured before Run() is called. Netstack is\n+ // configured using a control uRPC message. Host network is configured inside\n+ // Run().\n+ networkStack, err := newEmptyNetworkStack(conf, k)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to create network: %v\", err)\n+ }\n+\n// Create capabilities.\ncaps, err := specutils.Capabilities(spec.Process.Capabilities)\nif err != nil {\n@@ -163,26 +176,6 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\ncaps,\nauth.NewRootUserNamespace())\n- // Create user namespace.\n- // TODO: Not clear what domain name should be here. It is\n- // not configurable from runtime spec.\n- utsns := kernel.NewUTSNamespace(spec.Hostname, \"\", creds.UserNamespace)\n-\n- ipcns := kernel.NewIPCNamespace(creds.UserNamespace)\n-\n- if err := enableStrace(conf); err != nil {\n- return nil, fmt.Errorf(\"failed to enable strace: %v\", err)\n- }\n-\n- // Create an empty network stack because the network namespace may be empty at\n- // this point. Netns is configured before Run() is called. Netstack is\n- // configured using a control uRPC message. Host network is configured inside\n- // Run().\n- networkStack, err := newEmptyNetworkStack(conf, k)\n- if err != nil {\n- return nil, fmt.Errorf(\"failed to create network: %v\", err)\n- }\n-\n// Initiate the Kernel object, which is required by the Context passed\n// to createVFS in order to mount (among other things) procfs.\nif err = k.Init(kernel.InitKernelArgs{\n@@ -193,8 +186,9 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n// TODO: use number of logical processors from cgroups.\nApplicationCores: uint(runtime.NumCPU()),\nVdso: vdso,\n- RootUTSNamespace: utsns,\n- RootIPCNamespace: ipcns,\n+ RootUTSNamespace: kernel.NewUTSNamespace(spec.Hostname, \"\", creds.UserNamespace),\n+ RootIPCNamespace: kernel.NewIPCNamespace(creds.UserNamespace),\n+ RootAbstractSocketNamespace: kernel.NewAbstractSocketNamespace(),\n}); err != nil {\nreturn nil, fmt.Errorf(\"error initializing kernel: %v\", err)\n}\n@@ -244,7 +238,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nlog.Infof(\"Panic signal set to %v(%d)\", ps, conf.PanicSignal)\n}\n- procArgs, err := newProcess(spec, creds, utsns, ipcns, k)\n+ procArgs, err := newProcess(spec, creds, k)\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create root process: %v\", err)\n}\n@@ -265,7 +259,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n}\n// newProcess creates a process that can be run with kernel.CreateProcess.\n-func newProcess(spec *specs.Spec, creds *auth.Credentials, utsns *kernel.UTSNamespace, ipcns *kernel.IPCNamespace, k *kernel.Kernel) (kernel.CreateProcessArgs, error) {\n+func newProcess(spec *specs.Spec, creds *auth.Credentials, k *kernel.Kernel) (kernel.CreateProcessArgs, error) {\n// Create initial limits.\nls, err := createLimitSet(spec)\nif err != nil {\n@@ -281,8 +275,9 @@ func newProcess(spec *specs.Spec, creds *auth.Credentials, utsns *kernel.UTSName\nUmask: 0022,\nLimits: ls,\nMaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n- 
UTSNamespace: utsns,\n- IPCNamespace: ipcns,\n+ UTSNamespace: k.RootUTSNamespace(),\n+ IPCNamespace: k.RootIPCNamespace(),\n+ AbstractSocketNamespace: k.RootAbstractSocketNamespace(),\n}\nreturn procArgs, nil\n}\n@@ -421,12 +416,7 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\n// TODO New containers should be started in new PID namespaces\n// when indicated by the spec.\n- procArgs, err := newProcess(\n- spec,\n- creds,\n- l.k.RootUTSNamespace(),\n- l.k.RootIPCNamespace(),\n- l.k)\n+ procArgs, err := newProcess(spec, creds, l.k)\nif err != nil {\nreturn 0, fmt.Errorf(\"failed to create new process: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use root abstract socket namespace for exec
PiperOrigin-RevId: 211999211
Change-Id: I5968dd1a8313d3e49bb6e6614e130107495de41d |
259,992 | 07.09.2018 12:27:44 | 25,200 | bc81f3fe4a042a15343d2eab44da32d818ac1ade | Remove '--file-access=direct' option
It was used before gofer was implemented and it's not
supported anymore.
BREAKING CHANGE: proxy-shared and proxy-exclusive options
are now: shared and exclusive. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -85,8 +85,11 @@ go_test(\ndeps = [\n\"//pkg/control/server\",\n\"//pkg/log\",\n+ \"//pkg/p9\",\n\"//pkg/sentry/context/contexttest\",\n\"//pkg/sentry/fs\",\n+ \"//pkg/unet\",\n+ \"//runsc/fsgofer\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -60,28 +60,23 @@ func (p PlatformType) String() string {\ntype FileAccessType int\nconst (\n- // FileAccessProxy sends IO requests to a Gofer process that validates the\n+ // FileAccessShared sends IO requests to a Gofer process that validates the\n// requests and forwards them to the host.\n- FileAccessProxy FileAccessType = iota\n+ FileAccessShared FileAccessType = iota\n- // FileAccessProxyExclusive is the same as FileAccessProxy, but enables\n+ // FileAccessExclusive is the same as FileAccessShared, but enables\n// extra caching for improved performance. It should only be used if\n// the sandbox has exclusive access to the filesystem.\n- FileAccessProxyExclusive\n-\n- // FileAccessDirect connects the sandbox directly to the host filesystem.\n- FileAccessDirect\n+ FileAccessExclusive\n)\n// MakeFileAccessType converts type from string.\nfunc MakeFileAccessType(s string) (FileAccessType, error) {\nswitch s {\n- case \"proxy-shared\":\n- return FileAccessProxy, nil\n- case \"proxy-exclusive\":\n- return FileAccessProxyExclusive, nil\n- case \"direct\":\n- return FileAccessDirect, nil\n+ case \"shared\":\n+ return FileAccessShared, nil\n+ case \"exclusive\":\n+ return FileAccessExclusive, nil\ndefault:\nreturn 0, fmt.Errorf(\"invalid file access type %q\", s)\n}\n@@ -89,12 +84,10 @@ func MakeFileAccessType(s string) (FileAccessType, error) {\nfunc (f FileAccessType) String() string {\nswitch f {\n- case FileAccessProxy:\n- return \"proxy-shared\"\n- case FileAccessProxyExclusive:\n- return \"proxy-exclusive\"\n- case FileAccessDirect:\n- return \"direct\"\n+ case FileAccessShared:\n+ return \"shared\"\n+ case FileAccessExclusive:\n+ return \"exclusive\"\ndefault:\nreturn fmt.Sprintf(\"unknown(%d)\", f)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -264,45 +264,6 @@ var allowedSyscalls = seccomp.SyscallRules{\n},\n}\n-// whitelistFSFilters returns syscalls made by whitelistFS. Using WhitelistFS\n-// is less secure because it runs inside the Sentry and must be able to perform\n-// file operations that would otherwise be disabled by seccomp when a Gofer is\n-// used. When whitelistFS is not used, openning new FD in the Sentry is\n-// disallowed.\n-func whitelistFSFilters() seccomp.SyscallRules {\n- return seccomp.SyscallRules{\n- syscall.SYS_ACCESS: {},\n- syscall.SYS_FCHMOD: {},\n- syscall.SYS_FSTAT: {},\n- syscall.SYS_FSYNC: {},\n- syscall.SYS_FTRUNCATE: {},\n- syscall.SYS_GETCWD: {},\n- syscall.SYS_GETDENTS: {},\n- syscall.SYS_GETDENTS64: {},\n- syscall.SYS_LSEEK: {},\n- syscall.SYS_LSTAT: {},\n- syscall.SYS_MKDIR: {},\n- syscall.SYS_MKDIRAT: {},\n- syscall.SYS_NEWFSTATAT: {},\n- syscall.SYS_OPEN: {},\n- syscall.SYS_OPENAT: {},\n- syscall.SYS_PREAD64: {},\n- syscall.SYS_PWRITE64: {},\n- syscall.SYS_READ: {},\n- syscall.SYS_READLINK: {},\n- syscall.SYS_READLINKAT: {},\n- syscall.SYS_RENAMEAT: {},\n- syscall.SYS_STAT: {},\n- syscall.SYS_SYMLINK: {},\n- syscall.SYS_SYMLINKAT: {},\n- syscall.SYS_SYNC_FILE_RANGE: {},\n- syscall.SYS_UNLINK: {},\n- syscall.SYS_UNLINKAT: {},\n- syscall.SYS_UTIMENSAT: {},\n- syscall.SYS_WRITE: {},\n- }\n-}\n-\n// hostInetFilters contains syscalls that are needed by sentry/socket/hostinet.\nfunc hostInetFilters() seccomp.SyscallRules {\nreturn seccomp.SyscallRules{\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/filter.go",
"new_path": "runsc/boot/filter/filter.go",
"diff": "@@ -30,7 +30,6 @@ import (\n// Options are seccomp filter related options.\ntype Options struct {\nPlatform platform.Platform\n- WhitelistFS bool\nHostNetwork bool\nControllerFD int\n}\n@@ -44,10 +43,6 @@ func Install(opt Options) error {\n// when not enabled.\ns.Merge(instrumentationFilters())\n- if opt.WhitelistFS {\n- Report(\"direct file access allows unrestricted file access!\")\n- s.Merge(whitelistFSFilters())\n- }\nif opt.HostNetwork {\nReport(\"host networking enabled: syscall filters less restrictive!\")\ns.Merge(hostInetFilters())\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -204,7 +204,7 @@ func createRootMount(ctx context.Context, spec *specs.Spec, conf *Config, fds *f\n)\nswitch conf.FileAccess {\n- case FileAccessProxy, FileAccessProxyExclusive:\n+ case FileAccessShared, FileAccessExclusive:\nfd := fds.remove()\nlog.Infof(\"Mounting root over 9P, ioFD: %d\", fd)\nhostFS := mustFindFilesystem(\"9p\")\n@@ -214,13 +214,6 @@ func createRootMount(ctx context.Context, spec *specs.Spec, conf *Config, fds *f\nreturn nil, fmt.Errorf(\"failed to generate root mount point: %v\", err)\n}\n- case FileAccessDirect:\n- hostFS := mustFindFilesystem(\"whitelistfs\")\n- rootInode, err = hostFS.Mount(ctx, rootDevice, mf, \"root=\"+spec.Root.Path+\",dont_translate_ownership=true\")\n- if err != nil {\n- return nil, fmt.Errorf(\"failed to generate root mount point: %v\", err)\n- }\n-\ndefault:\nreturn nil, fmt.Errorf(\"invalid file access type: %v\", conf.FileAccess)\n}\n@@ -289,13 +282,10 @@ func getMountNameAndOptions(conf *Config, m specs.Mount, fds *fdDispenser) (stri\ncase bind:\nswitch conf.FileAccess {\n- case FileAccessProxy, FileAccessProxyExclusive:\n+ case FileAccessShared, FileAccessExclusive:\nfd := fds.remove()\nfsName = \"9p\"\nopts = p9MountOptions(conf, fd)\n- case FileAccessDirect:\n- fsName = \"whitelistfs\"\n- opts = []string{\"root=\" + m.Source, \"dont_translate_ownership=true\"}\ndefault:\nerr = fmt.Errorf(\"invalid file access type: %v\", conf.FileAccess)\n}\n@@ -423,7 +413,7 @@ func p9MountOptions(conf *Config, fd int) []string {\n\"wfdno=\" + strconv.Itoa(fd),\n\"privateunixsocket=true\",\n}\n- if conf.FileAccess == FileAccessProxy {\n+ if conf.FileAccess == FileAccessShared {\nopts = append(opts, \"cache=remote_revalidating\")\n}\nreturn opts\n@@ -503,9 +493,6 @@ func addRestoreMount(conf *Config, renv *fs.RestoreEnvironment, m specs.Mount, f\n// createRestoreEnvironment builds a fs.RestoreEnvironment called renv by adding the mounts\n// to the environment.\nfunc createRestoreEnvironment(spec *specs.Spec, conf *Config, fds *fdDispenser) (*fs.RestoreEnvironment, error) {\n- if conf.FileAccess == FileAccessDirect {\n- return nil, fmt.Errorf(\"host filesystem with whitelist not supported with S/R\")\n- }\nrenv := &fs.RestoreEnvironment{\nMountSources: make(map[string][]fs.MountArgs),\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -341,7 +341,6 @@ func (l *Loader) run() error {\n} else {\nopts := filter.Options{\nPlatform: l.k.Platform,\n- WhitelistFS: l.conf.FileAccess == FileAccessDirect,\nHostNetwork: l.conf.Network == NetworkHost,\nControllerFD: l.ctrl.srv.FD(),\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -20,14 +20,18 @@ import (\n\"os\"\n\"reflect\"\n\"sync\"\n+ \"syscall\"\n\"testing\"\n\"time\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/control/server\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/p9\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/unet\"\n+ \"gvisor.googlesource.com/gvisor/runsc/fsgofer\"\n)\nfunc init() {\n@@ -39,7 +43,6 @@ func testConfig() *Config {\nreturn &Config{\nRootDir: \"unused_root_dir\",\nNetwork: NetworkNone,\n- FileAccess: FileAccessDirect,\nDisableSeccomp: true,\n}\n}\n@@ -58,23 +61,62 @@ func testSpec() *specs.Spec {\n}\n}\n-func createLoader() (*Loader, error) {\n+// startGofer starts a new gofer routine serving 'root' path. It returns the\n+// sandbox side of the connection, and a function that when called will stop the\n+// gofer.\n+func startGofer(root string) (int, func(), error) {\n+ fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ sandboxEnd, goferEnd := fds[0], fds[1]\n+\n+ socket, err := unet.NewSocket(goferEnd)\n+ if err != nil {\n+ syscall.Close(sandboxEnd)\n+ syscall.Close(goferEnd)\n+ return 0, nil, fmt.Errorf(\"error creating server on FD %d: %v\", goferEnd, err)\n+ }\n+ go func() {\n+ at := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})\n+ s := p9.NewServer(at)\n+ if err := s.Handle(socket); err != nil {\n+ log.Infof(\"Gofer is stopping. FD: %d, err: %v\\n\", goferEnd, err)\n+ }\n+ }()\n+ // Closing the gofer FD will stop the gofer and exit goroutine above.\n+ return sandboxEnd, func() { syscall.Close(goferEnd) }, nil\n+}\n+\n+func createLoader() (*Loader, func(), error) {\nfd, err := server.CreateSocket(ControlSocketAddr(fmt.Sprintf(\"%010d\", rand.Int())[:10]))\nif err != nil {\n- return nil, err\n+ return nil, nil, err\n}\nconf := testConfig()\nspec := testSpec()\n- return New(spec, conf, fd, nil, false)\n+\n+ sandEnd, cleanup, err := startGofer(spec.Root.Path)\n+ if err != nil {\n+ return nil, nil, err\n+ }\n+\n+ l, err := New(spec, conf, fd, []int{sandEnd}, false)\n+ if err != nil {\n+ cleanup()\n+ return nil, nil, err\n+ }\n+ return l, cleanup, nil\n}\n// TestRun runs a simple application in a sandbox and checks that it succeeds.\nfunc TestRun(t *testing.T) {\n- s, err := createLoader()\n+ s, cleanup, err := createLoader()\nif err != nil {\nt.Fatalf(\"error creating loader: %v\", err)\n}\ndefer s.Destroy()\n+ defer cleanup()\n// Start a goroutine to read the start chan result, otherwise Run will\n// block forever.\n@@ -106,11 +148,12 @@ func TestRun(t *testing.T) {\n// TestStartSignal tests that the controller Start message will cause\n// WaitForStartSignal to return.\nfunc TestStartSignal(t *testing.T) {\n- s, err := createLoader()\n+ s, cleanup, err := createLoader()\nif err != nil {\nt.Fatalf(\"error creating loader: %v\", err)\n}\ndefer s.Destroy()\n+ defer cleanup()\n// We aren't going to wait on this application, so the control server\n// needs to be shut down manually.\n@@ -330,7 +373,14 @@ func TestCreateMountNamespace(t *testing.T) {\nt.Run(tc.name, func(t *testing.T) {\nconf := testConfig()\nctx := contexttest.Context(t)\n- mm, err := createMountNamespace(ctx, ctx, &tc.spec, conf, nil)\n+\n+ sandEnd, cleanup, err := startGofer(tc.spec.Root.Path)\n+ if err != nil {\n+ t.Fatalf(\"failed to create gofer: %v\", 
err)\n+ }\n+ defer cleanup()\n+\n+ mm, err := createMountNamespace(ctx, ctx, &tc.spec, conf, []int{sandEnd})\nif err != nil {\nt.Fatalf(\"createMountNamespace test case %q failed: %v\", tc.name, err)\n}\n@@ -352,7 +402,6 @@ func TestRestoreEnvironment(t *testing.T) {\ntestCases := []struct {\nname string\nspec *specs.Spec\n- fileAccess FileAccessType\nioFDs []int\nerrorExpected bool\nexpectedRenv fs.RestoreEnvironment\n@@ -375,7 +424,6 @@ func TestRestoreEnvironment(t *testing.T) {\n},\n},\n},\n- fileAccess: FileAccessProxy,\nioFDs: []int{0},\nerrorExpected: false,\nexpectedRenv: fs.RestoreEnvironment{\n@@ -430,7 +478,6 @@ func TestRestoreEnvironment(t *testing.T) {\n},\n},\n},\n- fileAccess: FileAccessProxy,\nioFDs: []int{0, 1},\nerrorExpected: false,\nexpectedRenv: fs.RestoreEnvironment{\n@@ -489,7 +536,6 @@ func TestRestoreEnvironment(t *testing.T) {\n},\n},\n},\n- fileAccess: FileAccessProxy,\nioFDs: []int{0},\nerrorExpected: false,\nexpectedRenv: fs.RestoreEnvironment{\n@@ -534,48 +580,10 @@ func TestRestoreEnvironment(t *testing.T) {\n},\n},\n},\n- {\n- name: \"whitelist error test\",\n- spec: &specs.Spec{\n- Root: &specs.Root{\n- Path: os.TempDir(),\n- Readonly: true,\n- },\n- Mounts: []specs.Mount{\n- {\n- Destination: \"/dev/fd-foo\",\n- Type: \"bind\",\n- },\n- },\n- },\n- fileAccess: FileAccessDirect,\n- ioFDs: []int{0, 1},\n- errorExpected: true,\n- },\n- {\n- name: \"bad options test\",\n- spec: &specs.Spec{\n- Root: &specs.Root{\n- Path: os.TempDir(),\n- Readonly: true,\n- },\n- Mounts: []specs.Mount{\n- {\n- Destination: \"/dev/fd-foo\",\n- Type: \"tmpfs\",\n- Options: []string{\"invalid_option=true\"},\n- },\n- },\n- },\n- fileAccess: FileAccessDirect,\n- ioFDs: []int{0},\n- errorExpected: true,\n- },\n}\nfor _, tc := range testCases {\nt.Run(tc.name, func(t *testing.T) {\nconf := testConfig()\n- conf.FileAccess = tc.fileAccess\nfds := &fdDispenser{fds: tc.ioFDs}\nactualRenv, err := createRestoreEnvironment(tc.spec, conf, fds)\nif !tc.errorExpected && err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -102,13 +102,6 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nconf := args[0].(*boot.Config)\nwaitStatus := args[1].(*syscall.WaitStatus)\n- // sentry should run with a umask of 0 when --file-access=direct, because we want\n- // to preserve file modes exactly as set by the sentry, which will have applied\n- // its own umask.\n- if conf.FileAccess == boot.FileAccessDirect {\n- syscall.Umask(0)\n- }\n-\nif b.applyCaps {\ncaps := spec.Process.Capabilities\nif caps == nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -615,11 +615,6 @@ func (c *Container) waitForStopped() error {\n}\nfunc (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, error) {\n- if conf.FileAccess == boot.FileAccessDirect {\n- // Don't start a gofer. The sandbox will access host FS directly.\n- return nil, nil\n- }\n-\nif err := setupFS(spec, conf, bundleDir); err != nil {\nreturn nil, fmt.Errorf(\"failed to setup mounts: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -241,7 +241,7 @@ func configs(opts ...configOption) []*boot.Config {\n}\nc.Platform = boot.PlatformKVM\ncase nonExclusiveFS:\n- c.FileAccess = boot.FileAccessProxy\n+ c.FileAccess = boot.FileAccessShared\ndefault:\npanic(fmt.Sprintf(\"unknown config option %v\", o))\n@@ -1368,10 +1368,10 @@ func TestAbbreviatedIDs(t *testing.T) {\n// Check that modifications to a volume mount are propigated into and out of\n// the sandbox.\nfunc TestContainerVolumeContentsShared(t *testing.T) {\n- // Only run this test with shared proxy, since that is the only\n+ // Only run this test with shared file access, since that is the only\n// behavior it is testing.\nconf := testutil.TestConfig()\n- conf.FileAccess = boot.FileAccessProxy\n+ conf.FileAccess = boot.FileAccessShared\nt.Logf(\"Running test with conf: %+v\", conf)\n// Main process just sleeps. We will use \"exec\" to probe the state of\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -58,7 +58,7 @@ var (\n// Flags that control sandbox runtime behavior.\nplatform = flag.String(\"platform\", \"ptrace\", \"specifies which platform to use: ptrace (default), kvm\")\nnetwork = flag.String(\"network\", \"sandbox\", \"specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.\")\n- fileAccess = flag.String(\"file-access\", \"proxy-exclusive\", \"specifies which filesystem to use: proxy-exclusive (default), proxy-shared, or direct. Using a proxy is more secure because it disallows the sandbox from opening files directly in the host. Setting 'proxy-shared' will disable caches and should be used if external modifications to the filesystem are expected.\")\n+ fileAccess = flag.String(\"file-access\", \"exclusive\", \"specifies which filesystem to use: exclusive (default), shared. Setting 'shared' will disable caches and should be used if external modifications to the filesystem are expected.\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\nmultiContainer = flag.Bool(\"multi-container\", false, \"enable *experimental* multi-container support.\")\nwatchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\n@@ -112,8 +112,8 @@ func main() {\ncmd.Fatalf(\"%v\", err)\n}\n- if fsAccess == boot.FileAccessProxy && *overlay {\n- cmd.Fatalf(\"overlay flag is incompatible with proxy-shared file access\")\n+ if fsAccess == boot.FileAccessShared && *overlay {\n+ cmd.Fatalf(\"overlay flag is incompatible with shared file access\")\n}\nnetType, err := boot.MakeNetworkType(*network)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -356,12 +356,8 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nnss = append(nss, specs.LinuxNamespace{Type: specs.PIDNamespace})\n}\n- if conf.FileAccess == boot.FileAccessDirect {\n- log.Infof(\"Sandbox will be started in the current mount namespace\")\n- } else {\nlog.Infof(\"Sandbox will be started in new mount namespace\")\nnss = append(nss, specs.LinuxNamespace{Type: specs.MountNamespace})\n- }\n// Joins the network namespace if network is enabled. the sandbox talks\n// directly to the host network, which may have been configured in the\n@@ -377,9 +373,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// User namespace depends on the following options:\n// - Host network/filesystem: requires to run inside the user namespace\n// specified in the spec or the current namespace if none is configured.\n- // - Gofer: when using a Gofer, the sandbox process can run isolated in a\n- // new user namespace with only the \"nobody\" user and group.\n- if conf.Network == boot.NetworkHost || conf.FileAccess == boot.FileAccessDirect {\n+ if conf.Network == boot.NetworkHost {\nif userns, ok := specutils.GetNS(specs.UserNamespace, spec); ok {\nlog.Infof(\"Sandbox will be started in container's user namespace: %+v\", userns)\nnss = append(nss, userns)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -109,7 +109,7 @@ func TestConfig() *boot.Config {\nNetwork: boot.NetworkNone,\nStrace: true,\nMultiContainer: true,\n- FileAccess: boot.FileAccessProxyExclusive,\n+ FileAccess: boot.FileAccessExclusive,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: true,\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove '--file-access=direct' option
It was used before the gofer was implemented and is no longer supported.
BREAKING CHANGE: the proxy-shared and proxy-exclusive options are now named shared and exclusive.
PiperOrigin-RevId: 212017643
Change-Id: If029d4073fe60583e5ca25f98abb2953de0d78fd |
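Editor's note on the filter change in this commit: below is a minimal, self-contained sketch of the conditional rule-set merge pattern seen in filter.go. The syscallRules type, its merge method, and hostInetFilters here are simplified stand-ins invented for illustration, not the real seccomp package API.

package main

import (
	"fmt"
	"syscall"
)

// syscallRules is a toy stand-in for a seccomp allowlist: syscall number to
// argument rules (the rules themselves are ignored in this sketch).
type syscallRules map[uintptr][]string

// merge copies every entry from other into r.
func (r syscallRules) merge(other syscallRules) {
	for nr, rules := range other {
		r[nr] = append(r[nr], rules...)
	}
}

// hostInetFilters returns the extra syscalls needed when host networking is on.
func hostInetFilters() syscallRules {
	return syscallRules{
		syscall.SYS_GETSOCKOPT: nil,
		syscall.SYS_SETSOCKOPT: nil,
	}
}

func main() {
	rules := syscallRules{
		syscall.SYS_READ:  nil,
		syscall.SYS_WRITE: nil,
	}

	hostNetwork := true // would come from configuration in a real build
	if hostNetwork {
		fmt.Println("host networking enabled: syscall filters less restrictive!")
		rules.merge(hostInetFilters())
	}
	fmt.Printf("%d syscalls allowed\n", len(rules))
}

Dropping the whitelistFS branch removes one of these conditional merges; the remaining ones keep the same shape.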
259,992 | 07.09.2018 13:38:12 | 25,200 | 8ce3fbf9f87677ac34c577be9fb9b395ede8e714 | Only start signal forwarding after init process is created | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -84,6 +84,10 @@ type Loader struct {\n// spec is the base configuration for the root container.\nspec *specs.Spec\n+ // startSignalForwarding enables forwarding of signals to the sandboxed\n+ // container. It should be called after the init process is loaded.\n+ startSignalForwarding func() func()\n+\n// stopSignalForwarding disables forwarding of signals to the sandboxed\n// container. It should be called when a sandbox is destroyed.\nstopSignalForwarding func()\n@@ -226,7 +230,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n}\n// Ensure that signals received are forwarded to the emulated kernel.\nps := syscall.Signal(conf.PanicSignal)\n- stopSignalForwarding := sighandling.PrepareForwarding(k, ps)()\n+ startSignalForwarding := sighandling.PrepareForwarding(k, ps)\nif conf.PanicSignal != -1 {\n// Panics if the sentry receives 'conf.PanicSignal'.\npanicChan := make(chan os.Signal, 1)\n@@ -251,7 +255,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\nwatchdog: watchdog,\nioFDs: ioFDs,\nspec: spec,\n- stopSignalForwarding: stopSignalForwarding,\n+ startSignalForwarding: startSignalForwarding,\nrootProcArgs: procArgs,\n}\nctrl.manager.l = l\n@@ -291,7 +295,9 @@ func (l *Loader) Destroy() {\nif l.ctrl != nil {\nl.ctrl.srv.Stop()\n}\n+ if l.stopSignalForwarding != nil {\nl.stopSignalForwarding()\n+ }\nl.watchdog.Stop()\n}\n@@ -380,6 +386,9 @@ func (l *Loader) run() error {\nl.rootProcArgs.FDMap.DecRef()\n}\n+ // Start signal forwarding only after an init process is created.\n+ l.stopSignalForwarding = l.startSignalForwarding()\n+\nlog.Infof(\"Process should have started...\")\nl.watchdog.Start()\nreturn l.k.Start()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Only start signal forwarding after init process is created
PiperOrigin-RevId: 212028121
Change-Id: If9c2c62f3be103e2bb556b8d154c169888e34369 |
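Editor's note: the commit above splits "prepare" from "start" so forwarding only begins once the init process exists. A minimal sketch of that prepare/start/stop pattern follows, using a hypothetical prepareForwarding helper rather than the real sighandling package.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// prepareForwarding subscribes to the given signals right away, but nothing
// is forwarded until the returned start function is called. start returns a
// stop function that tears the forwarding down again.
func prepareForwarding(sigs ...os.Signal) func() func() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, sigs...)

	return func() func() {
		done := make(chan struct{})
		go func() {
			for {
				select {
				case s := <-ch:
					fmt.Println("forwarding signal:", s)
				case <-done:
					return
				}
			}
		}()
		return func() {
			signal.Stop(ch)
			close(done)
		}
	}
}

func main() {
	start := prepareForwarding(syscall.SIGUSR1, syscall.SIGTERM)

	// ... create the init process here; only then begin forwarding so an
	// early signal cannot race with process creation.
	stop := start()
	defer stop()
}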
259,858 | 07.09.2018 16:52:02 | 25,200 | 6cfb5cd56d4660cc0de6cd991a7ed4601824a7e6 | Add additional sanity checks for walk. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/file.go",
"new_path": "pkg/p9/file.go",
"diff": "@@ -20,10 +20,13 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/fd\"\n)\n-// Attacher is provided by the user.\n+// Attacher is provided by the server.\ntype Attacher interface {\n// Attach returns a new File.\n- Attach(attachName string) (File, error)\n+ //\n+ // The client-side attach will be translate to a series of walks from\n+ // the file returned by this Attach call.\n+ Attach() (File, error)\n}\n// File is a set of operations corresponding to a single node.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/handlers.go",
"new_path": "pkg/p9/handlers.go",
"diff": "@@ -17,6 +17,7 @@ package p9\nimport (\n\"io\"\n\"os\"\n+ \"path\"\n\"strings\"\n\"sync/atomic\"\n\"syscall\"\n@@ -93,39 +94,6 @@ func isSafeName(name string) bool {\nreturn name != \"\" && !strings.Contains(name, \"/\") && name != \".\" && name != \"..\"\n}\n-// handle implements handler.handle.\n-func (t *Twalk) handle(cs *connState) message {\n- // Check the names.\n- for _, name := range t.Names {\n- if !isSafeName(name) {\n- return newErr(syscall.EINVAL)\n- }\n- }\n-\n- // Lookup the FID.\n- ref, ok := cs.LookupFID(t.FID)\n- if !ok {\n- return newErr(syscall.EBADF)\n- }\n- defer ref.DecRef()\n-\n- // Has it been opened already?\n- if _, opened := ref.OpenFlags(); opened {\n- return newErr(syscall.EBUSY)\n- }\n-\n- // Do the walk.\n- qids, sf, err := ref.file.Walk(t.Names)\n- if err != nil {\n- return newErr(err)\n- }\n-\n- // Install the new FID.\n- cs.InsertFID(t.NewFID, &fidRef{file: sf})\n-\n- return &Rwalk{QIDs: qids}\n-}\n-\n// handle implements handler.handle.\nfunc (t *Tclunk) handle(cs *connState) message {\nif !cs.DeleteFID(t.FID) {\n@@ -175,14 +143,57 @@ func (t *Tattach) handle(cs *connState) message {\nreturn newErr(syscall.EINVAL)\n}\n- // Do the attach.\n- sf, err := cs.server.attacher.Attach(t.Auth.AttachName)\n+ // Must provide an absolute path.\n+ if path.IsAbs(t.Auth.AttachName) {\n+ // Trim off the leading / if the path is absolute. We always\n+ // treat attach paths as absolute and call attach with the root\n+ // argument on the server file for clarity.\n+ t.Auth.AttachName = t.Auth.AttachName[1:]\n+ }\n+\n+ // Do the attach on the root.\n+ sf, err := cs.server.attacher.Attach()\nif err != nil {\nreturn newErr(err)\n}\n- cs.InsertFID(t.FID, &fidRef{file: sf})\n+ _, valid, attr, err := sf.GetAttr(AttrMaskAll())\n+ if err != nil {\n+ sf.Close() // Drop file.\n+ return newErr(err)\n+ }\n+ if !valid.Mode {\n+ sf.Close() // Drop file.\n+ return newErr(syscall.EINVAL)\n+ }\n+\n+ // Build a transient reference.\n+ root := &fidRef{\n+ file: sf,\n+ refs: 1,\n+ walkable: attr.Mode.IsDir(),\n+ }\n+ defer root.DecRef()\n+\n+ // Attach the root?\n+ if len(t.Auth.AttachName) == 0 {\n+ cs.InsertFID(t.FID, root)\n+ return &Rattach{}\n+ }\n+\n+ // We want the same traversal checks to apply on attach, so always\n+ // attach at the root and use the regular walk paths.\n+ names := strings.Split(t.Auth.AttachName, \"/\")\n+ _, target, _, attr, err := doWalk(cs, root, names)\n+ if err != nil {\n+ return newErr(err)\n+ }\n+\n+ // Insert the FID.\n+ cs.InsertFID(t.FID, &fidRef{\n+ file: target,\n+ walkable: attr.Mode.IsDir(),\n+ })\n- // Return an empty QID.\nreturn &Rattach{}\n}\n@@ -678,15 +689,104 @@ func (t *Tflushf) handle(cs *connState) message {\nreturn &Rflushf{}\n}\n-// handle implements handler.handle.\n-func (t *Twalkgetattr) handle(cs *connState) message {\n+// walkOne walks zero or one path elements.\n+//\n+// The slice passed as qids is append and returned.\n+func walkOne(qids []QID, from File, names []string) ([]QID, File, AttrMask, Attr, error) {\n+ if len(names) > 1 {\n+ // We require exactly zero or one elements.\n+ return nil, nil, AttrMask{}, Attr{}, syscall.EINVAL\n+ }\n+ var localQIDs []QID\n+ localQIDs, sf, valid, attr, err := from.WalkGetAttr(names)\n+ if err == syscall.ENOSYS {\n+ localQIDs, sf, err = from.Walk(names)\n+ if err != nil {\n+ // No way to walk this element.\n+ return nil, nil, AttrMask{}, Attr{}, err\n+ }\n+ // Need a manual getattr.\n+ _, valid, attr, err = sf.GetAttr(AttrMaskAll())\n+ if err != nil {\n+ // Don't leak the file.\n+ 
sf.Close()\n+ }\n+ }\n+ if err != nil {\n+ // Error walking, don't return anything.\n+ return nil, nil, AttrMask{}, Attr{}, err\n+ }\n+ if len(localQIDs) != 1 {\n+ // Expected a single QID.\n+ sf.Close()\n+ return nil, nil, AttrMask{}, Attr{}, syscall.EINVAL\n+ }\n+ return append(qids, localQIDs...), sf, valid, attr, nil\n+}\n+\n+// doWalk walks from a given fidRef.\n+//\n+// This enforces that all intermediate nodes are walkable (directories).\n+func doWalk(cs *connState, ref *fidRef, names []string) (qids []QID, sf File, valid AttrMask, attr Attr, err error) {\n// Check the names.\n- for _, name := range t.Names {\n+ for _, name := range names {\nif !isSafeName(name) {\n- return newErr(syscall.EINVAL)\n+ err = syscall.EINVAL\n+ return\n+ }\n+ }\n+\n+ // Has it been opened already?\n+ if _, opened := ref.OpenFlags(); opened {\n+ err = syscall.EBUSY\n+ return\n+ }\n+\n+ // Is this an empty list? Handle specially. We don't actually need to\n+ // validate anything since this is always permitted.\n+ if len(names) == 0 {\n+ return walkOne(nil, ref.file, nil)\n+ }\n+\n+ // Is it walkable?\n+ if !ref.walkable {\n+ err = syscall.EINVAL\n+ return\n+ }\n+\n+ from := ref.file // Start at the passed ref.\n+\n+ // Do the walk, one element at a time.\n+ for i := 0; i < len(names); i++ {\n+ qids, sf, valid, attr, err = walkOne(qids, from, names[i:i+1])\n+\n+ // Close the intermediate file. Note that we don't close the\n+ // first file because in that case we are walking from the\n+ // existing reference.\n+ if i > 0 {\n+ from.Close()\n+ }\n+ from = sf // Use the new file.\n+\n+ // Was there an error walking?\n+ if err != nil {\n+ return nil, nil, AttrMask{}, Attr{}, err\n+ }\n+\n+ // We won't allow beyond past symlinks; stop here if this isn't\n+ // a proper directory and we have additional paths to walk.\n+ if !valid.Mode || (!attr.Mode.IsDir() && i < len(names)-1) {\n+ from.Close() // Not using the file object.\n+ return nil, nil, AttrMask{}, Attr{}, syscall.EINVAL\n}\n}\n+ // Success.\n+ return qids, sf, valid, attr, nil\n+}\n+\n+// handle implements handler.handle.\n+func (t *Twalk) handle(cs *connState) message {\n// Lookup the FID.\nref, ok := cs.LookupFID(t.FID)\nif !ok {\n@@ -694,26 +794,41 @@ func (t *Twalkgetattr) handle(cs *connState) message {\n}\ndefer ref.DecRef()\n- // Has it been opened already?\n- if _, opened := ref.OpenFlags(); opened {\n- return newErr(syscall.EBUSY)\n- }\n-\n// Do the walk.\n- qids, sf, valid, attr, err := ref.file.WalkGetAttr(t.Names)\n- if err == syscall.ENOSYS {\n- qids, sf, err = ref.file.Walk(t.Names)\n+ qids, sf, _, attr, err := doWalk(cs, ref, t.Names)\nif err != nil {\nreturn newErr(err)\n}\n- _, valid, attr, err = sf.GetAttr(AttrMaskAll())\n+\n+ // Install the new FID.\n+ cs.InsertFID(t.NewFID, &fidRef{\n+ file: sf,\n+ walkable: attr.Mode.IsDir(),\n+ })\n+\n+ return &Rwalk{QIDs: qids}\n}\n+\n+// handle implements handler.handle.\n+func (t *Twalkgetattr) handle(cs *connState) message {\n+ // Lookup the FID.\n+ ref, ok := cs.LookupFID(t.FID)\n+ if !ok {\n+ return newErr(syscall.EBADF)\n+ }\n+ defer ref.DecRef()\n+\n+ // Do the walk.\n+ qids, sf, valid, attr, err := doWalk(cs, ref, t.Names)\nif err != nil {\nreturn newErr(err)\n}\n// Install the new FID.\n- cs.InsertFID(t.NewFID, &fidRef{file: sf})\n+ cs.InsertFID(t.NewFID, &fidRef{\n+ file: sf,\n+ walkable: attr.Mode.IsDir(),\n+ })\nreturn &Rwalkgetattr{QIDs: qids, Valid: valid, Attr: attr}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/local_server/local_server.go",
"new_path": "pkg/p9/local_server/local_server.go",
"diff": "@@ -70,8 +70,8 @@ func (l *local) info() (p9.QID, os.FileInfo, error) {\n}\n// Attach implements p9.Attacher.Attach.\n-func (l *local) Attach(name string) (p9.File, error) {\n- return &local{path: path.Clean(name)}, nil\n+func (l *local) Attach() (p9.File, error) {\n+ return &local{path: \"/\"}, nil\n}\n// Walk implements p9.File.Walk.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9test/client_test.go",
"new_path": "pkg/p9/p9test/client_test.go",
"diff": "@@ -50,8 +50,16 @@ func TestDonateFD(t *testing.T) {\n// Craft attacher to attach to the mocked file which will return our\n// temporary file.\n- fileMock := &FileMock{OpenMock: OpenMock{File: f}}\n- attacher := &AttachMock{File: fileMock}\n+ fileMock := &FileMock{\n+ OpenMock: OpenMock{File: f},\n+ GetAttrMock: GetAttrMock{\n+ // The mode must be valid always.\n+ Valid: p9.AttrMask{Mode: true},\n+ },\n+ }\n+ attacher := &AttachMock{\n+ File: fileMock,\n+ }\n// Make socket pair.\nserverSocket, clientSocket, err := unet.SocketPair(false)\n@@ -139,15 +147,14 @@ func TestClient(t *testing.T) {\na.Called = false\na.File = sf\na.Err = nil\n+ // The attached root must have a valid mode.\n+ sf.GetAttrMock.Attr = p9.Attr{Mode: p9.ModeDirectory}\n+ sf.GetAttrMock.Valid = p9.AttrMask{Mode: true}\nvar err error\n- sfFile, err = c.Attach(\"foo\")\n+ sfFile, err = c.Attach(\"\")\nif !a.Called {\nt.Errorf(\"Attach never Called?\")\n}\n- if a.AttachName != \"foo\" {\n- // This wasn't carried through?\n- t.Errorf(\"attachName got %v wanted foo\", a.AttachName)\n- }\nreturn err\n},\n},\n@@ -155,6 +162,8 @@ func TestClient(t *testing.T) {\nname: \"bad-walk\",\nwant: sentinelErr,\nfn: func(c *p9.Client) error {\n+ // Walk only called when WalkGetAttr not available.\n+ sf.WalkGetAttrMock.Err = syscall.ENOSYS\nsf.WalkMock.File = d\nsf.WalkMock.Err = sentinelErr\n_, _, err := sfFile.Walk([]string{\"foo\", \"bar\"})\n@@ -164,21 +173,39 @@ func TestClient(t *testing.T) {\n{\nname: \"walk-to-dir\",\nfn: func(c *p9.Client) error {\n+ // Walk only called when WalkGetAttr not available.\n+ sf.WalkGetAttrMock.Err = syscall.ENOSYS\nsf.WalkMock.Called = false\n+ sf.WalkMock.Names = nil\nsf.WalkMock.File = d\nsf.WalkMock.Err = nil\nsf.WalkMock.QIDs = []p9.QID{{Type: 1}}\n+ // All intermediate values must be directories.\n+ d.WalkGetAttrMock.Err = syscall.ENOSYS\n+ d.WalkMock.Called = false\n+ d.WalkMock.Names = nil\n+ d.WalkMock.File = d // Walk to self.\n+ d.WalkMock.Err = nil\n+ d.WalkMock.QIDs = []p9.QID{{Type: 1}}\n+ d.GetAttrMock.Attr = p9.Attr{Mode: p9.ModeDirectory}\n+ d.GetAttrMock.Valid = p9.AttrMask{Mode: true}\nvar qids []p9.QID\nvar err error\nqids, _, err = sfFile.Walk([]string{\"foo\", \"bar\"})\nif !sf.WalkMock.Called {\nt.Errorf(\"Walk never Called?\")\n}\n- if !reflect.DeepEqual(sf.WalkMock.Names, []string{\"foo\", \"bar\"}) {\n- t.Errorf(\"got names %v wanted []{foo, bar}\", sf.WalkMock.Names)\n+ if !d.GetAttrMock.Called {\n+ t.Errorf(\"GetAttr never Called?\")\n}\n- if len(qids) != 1 || qids[0].Type != 1 {\n- t.Errorf(\"got qids %v wanted []{{Type: 1}}\", qids)\n+ if !reflect.DeepEqual(sf.WalkMock.Names, []string{\"foo\"}) {\n+ t.Errorf(\"got names %v wanted []{foo}\", sf.WalkMock.Names)\n+ }\n+ if !reflect.DeepEqual(d.WalkMock.Names, []string{\"bar\"}) {\n+ t.Errorf(\"got names %v wanted []{bar}\", d.WalkMock.Names)\n+ }\n+ if len(qids) != 2 || qids[len(qids)-1].Type != 1 {\n+ t.Errorf(\"got qids %v wanted []{..., {Type: 1}}\", qids)\n}\nreturn err\n},\n@@ -187,11 +214,20 @@ func TestClient(t *testing.T) {\nname: \"walkgetattr-to-dir\",\nfn: func(c *p9.Client) error {\nsf.WalkGetAttrMock.Called = false\n+ sf.WalkGetAttrMock.Names = nil\nsf.WalkGetAttrMock.File = d\nsf.WalkGetAttrMock.Err = nil\nsf.WalkGetAttrMock.QIDs = []p9.QID{{Type: 1}}\n- sf.WalkGetAttrMock.Attr = p9.Attr{UID: 1}\n+ sf.WalkGetAttrMock.Attr = p9.Attr{Mode: p9.ModeDirectory, UID: 1}\nsf.WalkGetAttrMock.Valid = p9.AttrMask{Mode: true}\n+ // See above.\n+ d.WalkGetAttrMock.Called = false\n+ d.WalkGetAttrMock.Names = nil\n+ 
d.WalkGetAttrMock.File = d // Walk to self.\n+ d.WalkGetAttrMock.Err = nil\n+ d.WalkGetAttrMock.QIDs = []p9.QID{{Type: 1}}\n+ d.WalkGetAttrMock.Attr = p9.Attr{Mode: p9.ModeDirectory, UID: 1}\n+ d.WalkGetAttrMock.Valid = p9.AttrMask{Mode: true}\nvar qids []p9.QID\nvar err error\nvar mask p9.AttrMask\n@@ -200,11 +236,14 @@ func TestClient(t *testing.T) {\nif !sf.WalkGetAttrMock.Called {\nt.Errorf(\"Walk never Called?\")\n}\n- if !reflect.DeepEqual(sf.WalkGetAttrMock.Names, []string{\"foo\", \"bar\"}) {\n- t.Errorf(\"got names %v wanted []{foo, bar}\", sf.WalkGetAttrMock.Names)\n+ if !reflect.DeepEqual(sf.WalkGetAttrMock.Names, []string{\"foo\"}) {\n+ t.Errorf(\"got names %v wanted []{foo}\", sf.WalkGetAttrMock.Names)\n+ }\n+ if !reflect.DeepEqual(d.WalkGetAttrMock.Names, []string{\"bar\"}) {\n+ t.Errorf(\"got names %v wanted []{bar}\", d.WalkGetAttrMock.Names)\n}\n- if len(qids) != 1 || qids[0].Type != 1 {\n- t.Errorf(\"got qids %v wanted []{{Type: 1}}\", qids)\n+ if len(qids) != 2 || qids[len(qids)-1].Type != 1 {\n+ t.Errorf(\"got qids %v wanted []{..., {Type: 1}}\", qids)\n}\nif !reflect.DeepEqual(attr, sf.WalkGetAttrMock.Attr) {\nt.Errorf(\"got attrs %s wanted %s\", attr, sf.WalkGetAttrMock.Attr)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/p9test/mocks.go",
"new_path": "pkg/p9/p9test/mocks.go",
"diff": "@@ -71,7 +71,8 @@ type WalkGetAttrMock struct {\n// WalkGetAttr implements p9.File.WalkGetAttr.\nfunc (w *WalkGetAttrMock) WalkGetAttr(names []string) ([]p9.QID, p9.File, p9.AttrMask, p9.Attr, error) {\n- w.Called, w.Names = true, names\n+ w.Called = true\n+ w.Names = append(w.Names, names...)\nreturn w.QIDs, w.File, w.Valid, w.Attr, w.Err\n}\n@@ -300,17 +301,14 @@ func (r *ReadlinkMock) Readlink() (string, error) {\ntype AttachMock struct {\nCalled bool\n- // Args.\n- AttachName string\n-\n// Return.\nFile p9.File\nErr error\n}\n// Attach implements p9.Attacher.Attach.\n-func (a *AttachMock) Attach(attachName string) (p9.File, error) {\n- a.Called, a.AttachName = true, attachName\n+func (a *AttachMock) Attach() (p9.File, error) {\n+ a.Called = true\nreturn a.File, a.Err\n}\n@@ -329,7 +327,8 @@ type WalkMock struct {\n// Walk implements p9.File.Walk.\nfunc (w *WalkMock) Walk(names []string) ([]p9.QID, p9.File, error) {\n- w.Called, w.Names = true, names\n+ w.Called = true\n+ w.Names = append(w.Names, names...)\nreturn w.QIDs, w.File, w.Err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/p9/server.go",
"new_path": "pkg/p9/server.go",
"diff": "@@ -97,6 +97,9 @@ type fidRef struct {\n// This is updated in handlers.go.\nopened bool\n+ // walkable indicates this fidRef may be walked.\n+ walkable bool\n+\n// openFlags is the mode used in the open.\n//\n// This is updated in handlers.go.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -117,17 +117,9 @@ func NewAttachPoint(prefix string, c Config) p9.Attacher {\n}\n// Attach implements p9.Attacher.\n-func (a *attachPoint) Attach(appPath string) (p9.File, error) {\n- // Only proceed if 'appPath' is valid.\n- if !path.IsAbs(appPath) {\n- return nil, fmt.Errorf(\"invalid path %q\", appPath)\n- }\n- if path.Clean(appPath) != appPath {\n- return nil, fmt.Errorf(\"invalid path %q\", appPath)\n- }\n-\n- root := path.Join(a.prefix, appPath)\n- fi, err := os.Stat(root)\n+func (a *attachPoint) Attach() (p9.File, error) {\n+ // Sanity check the prefix.\n+ fi, err := os.Stat(a.prefix)\nif err != nil {\nreturn nil, err\n}\n@@ -136,14 +128,15 @@ func (a *attachPoint) Attach(appPath string) (p9.File, error) {\nmode = os.O_RDONLY\n}\n- f, err := os.OpenFile(root, mode|openFlags, 0)\n+ // Open the root directory.\n+ f, err := os.OpenFile(a.prefix, mode|openFlags, 0)\nif err != nil {\n- return nil, fmt.Errorf(\"unable to open file %q, err: %v\", root, err)\n+ return nil, fmt.Errorf(\"unable to open file %q, err: %v\", a.prefix, err)\n}\nstat, err := stat(int(f.Fd()))\nif err != nil {\nf.Close()\n- return nil, fmt.Errorf(\"failed to stat file %q, err: %v\", root, err)\n+ return nil, fmt.Errorf(\"failed to stat file %q, err: %v\", a.prefix, err)\n}\na.attachedMu.Lock()\n@@ -154,7 +147,7 @@ func (a *attachPoint) Attach(appPath string) (p9.File, error) {\n}\na.attached = true\n- return newLocalFile(a, f, root, stat)\n+ return newLocalFile(a, f, a.prefix, stat)\n}\n// makeQID returns a unique QID for the given stat buffer.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"io/ioutil\"\n\"os\"\n\"path\"\n- \"strings\"\n\"syscall\"\n\"testing\"\n@@ -88,9 +87,9 @@ func runCustom(t *testing.T, types []fileType, confs []Config, test func(*testin\ndefer os.RemoveAll(path)\na := NewAttachPoint(path, c)\n- root, err := a.Attach(\"/\")\n+ root, err := a.Attach()\nif err != nil {\n- t.Fatalf(\"Attach(%q) failed, err: %v\", \"/\", err)\n+ t.Fatalf(\"Attach failed, err: %v\", err)\n}\n_, file, err := root.Walk([]string{name})\n@@ -115,9 +114,9 @@ func setup(ft fileType) (string, string, error) {\n// First attach with writable configuration to setup tree.\na := NewAttachPoint(path, Config{})\n- root, err := a.Attach(\"/\")\n+ root, err := a.Attach()\nif err != nil {\n- return \"\", \"\", fmt.Errorf(\"Attach(%q) failed, err: %v\", \"/\", err)\n+ return \"\", \"\", fmt.Errorf(\"Attach failed, err: %v\", err)\n}\ndefer root.Close()\n@@ -618,9 +617,9 @@ func TestAttachFile(t *testing.T) {\n}\na := NewAttachPoint(path, conf)\n- root, err := a.Attach(\"/\")\n+ root, err := a.Attach()\nif err != nil {\n- t.Fatalf(\"Attach(%q) failed, err: %v\", \"/\", err)\n+ t.Fatalf(\"Attach failed, err: %v\", err)\n}\nif _, _, _, err := root.Open(p9.ReadWrite); err != nil {\n@@ -649,31 +648,6 @@ func TestAttachFile(t *testing.T) {\n}\n}\n-func TestAttachError(t *testing.T) {\n- conf := Config{ROMount: false}\n- root, err := ioutil.TempDir(\"\", \"root-\")\n- if err != nil {\n- t.Fatalf(\"ioutil.TempDir() failed, err: %v\", err)\n- }\n- defer os.RemoveAll(root)\n- a := NewAttachPoint(root, conf)\n-\n- c := path.Join(root, \"test\")\n- if err := os.Mkdir(c, 0700); err != nil {\n- t.Fatalf(\"os.Create(%q) failed, err: %v\", c, err)\n- }\n-\n- for _, p := range []string{\"test\", \"/test/../\", \"/test/./\", \"/test//\"} {\n- _, err := a.Attach(p)\n- if err == nil {\n- t.Fatalf(\"Attach(%q) should have failed\", p)\n- }\n- if want := \"invalid path\"; !strings.Contains(err.Error(), want) {\n- t.Fatalf(\"Attach(%q) wrong error, got: %v, wanted: %v\", p, err, want)\n- }\n- }\n-}\n-\nfunc TestDoubleAttachError(t *testing.T) {\nconf := Config{ROMount: false}\nroot, err := ioutil.TempDir(\"\", \"root-\")\n@@ -683,10 +657,10 @@ func TestDoubleAttachError(t *testing.T) {\ndefer os.RemoveAll(root)\na := NewAttachPoint(root, conf)\n- if _, err := a.Attach(\"/\"); err != nil {\n- t.Fatalf(\"Attach(%q) failed: %v\", \"/\", err)\n+ if _, err := a.Attach(); err != nil {\n+ t.Fatalf(\"Attach failed: %v\", err)\n}\n- if _, err := a.Attach(\"/\"); err == nil {\n- t.Fatalf(\"Attach(%q) should have failed\", \"test\")\n+ if _, err := a.Attach(); err == nil {\n+ t.Fatalf(\"Attach should have failed, got %v want non-nil\", err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add additional sanity checks for walk.
PiperOrigin-RevId: 212058684
Change-Id: I319709b9ffcfccb3231bac98df345d2a20eca24b |
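Editor's note: the walk hardening above forces traversal to happen one path element at a time, with every intermediate element required to be a directory. A rough illustration of the same checks, written against the host filesystem instead of the 9P File interface (walkOneAtATime is a hypothetical helper, not the server's doWalk):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkOneAtATime resolves names under root one element per step, rejecting
// unsafe names and requiring every intermediate element to be a directory.
func walkOneAtATime(root string, names []string) (string, error) {
	cur := root
	for i, name := range names {
		if name == "" || name == "." || name == ".." || filepath.Base(name) != name {
			return "", fmt.Errorf("unsafe path element %q", name)
		}
		cur = filepath.Join(cur, name)
		fi, err := os.Lstat(cur)
		if err != nil {
			return "", err
		}
		// All but the last element must be directories.
		if i < len(names)-1 && !fi.IsDir() {
			return "", fmt.Errorf("%q is not a directory", cur)
		}
	}
	return cur, nil
}

func main() {
	p, err := walkOneAtATime("/etc", []string{"ssl", "certs"})
	fmt.Println(p, err)
}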
259,992 | 07.09.2018 16:58:19 | 25,200 | 172860a059ce2cff68aa85a3f66319ee52bdec13 | Add 'Starting gVisor...' message to syslog
This allows applications to verify they are running with gVisor. It
also helps with debugging when running with a mix of container runtimes.
Closes | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/syslog.go",
"new_path": "pkg/sentry/kernel/syslog.go",
"diff": "@@ -86,14 +86,18 @@ func (s *syslog) Log() []byte {\nreturn m\n}\n- time := 0.0\n+ const format = \"<6>[%11.6f] %s\\n\"\n+\n+ s.msg = append(s.msg, []byte(fmt.Sprintf(format, 0.0, \"Starting gVisor...\"))...)\n+\n+ time := 0.1\nfor i := 0; i < 10; i++ {\ntime += rand.Float64() / 2\n- s.msg = append(s.msg, []byte(fmt.Sprintf(\"<6>[%11.6f] %s\\n\", time, selectMessage()))...)\n+ s.msg = append(s.msg, []byte(fmt.Sprintf(format, time, selectMessage()))...)\n}\ntime += rand.Float64() / 2\n- s.msg = append(s.msg, []byte(fmt.Sprintf(\"<6>[%11.6f] Ready!\\n\", time))...)\n+ s.msg = append(s.msg, []byte(fmt.Sprintf(format, time, \"Ready!\"))...)\n// Return a copy.\no := make([]byte, len(s.msg))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add 'Starting gVisor...' message to syslog
This allows applications to verify they are running with gVisor. It
also helps with debugging when running with a mix of container runtimes.
Closes #54
PiperOrigin-RevId: 212059457
Change-Id: I51d9595ee742b58c1f83f3902ab2e2ecbd5cedec |
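Editor's note: the syslog change above prepends a fixed banner using the printk-style "<6>[ timestamp] message" prefix. A stand-alone sketch of that formatting (buildSyslog is hypothetical, not the sentry's syslog type):

package main

import (
	"fmt"
	"math/rand"
)

// buildSyslog produces printk-style lines, starting with a fixed banner so
// applications reading the kernel log can detect the runtime.
func buildSyslog() []byte {
	const format = "<6>[%11.6f] %s\n"

	var msg []byte
	msg = append(msg, []byte(fmt.Sprintf(format, 0.0, "Starting gVisor..."))...)

	t := 0.1
	for i := 0; i < 3; i++ {
		t += rand.Float64() / 2
		msg = append(msg, []byte(fmt.Sprintf(format, t, "Setting up..."))...)
	}
	t += rand.Float64() / 2
	msg = append(msg, []byte(fmt.Sprintf(format, t, "Ready!"))...)
	return msg
}

func main() {
	fmt.Print(string(buildSyslog()))
}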
259,992 | 07.09.2018 16:59:33 | 25,200 | cf5006ff24c966a652f5b9cbce3ba363208c197a | Disable test until we figure out what's broken | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -147,7 +147,8 @@ func TestNginx(t *testing.T) {\n}\n}\n-func TestMysql(t *testing.T) {\n+// TODO: Enable again when bug is fixed.\n+func DISABLED_TestMysql(t *testing.T) {\nif err := testutil.Pull(\"mysql\"); err != nil {\nt.Fatalf(\"docker pull failed: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disable test until we figure out what's broken
PiperOrigin-RevId: 212059579
Change-Id: I052c2192d3483d7bd0fd2232ef2023a12da66446 |
259,881 | 07.09.2018 18:13:50 | 25,200 | 7045828a310d47a2940214f71ae75b8b7b682b78 | Update cleanup TODO | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/sighandling/sighandling.go",
"new_path": "pkg/sentry/sighandling/sighandling.go",
"diff": "@@ -87,9 +87,8 @@ func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, start, stop, do\n//\n// Otherwise ignore the signal.\n//\n- // TODO: Convert Go's runtime.raise from\n- // tkill to tgkill so PrepareForwarding doesn't need to\n- // be called until after filter installation.\n+ // TODO: Drop in Go 1.12, which uses tgkill\n+ // in runtime.raise.\nswitch signal {\ncase linux.SIGHUP, linux.SIGINT, linux.SIGTERM:\ndieFromSignal(signal)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update cleanup TODO
PiperOrigin-RevId: 212068327
Change-Id: I3f360cdf7d6caa1c96fae68ae3a1caaf440f0cbe |
259,942 | 10.09.2018 13:02:43 | 25,200 | da9ecb748cf6eb26e43338481d1ecba22eea09b2 | Simplify some code in VectorisedView#ToView. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/buffer/view.go",
"new_path": "pkg/tcpip/buffer/view.go",
"diff": "@@ -154,13 +154,11 @@ func (vv *VectorisedView) Size() int {\n// ToView returns a single view containing the content of the vectorised view.\nfunc (vv *VectorisedView) ToView() View {\n- v := make([]byte, vv.size)\n- u := v\n- for i := range vv.views {\n- n := copy(u, vv.views[i])\n- u = u[n:]\n+ u := make([]byte, 0, vv.size)\n+ for _, v := range vv.views {\n+ u = append(u, v...)\n}\n- return v\n+ return u\n}\n// Views returns the slice containing the all views.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Simplify some code in VectorisedView#ToView.
PiperOrigin-RevId: 212317717
Change-Id: Ic77449c53bf2f8be92c9f0a7a726c45bd35ec435 |
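Editor's note: the simplification above replaces a pre-sized copy loop with a single append-based loop. A stand-alone comparison of the two, using a simplified view type rather than the real tcpip/buffer package:

package main

import "fmt"

type view []byte

// toViewCopy pre-sizes the destination and copies each view into it,
// tracking the write position with a second slice header.
func toViewCopy(views []view, size int) view {
	v := make([]byte, size)
	u := v
	for i := range views {
		n := copy(u, views[i])
		u = u[n:]
	}
	return v
}

// toViewAppend allocates once with the right capacity and appends, which is
// shorter and avoids the manual slice bookkeeping.
func toViewAppend(views []view, size int) view {
	u := make([]byte, 0, size)
	for _, v := range views {
		u = append(u, v...)
	}
	return u
}

func main() {
	vs := []view{[]byte("hello, "), []byte("world")}
	fmt.Println(string(toViewCopy(vs, 12)), string(toViewAppend(vs, 12)))
}

Both produce the same bytes and perform one allocation; the second form is simply easier to read.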
259,992 | 10.09.2018 13:23:49 | 25,200 | 7e9e6745ca1f17031bbea14cb08b3ee3c0f9f818 | Allow '/dev/zero' to be mapped with unaligned length | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/special_mappable.go",
"new_path": "pkg/sentry/mm/special_mappable.go",
"diff": "@@ -138,10 +138,15 @@ func (m *SpecialMappable) Length() uint64 {\n// uses an ephemeral file created by mm/shmem.c:shmem_zero_setup(); we should\n// do the same to get non-zero device and inode IDs.\nfunc NewSharedAnonMappable(length uint64, p platform.Platform) (*SpecialMappable, error) {\n- if length == 0 || length != uint64(usermem.Addr(length).RoundDown()) {\n+ if length == 0 {\nreturn nil, syserror.EINVAL\n}\n- fr, err := p.Memory().Allocate(length, usage.Anonymous)\n+ alignedLen, ok := usermem.Addr(length).RoundUp()\n+ if !ok {\n+ return nil, syserror.EINVAL\n+ }\n+\n+ fr, err := p.Memory().Allocate(uint64(alignedLen), usage.Anonymous)\nif err != nil {\nreturn nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow '/dev/zero' to be mapped with unaligned length
PiperOrigin-RevId: 212321271
Change-Id: I79d71c2e6f4b8fcd3b9b923fe96c2256755f4c48 |
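Editor's note: the change above accepts unaligned lengths by rounding them up to a whole page before allocating. A sketch of that rounding check, assuming a fixed 4 KiB page size and a hypothetical alignLength helper instead of usermem.Addr.RoundUp:

package main

import (
	"errors"
	"fmt"
)

const pageSize = 4096

// alignLength rounds length up to a whole number of pages, rejecting zero
// and lengths that would overflow when rounded.
func alignLength(length uint64) (uint64, error) {
	if length == 0 {
		return 0, errors.New("zero length")
	}
	aligned := (length + pageSize - 1) &^ (pageSize - 1)
	if aligned < length { // addition wrapped around
		return 0, errors.New("length too large")
	}
	return aligned, nil
}

func main() {
	fmt.Println(alignLength(1))    // 4096 <nil>
	fmt.Println(alignLength(4097)) // 8192 <nil>
	fmt.Println(alignLength(0))    // 0 zero length
}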
259,885 | 10.09.2018 15:22:44 | 25,200 | a29c39aa629b6118765e5075eb228752934d7081 | Map committed chunks concurrently in FileMem.LoadFrom. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/filemem/filemem_state.go",
"new_path": "pkg/sentry/platform/filemem/filemem_state.go",
"diff": "@@ -19,6 +19,7 @@ import (\n\"fmt\"\n\"io\"\n\"runtime\"\n+ \"sync/atomic\"\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n@@ -127,6 +128,29 @@ func (f *FileMem) LoadFrom(r io.Reader) error {\nreturn err\n}\n+ // Try to map committed chunks concurrently: For any given chunk, either\n+ // this loop or the following one will mmap the chunk first and cache it in\n+ // f.mappings for the other, but this loop is likely to run ahead of the\n+ // other since it doesn't do any work between mmaps. The rest of this\n+ // function doesn't mutate f.usage, so it's safe to iterate concurrently.\n+ mapperDone := make(chan struct{})\n+ mapperCanceled := int32(0)\n+ go func() { // S/R-SAFE: see comment\n+ defer func() { close(mapperDone) }()\n+ for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\n+ if atomic.LoadInt32(&mapperCanceled) != 0 {\n+ return\n+ }\n+ if seg.Value().knownCommitted {\n+ f.forEachMappingSlice(seg.Range(), func(s []byte) {})\n+ }\n+ }\n+ }()\n+ defer func() {\n+ atomic.StoreInt32(&mapperCanceled, 1)\n+ <-mapperDone\n+ }()\n+\n// Load committed pages.\nfor seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {\nif !seg.Value().knownCommitted {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Map committed chunks concurrently in FileMem.LoadFrom.
PiperOrigin-RevId: 212345401
Change-Id: Iac626ee87ba312df88ab1019ade6ecd62c04c75c |
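Editor's note: the concurrency pattern used above can be sketched with stand-in names: a helper goroutine runs ahead of the main loop preparing items, and an atomic flag plus a done channel lets the caller cancel and join it. sync.Once stands in for the mapping cache; whichever side reaches an item first pays the cost, and the other finds it already prepared.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	items := []string{"a", "b", "c", "d"}

	once := make([]sync.Once, len(items))
	prepare := func(i int) {
		once[i].Do(func() { fmt.Println("prepared", items[i]) })
	}

	// Helper goroutine warms items ahead of the main loop.
	helperDone := make(chan struct{})
	helperCanceled := int32(0)
	go func() {
		defer close(helperDone)
		for i := range items {
			if atomic.LoadInt32(&helperCanceled) != 0 {
				return
			}
			prepare(i)
		}
	}()
	defer func() {
		// Cancel the helper and wait for it before returning.
		atomic.StoreInt32(&helperCanceled, 1)
		<-helperDone
	}()

	// Main loop does the real work, relying on items being prepared.
	for i := range items {
		prepare(i) // no-op if the helper got here first
		fmt.Println("processing", items[i])
	}
}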
259,992 | 11.09.2018 11:04:06 | 25,200 | c44bc6612fc4554d0aa4e484a46cd1f6b6a7b5c5 | Allow fstatat back in syscall filters | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -208,6 +208,7 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_MPROTECT: {},\nsyscall.SYS_MUNMAP: {},\nsyscall.SYS_NANOSLEEP: {},\n+ syscall.SYS_NEWFSTATAT: {},\nsyscall.SYS_POLL: {},\nsyscall.SYS_PREAD64: {},\nsyscall.SYS_PWRITE64: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow fstatat back in syscall filters
PiperOrigin-RevId: 212483372
Change-Id: If95f32a8e41126cf3dc8bd6c8b2fb0fcfefedc6d |
259,881 | 12.09.2018 10:50:22 | 25,200 | 0efde2bfbde2fea78134a32f5fb34332ec0ce531 | Remove getdents from filters
It was only used by whitelistfs, which was removed in | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -98,7 +98,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nseccomp.AllowValue(0),\n},\n},\n- syscall.SYS_GETDENTS64: {},\nsyscall.SYS_GETPID: {},\nunix.SYS_GETRANDOM: {},\nsyscall.SYS_GETSOCKOPT: []seccomp.Rule{\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove getdents from filters
It was only used by whitelistfs, which was removed in
bc81f3fe4a042a15343d2eab44da32d818ac1ade.
PiperOrigin-RevId: 212666374
Change-Id: Ia35e6dc9d68c1a3b015d5b5f71ea3e68e46c5bed |
259,891 | 12.09.2018 15:22:24 | 25,200 | 2eff1fdd061be9cfabc36532dda8cbefeb02e534 | runsc: Add exec flag that specifies where to save the sandbox-internal pid.
This is different from the existing -pid-file flag, which saves a host pid. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -87,6 +87,24 @@ type ExecArgs struct {\n// Exec runs a new task.\nfunc (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\n+ newTG, err := proc.execAsync(args)\n+ if err != nil {\n+ return err\n+ }\n+\n+ // Wait for completion.\n+ newTG.WaitExited()\n+ *waitStatus = newTG.ExitStatus().Status()\n+ return nil\n+}\n+\n+// ExecAsync runs a new task, but doesn't wait for it to finish. It is defined\n+// as a function rather than a method to avoid exposing execAsync as an RPC.\n+func ExecAsync(proc *Proc, args *ExecArgs) (*kernel.ThreadGroup, error) {\n+ return proc.execAsync(args)\n+}\n+\n+func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, error) {\n// Import file descriptors.\nl := limits.NewLimitSet()\nfdm := proc.Kernel.NewFDMap()\n@@ -121,7 +139,7 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\npaths := fs.GetPath(initArgs.Envv)\nf, err := proc.Kernel.RootMountNamespace().ResolveExecutablePath(ctx, initArgs.WorkingDirectory, initArgs.Argv[0], paths)\nif err != nil {\n- return fmt.Errorf(\"error finding executable %q in PATH %v: %v\", initArgs.Argv[0], paths, err)\n+ return nil, fmt.Errorf(\"error finding executable %q in PATH %v: %v\", initArgs.Argv[0], paths, err)\n}\ninitArgs.Filename = f\n}\n@@ -133,7 +151,7 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\n// Import the given file FD. This dups the FD as well.\nfile, err := host.ImportFile(ctx, int(f.Fd()), mounter, enableIoctl)\nif err != nil {\n- return err\n+ return nil, err\n}\ndefer file.DecRef()\n@@ -141,20 +159,11 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\nf.Close()\nif err := fdm.NewFDAt(kdefs.FD(appFD), file, kernel.FDFlags{}, l); err != nil {\n- return err\n+ return nil, err\n}\n}\n- // Start the new task.\n- newTG, err := proc.Kernel.CreateProcess(initArgs)\n- if err != nil {\n- return err\n- }\n-\n- // Wait for completion.\n- newTG.WaitExited()\n- *waitStatus = newTG.ExitStatus().Status()\n- return nil\n+ return proc.Kernel.CreateProcess(initArgs)\n}\n// PsArgs is the set of arguments to ps.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -41,9 +41,9 @@ const (\n// container used by \"runsc events\".\nContainerEvent = \"containerManager.Event\"\n- // ContainerExecute is the URPC endpoint for executing a command in a\n+ // ContainerExecuteAsync is the URPC endpoint for executing a command in a\n// container..\n- ContainerExecute = \"containerManager.Execute\"\n+ ContainerExecuteAsync = \"containerManager.ExecuteAsync\"\n// ContainerPause pauses the container.\nContainerPause = \"containerManager.Pause\"\n@@ -233,33 +233,40 @@ type ExecArgs struct {\nCID string\n}\n-// Execute runs a command on a created or running sandbox.\n-func (cm *containerManager) Execute(e *ExecArgs, waitStatus *uint32) error {\n- log.Debugf(\"containerManager.Execute: %+v\", *e)\n+// ExecuteAsync starts running a command on a created or running sandbox. It\n+// returns the pid of the new process.\n+func (cm *containerManager) ExecuteAsync(args *ExecArgs, pid *int32) error {\n+ log.Debugf(\"containerManager.ExecuteAsync: %+v\", args)\n// Get the container Root Dirent from the Task, since we must run this\n// process with the same Root.\ncm.l.mu.Lock()\n- tgid, ok := cm.l.containerRootTGIDs[e.CID]\n+ tgid, ok := cm.l.containerRootTGIDs[args.CID]\ncm.l.mu.Unlock()\nif !ok {\n- return fmt.Errorf(\"cannot exec in container %q: no such container\", e.CID)\n+ return fmt.Errorf(\"cannot exec in container %q: no such container\", args.CID)\n}\nt := cm.l.k.TaskSet().Root.TaskWithID(kernel.ThreadID(tgid))\nif t == nil {\n- return fmt.Errorf(\"cannot exec in container %q: no thread group with ID %d\", e.CID, tgid)\n+ return fmt.Errorf(\"cannot exec in container %q: no thread group with ID %d\", args.CID, tgid)\n}\nt.WithMuLocked(func(t *kernel.Task) {\n- e.Root = t.FSContext().RootDirectory()\n+ args.Root = t.FSContext().RootDirectory()\n})\n- if e.Root != nil {\n- defer e.Root.DecRef()\n+ if args.Root != nil {\n+ defer args.Root.DecRef()\n}\n+ // Start the process.\nproc := control.Proc{Kernel: cm.l.k}\n- if err := proc.Exec(&e.ExecArgs, waitStatus); err != nil {\n- return fmt.Errorf(\"error executing: %+v: %v\", e, err)\n+ newTG, err := control.ExecAsync(&proc, &args.ExecArgs)\n+ if err != nil {\n+ return fmt.Errorf(\"error executing: %+v: %v\", args, err)\n}\n+\n+ // Return the pid of the newly-created process.\n+ ts := cm.l.k.TaskSet()\n+ *pid = int32(ts.Root.IDOfThreadGroup(newTG))\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/exec.go",
"new_path": "runsc/cmd/exec.go",
"diff": "@@ -51,6 +51,7 @@ type Exec struct {\ndetach bool\nprocessPath string\npidFile string\n+ internalPidFile string\n// consoleSocket is the path to an AF_UNIX socket which will receive a\n// file descriptor referencing the master end of the console's\n@@ -97,6 +98,7 @@ func (ex *Exec) SetFlags(f *flag.FlagSet) {\nf.BoolVar(&ex.detach, \"detach\", false, \"detach from the container's process\")\nf.StringVar(&ex.processPath, \"process\", \"\", \"path to the process.json\")\nf.StringVar(&ex.pidFile, \"pid-file\", \"\", \"filename that the container pid will be written to\")\n+ f.StringVar(&ex.internalPidFile, \"internal-pid-file\", \"\", \"filename that the container-internal pid will be written to\")\nf.StringVar(&ex.consoleSocket, \"console-socket\", \"\", \"path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal\")\n}\n@@ -146,10 +148,25 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n}\n- ws, err := c.Execute(e)\n+ // Start the new process and get it pid.\n+ pid, err := c.Execute(e)\nif err != nil {\nFatalf(\"error getting processes for container: %v\", err)\n}\n+\n+ // Write the sandbox-internal pid if required.\n+ if ex.internalPidFile != \"\" {\n+ pidStr := []byte(strconv.Itoa(int(pid)))\n+ if err := ioutil.WriteFile(ex.internalPidFile, pidStr, 0644); err != nil {\n+ Fatalf(\"error writing internal pid file %q: %v\", ex.internalPidFile, err)\n+ }\n+ }\n+\n+ // Wait for the process to exit.\n+ ws, err := c.WaitPID(pid)\n+ if err != nil {\n+ Fatalf(\"error waiting on pid %d: %v\", pid, err)\n+ }\n*waitStatus = ws\nreturn subcommands.ExitSuccess\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -353,13 +353,14 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke\nreturn c.Wait()\n}\n-// Execute runs the specified command in the container.\n-func (c *Container) Execute(e *control.ExecArgs) (syscall.WaitStatus, error) {\n- log.Debugf(\"Execute in container %q, args: %+v\", c.ID, e)\n+// Execute runs the specified command in the container. It returns the pid of\n+// the newly created process.\n+func (c *Container) Execute(args *control.ExecArgs) (int32, error) {\n+ log.Debugf(\"Execute in container %q, args: %+v\", c.ID, args)\nif c.Status != Created && c.Status != Running {\nreturn 0, fmt.Errorf(\"cannot exec in container in state %s\", c.Status)\n}\n- return c.Sandbox.Execute(c.ID, e)\n+ return c.Sandbox.Execute(c.ID, args)\n}\n// Event returns events for the container.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -49,11 +49,11 @@ func init() {\n}\n// waitForProcessList waits for the given process list to show up in the container.\n-func waitForProcessList(s *Container, expected []*control.Process) error {\n+func waitForProcessList(cont *Container, expected []*control.Process) error {\nvar got []*control.Process\nfor start := time.Now(); time.Now().Sub(start) < 10*time.Second; {\nvar err error\n- got, err = s.Processes()\n+ got, err = cont.Processes()\nif err != nil {\nreturn fmt.Errorf(\"error getting process data from container: %v\", err)\n}\n@@ -485,12 +485,12 @@ func TestExec(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\nt.Fatalf(\"error starting container: %v\", err)\n}\n@@ -513,11 +513,11 @@ func TestExec(t *testing.T) {\n}\n// Verify that \"sleep 100\" is running.\n- if err := waitForProcessList(s, expectedPL[:1]); err != nil {\n+ if err := waitForProcessList(cont, expectedPL[:1]); err != nil {\nt.Error(err)\n}\n- execArgs := control.ExecArgs{\n+ args := &control.ExecArgs{\nFilename: \"/bin/sleep\",\nArgv: []string{\"sleep\", \"5\"},\nWorkingDirectory: \"/\",\n@@ -528,17 +528,19 @@ func TestExec(t *testing.T) {\n// First, start running exec (whick blocks).\nstatus := make(chan error, 1)\ngo func() {\n- exitStatus, err := s.Execute(&execArgs)\n+ exitStatus, err := cont.executeSync(args)\nif err != nil {\n+ log.Debugf(\"error executing: %v\", err)\nstatus <- err\n} else if exitStatus != 0 {\n+ log.Debugf(\"bad status: %d\", exitStatus)\nstatus <- fmt.Errorf(\"failed with exit status: %v\", exitStatus)\n} else {\nstatus <- nil\n}\n}()\n- if err := waitForProcessList(s, expectedPL); err != nil {\n+ if err := waitForProcessList(cont, expectedPL); err != nil {\nt.Fatal(err)\n}\n@@ -548,7 +550,7 @@ func TestExec(t *testing.T) {\nt.Fatalf(\"container timed out waiting for exec to finish.\")\ncase st := <-status:\nif st != nil {\n- t.Errorf(\"container failed to exec %v: %v\", execArgs, err)\n+ t.Errorf(\"container failed to exec %v: %v\", args, err)\n}\n}\n}\n@@ -884,15 +886,18 @@ func TestPauseResume(t *testing.T) {\n}\nscript := fmt.Sprintf(\"while [[ -f %q ]]; do sleep 0.1; done\", lock.Name())\n- execArgs := control.ExecArgs{\n+ args := &control.ExecArgs{\nFilename: \"/bin/bash\",\nArgv: []string{\"bash\", \"-c\", script},\nWorkingDirectory: \"/\",\nKUID: uid,\n}\n- // First, start running exec (which blocks).\n- go cont.Execute(&execArgs)\n+ // First, start running exec.\n+ _, err = cont.Execute(args)\n+ if err != nil {\n+ t.Fatalf(\"error executing: %v\", err)\n+ }\n// Verify that \"sleep 5\" is running.\nif err := waitForProcessList(cont, expectedPL); err != nil {\n@@ -1022,12 +1027,12 @@ func TestCapabilities(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\nt.Fatalf(\"error 
starting container: %v\", err)\n}\n@@ -1048,7 +1053,7 @@ func TestCapabilities(t *testing.T) {\nCmd: \"exe\",\n},\n}\n- if err := waitForProcessList(s, expectedPL[:1]); err != nil {\n+ if err := waitForProcessList(cont, expectedPL[:1]); err != nil {\nt.Fatalf(\"Failed to wait for sleep to start, err: %v\", err)\n}\n@@ -1064,7 +1069,7 @@ func TestCapabilities(t *testing.T) {\n// Need to traverse the intermediate directory.\nos.Chmod(rootDir, 0755)\n- execArgs := control.ExecArgs{\n+ args := &control.ExecArgs{\nFilename: exePath,\nArgv: []string{exePath},\nWorkingDirectory: \"/\",\n@@ -1074,17 +1079,17 @@ func TestCapabilities(t *testing.T) {\n}\n// \"exe\" should fail because we don't have the necessary permissions.\n- if _, err := s.Execute(&execArgs); err == nil {\n+ if _, err := cont.executeSync(args); err == nil {\nt.Fatalf(\"container executed without error, but an error was expected\")\n}\n// Now we run with the capability enabled and should succeed.\n- execArgs.Capabilities = &auth.TaskCapabilities{\n+ args.Capabilities = &auth.TaskCapabilities{\nEffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),\n}\n// \"exe\" should not fail this time.\n- if _, err := s.Execute(&execArgs); err != nil {\n- t.Fatalf(\"container failed to exec %v: %v\", execArgs, err)\n+ if _, err := cont.executeSync(args); err != nil {\n+ t.Fatalf(\"container failed to exec %v: %v\", args, err)\n}\n}\n}\n@@ -1404,11 +1409,11 @@ func TestContainerVolumeContentsShared(t *testing.T) {\nfilename := filepath.Join(dir, \"file\")\n// File does not exist yet. Reading from the sandbox should fail.\n- execArgsTestFile := control.ExecArgs{\n+ argsTestFile := &control.ExecArgs{\nFilename: \"/usr/bin/test\",\nArgv: []string{\"test\", \"-f\", filename},\n}\n- if ws, err := c.Execute(&execArgsTestFile); err != nil {\n+ if ws, err := c.executeSync(argsTestFile); err != nil {\nt.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() == 0 {\nt.Errorf(\"test %q exited with code %v, wanted not zero\", ws.ExitStatus(), err)\n@@ -1420,7 +1425,7 @@ func TestContainerVolumeContentsShared(t *testing.T) {\n}\n// Now we should be able to test the file from within the sandbox.\n- if ws, err := c.Execute(&execArgsTestFile); err != nil {\n+ if ws, err := c.executeSync(argsTestFile); err != nil {\nt.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\nt.Errorf(\"test %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n@@ -1433,18 +1438,18 @@ func TestContainerVolumeContentsShared(t *testing.T) {\n}\n// File should no longer exist at the old path within the sandbox.\n- if ws, err := c.Execute(&execArgsTestFile); err != nil {\n+ if ws, err := c.executeSync(argsTestFile); err != nil {\nt.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() == 0 {\nt.Errorf(\"test %q exited with code %v, wanted not zero\", filename, ws.ExitStatus())\n}\n// We should be able to test the new filename from within the sandbox.\n- execArgsTestNewFile := control.ExecArgs{\n+ argsTestNewFile := &control.ExecArgs{\nFilename: \"/usr/bin/test\",\nArgv: []string{\"test\", \"-f\", newFilename},\n}\n- if ws, err := c.Execute(&execArgsTestNewFile); err != nil {\n+ if ws, err := c.executeSync(argsTestNewFile); err != nil {\nt.Fatalf(\"unexpected error testing file %q: %v\", newFilename, err)\n} else if ws.ExitStatus() != 0 {\nt.Errorf(\"test %q exited with code %v, wanted zero\", newFilename, ws.ExitStatus())\n@@ -1456,20 +1461,20 @@ func 
TestContainerVolumeContentsShared(t *testing.T) {\n}\n// Renamed file should no longer exist at the old path within the sandbox.\n- if ws, err := c.Execute(&execArgsTestNewFile); err != nil {\n+ if ws, err := c.executeSync(argsTestNewFile); err != nil {\nt.Fatalf(\"unexpected error testing file %q: %v\", newFilename, err)\n} else if ws.ExitStatus() == 0 {\nt.Errorf(\"test %q exited with code %v, wanted not zero\", newFilename, ws.ExitStatus())\n}\n// Now create the file from WITHIN the sandbox.\n- execArgsTouch := control.ExecArgs{\n+ argsTouch := &control.ExecArgs{\nFilename: \"/usr/bin/touch\",\nArgv: []string{\"touch\", filename},\nKUID: auth.KUID(os.Getuid()),\nKGID: auth.KGID(os.Getgid()),\n}\n- if ws, err := c.Execute(&execArgsTouch); err != nil {\n+ if ws, err := c.executeSync(argsTouch); err != nil {\nt.Fatalf(\"unexpected error touching file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\nt.Errorf(\"touch %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n@@ -1486,11 +1491,11 @@ func TestContainerVolumeContentsShared(t *testing.T) {\n}\n// Delete the file from within the sandbox.\n- execArgsRemove := control.ExecArgs{\n+ argsRemove := &control.ExecArgs{\nFilename: \"/bin/rm\",\nArgv: []string{\"rm\", filename},\n}\n- if ws, err := c.Execute(&execArgsRemove); err != nil {\n+ if ws, err := c.executeSync(argsRemove); err != nil {\nt.Fatalf(\"unexpected error removing file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\nt.Errorf(\"remove %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n@@ -1547,6 +1552,19 @@ func TestGoferExits(t *testing.T) {\n}\n}\n+// executeSync synchronously executes a new process.\n+func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {\n+ pid, err := cont.Execute(args)\n+ if err != nil {\n+ return 0, fmt.Errorf(\"error executing: %v\", err)\n+ }\n+ ws, err := cont.WaitPID(pid)\n+ if err != nil {\n+ return 0, fmt.Errorf(\"error waiting: %v\", err)\n+ }\n+ return ws, nil\n+}\n+\nfunc TestMain(m *testing.M) {\ntestutil.RunAsRoot(m)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -187,8 +187,9 @@ func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {\nreturn pl, nil\n}\n-// Execute runs the specified command in the container.\n-func (s *Sandbox) Execute(cid string, e *control.ExecArgs) (syscall.WaitStatus, error) {\n+// Execute runs the specified command in the container. It returns the pid of\n+// the newly created process.\n+func (s *Sandbox) Execute(cid string, args *control.ExecArgs) (int32, error) {\nlog.Debugf(\"Executing new process in container %q in sandbox %q\", cid, s.ID)\nconn, err := s.sandboxConnect()\nif err != nil {\n@@ -196,20 +197,14 @@ func (s *Sandbox) Execute(cid string, e *control.ExecArgs) (syscall.WaitStatus,\n}\ndefer conn.Close()\n- ea := &boot.ExecArgs{\n- ExecArgs: *e,\n- CID: cid,\n- }\n+ rpcArgs := &boot.ExecArgs{ExecArgs: *args, CID: cid}\n// Send a message to the sandbox control server to start the container.\n- var waitStatus uint32\n- // TODO: Pass in the container id (cid) here. The sandbox\n- // should execute in the context of that container.\n- if err := conn.Call(boot.ContainerExecute, ea, &waitStatus); err != nil {\n+ var pid int32\n+ if err := conn.Call(boot.ContainerExecuteAsync, rpcArgs, &pid); err != nil {\nreturn 0, fmt.Errorf(\"error executing in sandbox: %v\", err)\n}\n-\n- return syscall.WaitStatus(waitStatus), nil\n+ return pid, nil\n}\n// Event retrieves stats about the sandbox such as memory and CPU utilization.\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Add exec flag that specifies where to save the sandbox-internal pid.
This is different from the existing -pid-file flag, which saves a host pid.
PiperOrigin-RevId: 212713968
Change-Id: I2c486de8dd5cfd9b923fb0970165ef7c5fc597f0 |
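Since the Execute RPC in the record above now returns the sandbox-internal pid, a caller can pair it with WaitPID to obtain an exit status, which is what the test helper in the diff does. Below is a minimal, self-contained sketch of that pattern; the container type and both method bodies are stubs standing in for the real runsc types, not the actual implementation.

```go
package example

import (
	"fmt"
	"syscall"
)

// container is a stand-in for runsc's Container; only the two methods used
// below are assumed, mirroring the signatures introduced in the diff.
type container struct{}

// Execute starts a process and returns its sandbox-internal pid (stubbed).
func (c *container) Execute(args interface{}) (int32, error) { return 1, nil }

// WaitPID blocks until the given sandbox-internal pid exits (stubbed).
func (c *container) WaitPID(pid int32) (syscall.WaitStatus, error) { return 0, nil }

// executeSync is the exec-then-wait pattern: start the process, then wait on
// the returned pid for its exit status.
func executeSync(c *container, args interface{}) (syscall.WaitStatus, error) {
	pid, err := c.Execute(args)
	if err != nil {
		return 0, fmt.Errorf("error executing: %v", err)
	}
	ws, err := c.WaitPID(pid)
	if err != nil {
		return 0, fmt.Errorf("error waiting on pid %d: %v", pid, err)
	}
	return ws, nil
}
```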
259,948 | 12.09.2018 17:23:56 | 25,200 | 9dec7a3db99d8c7045324bc6d8f0c27e88407f6c | compressio: stop worker-pool reference / dependency loop. | [
{
"change_type": "MODIFY",
"old_path": "pkg/compressio/compressio.go",
"new_path": "pkg/compressio/compressio.go",
"diff": "@@ -127,7 +127,7 @@ type result struct {\n// The goroutine will exit when input is closed, and the goroutine will close\n// output.\ntype worker struct {\n- pool *pool\n+ hashPool *hashPool\ninput chan *chunk\noutput chan result\n}\n@@ -139,8 +139,8 @@ func (w *worker) work(compress bool, level int) {\nvar h hash.Hash\nfor c := range w.input {\n- if h == nil && w.pool.key != nil {\n- h = w.pool.getHash()\n+ if h == nil && w.hashPool != nil {\n+ h = w.hashPool.getHash()\n}\nif compress {\nmw := io.Writer(c.compressed)\n@@ -201,6 +201,42 @@ func (w *worker) work(compress bool, level int) {\n}\n}\n+type hashPool struct {\n+ // mu protexts the hash list.\n+ mu sync.Mutex\n+\n+ // key is the key used to create hash objects.\n+ key []byte\n+\n+ // hashes is the hash object free list. Note that this cannot be\n+ // globally shared across readers or writers, as it is key-specific.\n+ hashes []hash.Hash\n+}\n+\n+// getHash gets a hash object for the pool. It should only be called when the\n+// pool key is non-nil.\n+func (p *hashPool) getHash() hash.Hash {\n+ p.mu.Lock()\n+ defer p.mu.Unlock()\n+\n+ if len(p.hashes) == 0 {\n+ return hmac.New(sha256.New, p.key)\n+ }\n+\n+ h := p.hashes[len(p.hashes)-1]\n+ p.hashes = p.hashes[:len(p.hashes)-1]\n+ return h\n+}\n+\n+func (p *hashPool) putHash(h hash.Hash) {\n+ h.Reset()\n+\n+ p.mu.Lock()\n+ defer p.mu.Unlock()\n+\n+ p.hashes = append(p.hashes, h)\n+}\n+\n// pool is common functionality for reader/writers.\ntype pool struct {\n// workers are the compression/decompression workers.\n@@ -210,16 +246,6 @@ type pool struct {\n// stream and is shared across both the reader and writer.\nchunkSize uint32\n- // key is the key used to create hash objects.\n- key []byte\n-\n- // hashMu protexts the hash list.\n- hashMu sync.Mutex\n-\n- // hashes is the hash object free list. Note that this cannot be\n- // globally shared across readers or writers, as it is key-specific.\n- hashes []hash.Hash\n-\n// mu protects below; it is generally the responsibility of users to\n// acquire this mutex before calling any methods on the pool.\nmu sync.Mutex\n@@ -236,17 +262,24 @@ type pool struct {\n// lasSum records the hash of the last chunk processed.\nlastSum []byte\n+\n+ // hashPool is the hash object pool. It cannot be embedded into pool\n+ // itself as worker refers to it and that would stop pool from being\n+ // GCed.\n+ hashPool *hashPool\n}\n// init initializes the worker pool.\n//\n// This should only be called once.\nfunc (p *pool) init(key []byte, workers int, compress bool, level int) {\n- p.key = key\n+ if key != nil {\n+ p.hashPool = &hashPool{key: key}\n+ }\np.workers = make([]worker, workers)\nfor i := 0; i < len(p.workers); i++ {\np.workers[i] = worker{\n- pool: p,\n+ hashPool: p.hashPool,\ninput: make(chan *chunk, 1),\noutput: make(chan result, 1),\n}\n@@ -261,30 +294,7 @@ func (p *pool) stop() {\nclose(p.workers[i].input)\n}\np.workers = nil\n-}\n-\n-// getHash gets a hash object for the pool. 
It should only be called when the\n-// pool key is non-nil.\n-func (p *pool) getHash() hash.Hash {\n- p.hashMu.Lock()\n- defer p.hashMu.Unlock()\n-\n- if len(p.hashes) == 0 {\n- return hmac.New(sha256.New, p.key)\n- }\n-\n- h := p.hashes[len(p.hashes)-1]\n- p.hashes = p.hashes[:len(p.hashes)-1]\n- return h\n-}\n-\n-func (p *pool) putHash(h hash.Hash) {\n- h.Reset()\n-\n- p.hashMu.Lock()\n- defer p.hashMu.Unlock()\n-\n- p.hashes = append(p.hashes, h)\n+ p.hashPool = nil\n}\n// handleResult calls the callback.\n@@ -361,11 +371,11 @@ func NewReader(in io.Reader, key []byte) (io.Reader, error) {\nreturn nil, err\n}\n- if r.key != nil {\n- h := r.getHash()\n+ if r.hashPool != nil {\n+ h := r.hashPool.getHash()\nbinary.WriteUint32(h, binary.BigEndian, r.chunkSize)\nr.lastSum = h.Sum(nil)\n- r.putHash(h)\n+ r.hashPool.putHash(h)\nsum := make([]byte, len(r.lastSum))\nif _, err := io.ReadFull(r.in, sum); err != nil {\nreturn nil, err\n@@ -477,7 +487,7 @@ func (r *reader) Read(p []byte) (int, error) {\n}\nvar sum []byte\n- if r.key != nil {\n+ if r.hashPool != nil {\nsum = make([]byte, len(r.lastSum))\nif _, err := io.ReadFull(r.in, sum); err != nil {\nif err == io.EOF {\n@@ -573,11 +583,11 @@ func NewWriter(out io.Writer, key []byte, chunkSize uint32, level int) (io.Write\nreturn nil, err\n}\n- if w.key != nil {\n- h := w.getHash()\n+ if w.hashPool != nil {\n+ h := w.hashPool.getHash()\nbinary.WriteUint32(h, binary.BigEndian, chunkSize)\nw.lastSum = h.Sum(nil)\n- w.putHash(h)\n+ w.hashPool.putHash(h)\nif _, err := io.CopyN(w.out, bytes.NewReader(w.lastSum), int64(len(w.lastSum))); err != nil {\nreturn nil, err\n}\n@@ -600,10 +610,10 @@ func (w *writer) flush(c *chunk) error {\nreturn err\n}\n- if w.key != nil {\n+ if w.hashPool != nil {\nio.CopyN(c.h, bytes.NewReader(w.lastSum), int64(len(w.lastSum)))\nsum := c.h.Sum(nil)\n- w.putHash(c.h)\n+ w.hashPool.putHash(c.h)\nc.h = nil\nif _, err := io.CopyN(w.out, bytes.NewReader(sum), int64(len(sum))); err != nil {\nreturn err\n"
}
] | Go | Apache License 2.0 | google/gvisor | compressio: stop worker-pool reference / dependency loop.
PiperOrigin-RevId: 212732300
Change-Id: I9a0b9b7c28e7b7439d34656dd4f2f6114d173e22 |
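The fix above moves the hash free list into its own hashPool type so that workers hold only that small object rather than the whole pool. A self-contained sketch of the extracted structure follows, assuming (as in the diff) that HMAC-SHA256 hash objects are what get recycled.

```go
package example

import (
	"crypto/hmac"
	"crypto/sha256"
	"hash"
	"sync"
)

// hashPool mirrors the free list split out of pool: workers only need this
// small object, so holding it does not keep the whole pool reachable.
type hashPool struct {
	mu     sync.Mutex  // guards hashes
	key    []byte      // HMAC key used to build new hash objects
	hashes []hash.Hash // free list; key-specific, so not globally shared
}

// getHash pops a free hash object, or builds a new one if the list is empty.
func (p *hashPool) getHash() hash.Hash {
	p.mu.Lock()
	defer p.mu.Unlock()
	if n := len(p.hashes); n > 0 {
		h := p.hashes[n-1]
		p.hashes = p.hashes[:n-1]
		return h
	}
	return hmac.New(sha256.New, p.key)
}

// putHash resets a hash object and returns it to the free list.
func (p *hashPool) putHash(h hash.Hash) {
	h.Reset()
	p.mu.Lock()
	defer p.mu.Unlock()
	p.hashes = append(p.hashes, h)
}
```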
259,881 | 13.09.2018 14:06:34 | 25,200 | 9c6b38e2952650cba32e21d0719bcb0ffdc10860 | Format struct itimerspec | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/linux64.go",
"new_path": "pkg/sentry/strace/linux64.go",
"diff": "@@ -240,8 +240,8 @@ var linuxAMD64 = SyscallMap{\n220: makeSyscallInfo(\"semtimedop\", Hex, Hex, Hex, Hex),\n221: makeSyscallInfo(\"fadvise64\", Hex, Hex, Hex, Hex),\n222: makeSyscallInfo(\"timer_create\", Hex, Hex, Hex),\n- 223: makeSyscallInfo(\"timer_settime\", Hex, Hex, Hex, Hex),\n- 224: makeSyscallInfo(\"timer_gettime\", Hex, Hex),\n+ 223: makeSyscallInfo(\"timer_settime\", Hex, Hex, ItimerSpec, PostItimerSpec),\n+ 224: makeSyscallInfo(\"timer_gettime\", Hex, PostItimerSpec),\n225: makeSyscallInfo(\"timer_getoverrun\", Hex),\n226: makeSyscallInfo(\"timer_delete\", Hex),\n227: makeSyscallInfo(\"clock_settime\", Hex, Timespec),\n@@ -303,8 +303,8 @@ var linuxAMD64 = SyscallMap{\n283: makeSyscallInfo(\"timerfd_create\", Hex, Hex),\n284: makeSyscallInfo(\"eventfd\", Hex),\n285: makeSyscallInfo(\"fallocate\", Hex, Hex, Hex, Hex),\n- 286: makeSyscallInfo(\"timerfd_settime\", Hex, Hex, Hex, Hex),\n- 287: makeSyscallInfo(\"timerfd_gettime\", Hex, Hex),\n+ 286: makeSyscallInfo(\"timerfd_settime\", Hex, Hex, ItimerSpec, PostItimerSpec),\n+ 287: makeSyscallInfo(\"timerfd_gettime\", Hex, PostItimerSpec),\n288: makeSyscallInfo(\"accept4\", Hex, PostSockAddr, SockLen, SockFlags),\n289: makeSyscallInfo(\"signalfd4\", Hex, Hex, Hex, Hex),\n290: makeSyscallInfo(\"eventfd2\", Hex, Hex),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/strace.go",
"new_path": "pkg/sentry/strace/strace.go",
"diff": "@@ -224,6 +224,16 @@ func itimerval(t *kernel.Task, addr usermem.Addr) string {\nreturn fmt.Sprintf(\"%#x {interval=%s, value=%s}\", addr, interval, value)\n}\n+func itimerspec(t *kernel.Task, addr usermem.Addr) string {\n+ if addr == 0 {\n+ return \"null\"\n+ }\n+\n+ interval := timespec(t, addr)\n+ value := timespec(t, addr+usermem.Addr(binary.Size(linux.Timespec{})))\n+ return fmt.Sprintf(\"%#x {interval=%s, value=%s}\", addr, interval, value)\n+}\n+\nfunc stringVector(t *kernel.Task, addr usermem.Addr) string {\nvec, err := t.CopyInVector(addr, slinux.ExecMaxElemSize, slinux.ExecMaxTotalSize)\nif err != nil {\n@@ -296,6 +306,8 @@ func (i *SyscallInfo) pre(t *kernel.Task, args arch.SyscallArguments, maximumBlo\noutput = append(output, utimensTimespec(t, args[arg].Pointer()))\ncase ItimerVal:\noutput = append(output, itimerval(t, args[arg].Pointer()))\n+ case ItimerSpec:\n+ output = append(output, itimerspec(t, args[arg].Pointer()))\ncase Timeval:\noutput = append(output, timeval(t, args[arg].Pointer()))\ncase Utimbuf:\n@@ -362,6 +374,8 @@ func (i *SyscallInfo) post(t *kernel.Task, args arch.SyscallArguments, rval uint\noutput[arg] = timespec(t, args[arg].Pointer())\ncase PostItimerVal:\noutput[arg] = itimerval(t, args[arg].Pointer())\n+ case PostItimerSpec:\n+ output[arg] = itimerspec(t, args[arg].Pointer())\ncase Timeval:\noutput[arg] = timeval(t, args[arg].Pointer())\ncase Rusage:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/syscalls.go",
"new_path": "pkg/sentry/strace/syscalls.go",
"diff": "@@ -132,10 +132,17 @@ const (\n// ItimerVal is a pointer to a struct itimerval.\nItimerVal\n- // ItimerVal is a pointer to a struct itimerval, formatted after\n+ // PostItimerVal is a pointer to a struct itimerval, formatted after\n// syscall execution.\nPostItimerVal\n+ // ItimerSpec is a pointer to a struct itimerspec.\n+ ItimerSpec\n+\n+ // PostItimerSpec is a pointer to a struct itimerspec, formatted after\n+ // syscall execution.\n+ PostItimerSpec\n+\n// Timeval is a pointer to a struct timeval, formatted before and after\n// syscall execution.\nTimeval\n"
}
] | Go | Apache License 2.0 | google/gvisor | Format struct itimerspec
PiperOrigin-RevId: 212874745
Change-Id: I0c3e8e6a9e8976631cee03bf0b8891b336ddb8c8 |
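For reference, struct itimerspec is just two timespecs, it_interval followed by it_value, which is why the formatter above reads a second timespec at addr plus the size of the first. A rough sketch of the same rendering outside the sentry; the exact timespec formatting here is illustrative, not strace's.

```go
package example

import "fmt"

// Timespec and ItimerSpec mirror the layout of the Linux structures used in
// the diff: it_interval comes first, then it_value.
type Timespec struct {
	Sec  int64
	Nsec int64
}

type ItimerSpec struct {
	Interval Timespec // it_interval
	Value    Timespec // it_value
}

// formatItimerSpec renders the struct in the "{interval=..., value=...}" shape
// the strace change produces.
func formatItimerSpec(its ItimerSpec) string {
	f := func(ts Timespec) string { return fmt.Sprintf("%ds %dns", ts.Sec, ts.Nsec) }
	return fmt.Sprintf("{interval=%s, value=%s}", f(its.Interval), f(its.Value))
}
```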
259,985 | 13.09.2018 15:15:33 | 25,200 | adf8f339703922211886d3e5588160f65bc131b3 | Extend memory usage events to report mapped memory usage. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/memevent/memory_events.go",
"new_path": "pkg/sentry/kernel/memevent/memory_events.go",
"diff": "@@ -94,5 +94,8 @@ func (m *MemoryEvents) emit() {\nsnapshot, _ := usage.MemoryAccounting.Copy()\ntotal := totalPlatform + snapshot.Mapped\n- eventchannel.Emit(&pb.MemoryUsageEvent{Total: total})\n+ eventchannel.Emit(&pb.MemoryUsageEvent{\n+ Mapped: snapshot.Mapped,\n+ Total: total,\n+ })\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/memevent/memory_events.proto",
"new_path": "pkg/sentry/kernel/memevent/memory_events.proto",
"diff": "@@ -22,4 +22,8 @@ message MemoryUsageEvent {\n// The total memory usage of the sandboxed application in bytes, calculated\n// using the 'fast' method.\nuint64 total = 1;\n+\n+ // Memory used to back memory-mapped regions for files in the application, in\n+ // bytes. This corresponds to the usage.MemoryKind.Mapped memory type.\n+ uint64 mapped = 2;\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Extend memory usage events to report mapped memory usage.
PiperOrigin-RevId: 212887555
Change-Id: I3545383ce903cbe9f00d9b5288d9ef9a049b9f4f |
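The event itself is tiny: a total plus the mapped component. Below is a hedged sketch of a periodic emitter with the same shape, where snapshot and emit are placeholder callbacks standing in for usage.MemoryAccounting.Copy and eventchannel.Emit; neither name is part of the real API surface shown here.

```go
package example

import "time"

// MemoryUsageEvent mirrors the two fields of the proto message above.
type MemoryUsageEvent struct {
	Total  uint64 // 'fast' total usage in bytes
	Mapped uint64 // bytes backing memory-mapped file regions
}

// emitEvery periodically snapshots memory usage and emits an event until
// stop is closed.
func emitEvery(d time.Duration, snapshot func() (total, mapped uint64), emit func(MemoryUsageEvent), stop <-chan struct{}) {
	t := time.NewTicker(d)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			total, mapped := snapshot()
			emit(MemoryUsageEvent{Total: total, Mapped: mapped})
		case <-stop:
			return
		}
	}
}
```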
259,854 | 13.09.2018 19:11:12 | 25,200 | 29a7271f5da9fdb7b4a9a6c9ea61421ce6844a73 | Plumb monotonic time to netstack
Netstack needs to be portable, so this seems to be preferable to using raw
system calls. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -943,6 +943,8 @@ func (k *Kernel) SetExitError(err error) {\n}\n}\n+var _ tcpip.Clock = (*Kernel)(nil)\n+\n// NowNanoseconds implements tcpip.Clock.NowNanoseconds.\nfunc (k *Kernel) NowNanoseconds() int64 {\nnow, err := k.timekeeper.GetTime(sentrytime.Realtime)\n@@ -952,6 +954,15 @@ func (k *Kernel) NowNanoseconds() int64 {\nreturn now\n}\n+// NowMonotonic implements tcpip.Clock.NowMonotonic.\n+func (k *Kernel) NowMonotonic() int64 {\n+ now, err := k.timekeeper.GetTime(sentrytime.Monotonic)\n+ if err != nil {\n+ panic(\"Kernel.NowMonotonic: \" + err.Error())\n+ }\n+ return now\n+}\n+\n// SupervisorContext returns a Context with maximum privileges in k. It should\n// only be used by goroutines outside the control of the emulated kernel\n// defined by e.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/BUILD",
"new_path": "pkg/tcpip/BUILD",
"diff": "@@ -4,7 +4,10 @@ load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\ngo_library(\nname = \"tcpip\",\n- srcs = [\"tcpip.go\"],\n+ srcs = [\n+ \"tcpip.go\",\n+ \"time_unsafe.go\",\n+ ],\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/tcpip\",\nvisibility = [\"//visibility:public\"],\ndeps = [\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -123,16 +123,11 @@ func (e ErrSaveRejection) Error() string {\n// time, but never for netstack internal timekeeping.\ntype Clock interface {\n// NowNanoseconds returns the current real time as a number of\n- // nanoseconds since some epoch.\n+ // nanoseconds since the Unix epoch.\nNowNanoseconds() int64\n-}\n-\n-// StdClock implements Clock with the time package.\n-type StdClock struct{}\n-// NowNanoseconds implements Clock.NowNanoseconds.\n-func (*StdClock) NowNanoseconds() int64 {\n- return time.Now().UnixNano()\n+ // NowMonotonic returns a monotonic time value.\n+ NowMonotonic() int64\n}\n// Address is a byte slice cast as a string that represents the address of a\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/time_unsafe.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build go1.9\n+// +build !go1.12\n+\n+package tcpip\n+\n+import (\n+ _ \"time\" // Used with go:linkname.\n+ _ \"unsafe\" // Required for go:linkname.\n+)\n+\n+// StdClock implements Clock with the time package.\n+type StdClock struct{}\n+\n+var _ Clock = (*StdClock)(nil)\n+\n+//go:linkname now time.now\n+func now() (sec int64, nsec int32, mono int64)\n+\n+// NowNanoseconds implements Clock.NowNanoseconds.\n+func (*StdClock) NowNanoseconds() int64 {\n+ sec, nsec, _ := now()\n+ return sec*1e9 + int64(nsec)\n+}\n+\n+// NowMonotonic implements Clock.NowMonotonic.\n+func (*StdClock) NowMonotonic() int64 {\n+ _, _, mono := now()\n+ return mono\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Plumb monotonic time to netstack
Netstack needs to be portable, so this seems to be preferable to using raw
system calls.
PiperOrigin-RevId: 212917409
Change-Id: I7b2073e7db4b4bf75300717ca23aea4c15be944c |
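For code that cannot (or prefers not to) link against time.now, the same two-method Clock can be satisfied portably by leaning on the monotonic reading the time package embeds in a time.Time. This is only a sketch of an alternative approach, not what the diff ships.

```go
package example

import "time"

// Clock copies the two-method interface from the diff.
type Clock interface {
	NowNanoseconds() int64 // nanoseconds since the Unix epoch
	NowMonotonic() int64   // an arbitrary-origin monotonic value
}

// stdClock is a purely portable sketch: monotonic values are measured
// relative to a fixed origin via time.Since, which uses the monotonic
// reading stored in origin.
type stdClock struct {
	origin time.Time
}

func newStdClock() *stdClock { return &stdClock{origin: time.Now()} }

func (c *stdClock) NowNanoseconds() int64 { return time.Now().UnixNano() }

func (c *stdClock) NowMonotonic() int64 { return int64(time.Since(c.origin)) }

// Compile-time interface check, in the same style as the diff.
var _ Clock = (*stdClock)(nil)
```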
259,981 | 13.09.2018 21:46:03 | 25,200 | faa34a0738456f5328cf99de13622a150042776d | platform/kvm: Get max vcpu number dynamically by ioctl
Old kernel versions, such as 4.4, only support 255 vCPUs.
When gVisor runs on these kernels, it can panic because the
vCPU id and vCPU count exceed max_vcpus.
Use ioctl(vmfd, _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) to get the
maximum number of vCPUs dynamically. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/address_space.go",
"new_path": "pkg/sentry/platform/kvm/address_space.go",
"diff": "@@ -26,31 +26,26 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n-type vCPUBitArray [(_KVM_NR_VCPUS + 63) / 64]uint64\n-\n// dirtySet tracks vCPUs for invalidation.\ntype dirtySet struct {\n- vCPUs vCPUBitArray\n+ vCPUs []uint64\n}\n// forEach iterates over all CPUs in the dirty set.\nfunc (ds *dirtySet) forEach(m *machine, fn func(c *vCPU)) {\n- var localSet vCPUBitArray\n- for index := 0; index < len(ds.vCPUs); index++ {\n- // Clear the dirty set, copy to the local one.\n- localSet[index] = atomic.SwapUint64(&ds.vCPUs[index], 0)\n- }\n-\nm.mu.RLock()\ndefer m.mu.RUnlock()\n- for _, c := range m.vCPUs {\n- index := uint64(c.id) / 64\n- bit := uint64(1) << uint(c.id%64)\n-\n- // Call the function if it was set.\n- if localSet[index]&bit != 0 {\n- fn(c)\n+ for index := range ds.vCPUs {\n+ mask := atomic.SwapUint64(&ds.vCPUs[index], 0)\n+ if mask != 0 {\n+ for bit := 0; bit < 64; bit++ {\n+ if mask&(1<<uint64(bit)) == 0 {\n+ continue\n+ }\n+ id := 64*index + bit\n+ fn(m.vCPUsByID[id])\n+ }\n}\n}\n}\n@@ -92,7 +87,7 @@ type addressSpace struct {\npageTables *pagetables.PageTables\n// dirtySet is the set of dirty vCPUs.\n- dirtySet dirtySet\n+ dirtySet *dirtySet\n// files contains files mapped in the host address space.\n//\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm.go",
"new_path": "pkg/sentry/platform/kvm/kvm.go",
"diff": "@@ -143,6 +143,7 @@ func (k *KVM) NewAddressSpace(_ interface{}) (platform.AddressSpace, <-chan stru\nfilemem: k.FileMem,\nmachine: k.machine,\npageTables: pageTables,\n+ dirtySet: k.machine.newDirtySet(),\n}, nil, nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm_const.go",
"new_path": "pkg/sentry/platform/kvm/kvm_const.go",
"diff": "@@ -25,6 +25,7 @@ const (\n_KVM_SET_TSS_ADDR = 0xae47\n_KVM_RUN = 0xae80\n_KVM_NMI = 0xae9a\n+ _KVM_CHECK_EXTENSION = 0xae03\n_KVM_INTERRUPT = 0x4004ae86\n_KVM_SET_MSRS = 0x4008ae89\n_KVM_SET_USER_MEMORY_REGION = 0x4020ae46\n@@ -49,9 +50,14 @@ const (\n_KVM_EXIT_INTERNAL_ERROR = 0x11\n)\n+// KVM capability options.\n+const (\n+ _KVM_CAP_MAX_VCPUS = 0x42\n+)\n+\n// KVM limits.\nconst (\n- _KVM_NR_VCPUS = 0x100\n+ _KVM_NR_VCPUS = 0xff\n_KVM_NR_INTERRUPTS = 0x100\n_KVM_NR_CPUID_ENTRIES = 0x100\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/atomicbitops\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables\"\n@@ -55,6 +56,12 @@ type machine struct {\n//\n// These are populated dynamically.\nvCPUs map[uint64]*vCPU\n+\n+ // vCPUsByID are the machine vCPUs, can be indexed by the vCPU's ID.\n+ vCPUsByID map[int]*vCPU\n+\n+ // maxVCPUs is the maximum number of vCPUs supported by the machine.\n+ maxVCPUs int\n}\nconst (\n@@ -135,6 +142,7 @@ func (m *machine) newVCPU() *vCPU {\nc.CPU.Init(&m.kernel)\nc.CPU.KernelSyscall = bluepillSyscall\nc.CPU.KernelException = bluepillException\n+ m.vCPUsByID[c.id] = c\n// Ensure the signal mask is correct.\nif err := c.setSignalMask(); err != nil {\n@@ -162,12 +170,21 @@ func newMachine(vm int) (*machine, error) {\nm := &machine{\nfd: vm,\nvCPUs: make(map[uint64]*vCPU),\n+ vCPUsByID: make(map[int]*vCPU),\n}\nm.available.L = &m.mu\nm.kernel.Init(ring0.KernelOpts{\nPageTables: pagetables.New(newAllocator()),\n})\n+ maxVCPUs, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS)\n+ if errno != 0 {\n+ m.maxVCPUs = _KVM_NR_VCPUS\n+ } else {\n+ m.maxVCPUs = int(maxVCPUs)\n+ }\n+ log.Debugf(\"The maximum number of vCPUs is %d.\", m.maxVCPUs)\n+\n// Apply the physical mappings. Note that these mappings may point to\n// guest physical addresses that are not actually available. These\n// physical pages are mapped on demand, see kernel_unsafe.go.\n@@ -315,7 +332,7 @@ func (m *machine) Get() *vCPU {\n}\n// Create a new vCPU (maybe).\n- if len(m.vCPUs) < _KVM_NR_VCPUS {\n+ if len(m.vCPUs) < m.maxVCPUs {\nc := m.newVCPU()\nc.lock()\nm.vCPUs[tid] = c\n@@ -365,6 +382,13 @@ func (m *machine) Put(c *vCPU) {\nm.available.Signal()\n}\n+// newDirtySet returns a new dirty set.\n+func (m *machine) newDirtySet() *dirtySet {\n+ return &dirtySet{\n+ vCPUs: make([]uint64, (m.maxVCPUs+63)/64, (m.maxVCPUs+63)/64),\n+ }\n+}\n+\n// lock marks the vCPU as in user mode.\n//\n// This should only be called directly when known to be safe, i.e. when\n"
}
] | Go | Apache License 2.0 | google/gvisor | platform/kvm: Get max vcpu number dynamically by ioctl
Old kernel versions, such as 4.4, only support 255 vCPUs.
When gVisor runs on these kernels, it can panic because the
vCPU id and vCPU count exceed max_vcpus.
Use ioctl(vmfd, _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS) to get the
maximum number of vCPUs dynamically.
Change-Id: I50dd859a11b1c2cea854a8e27d4bf11a411aa45c
PiperOrigin-RevId: 212929704 |
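The capability query itself is a single ioctl on the VM file descriptor. Here is a minimal, Linux-only sketch mirroring the fallback logic added above; the constants are copied from the diff, and the fallback simply reuses the static limit.

```go
package example

import "syscall"

// Constants copied from the diff: the ioctl number and the capability id.
const (
	_KVM_CHECK_EXTENSION = 0xae03
	_KVM_CAP_MAX_VCPUS   = 0x42
	_KVM_NR_VCPUS        = 0xff // conservative fallback
)

// maxVCPUs asks the VM file descriptor how many vCPUs the host supports,
// falling back to the static limit if the ioctl fails.
func maxVCPUs(vmFD int) int {
	n, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(vmFD), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS)
	if errno != 0 {
		return _KVM_NR_VCPUS
	}
	return int(n)
}
```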
259,885 | 14.09.2018 11:09:41 | 25,200 | 0380bcb3a4125723dc5248f70174ff64fb1942a2 | Fix interaction between rt_sigtimedwait and ignored signals. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -108,9 +108,12 @@ type Task struct {\n// goroutine.\nsignalMask linux.SignalSet\n- // FIXME: An equivalent to task_struct::real_blocked is needed\n- // to prevent signals that are ignored, but transiently unblocked by\n- // sigtimedwait(2), from being dropped in Task.sendSignalTimerLocked.\n+ // If the task goroutine is currently executing Task.sigtimedwait,\n+ // realSignalMask is the previous value of signalMask, which has temporarily\n+ // been replaced by Task.sigtimedwait. Otherwise, realSignalMask is 0.\n+ //\n+ // realSignalMask is exclusive to the task goroutine.\n+ realSignalMask linux.SignalSet\n// If haveSavedSignalMask is true, savedSignalMask is the signal mask that\n// should be applied after the task has either delivered one signal to a\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_signals.go",
"new_path": "pkg/sentry/kernel/task_signals.go",
"diff": "@@ -19,6 +19,7 @@ package kernel\nimport (\n\"fmt\"\n\"sync/atomic\"\n+ \"time\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n@@ -119,25 +120,11 @@ var UnblockableSignals = linux.MakeSignalSet(linux.SIGKILL, linux.SIGSTOP)\n// StopSignals is the set of signals whose default action is SignalActionStop.\nvar StopSignals = linux.MakeSignalSet(linux.SIGSTOP, linux.SIGTSTP, linux.SIGTTIN, linux.SIGTTOU)\n-// dequeueSignalLocked returns a pending unmasked signal. If there are no\n-// pending unmasked signals, dequeueSignalLocked returns nil.\n+// dequeueSignalLocked returns a pending signal that is *not* included in mask.\n+// If there are no pending unmasked signals, dequeueSignalLocked returns nil.\n//\n// Preconditions: t.tg.signalHandlers.mu must be locked.\n-func (t *Task) dequeueSignalLocked() *arch.SignalInfo {\n- if info := t.pendingSignals.dequeue(t.signalMask); info != nil {\n- return info\n- }\n- return t.tg.pendingSignals.dequeue(t.signalMask)\n-}\n-\n-// TakeSignal returns a pending signal not blocked by mask. Signal handlers are\n-// not affected. If there are no pending signals not blocked by mask,\n-// TakeSignal returns a nil SignalInfo.\n-func (t *Task) TakeSignal(mask linux.SignalSet) *arch.SignalInfo {\n- t.tg.pidns.owner.mu.RLock()\n- defer t.tg.pidns.owner.mu.RUnlock()\n- t.tg.signalHandlers.mu.Lock()\n- defer t.tg.signalHandlers.mu.Unlock()\n+func (t *Task) dequeueSignalLocked(mask linux.SignalSet) *arch.SignalInfo {\nif info := t.pendingSignals.dequeue(mask); info != nil {\nreturn info\n}\n@@ -294,6 +281,49 @@ func (t *Task) SignalReturn(rt bool) (*SyscallControl, error) {\nreturn ctrlResume, nil\n}\n+// Sigtimedwait implements the semantics of sigtimedwait(2).\n+//\n+// Preconditions: The caller must be running on the task goroutine. t.exitState\n+// < TaskExitZombie.\n+func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*arch.SignalInfo, error) {\n+ // set is the set of signals we're interested in; invert it to get the set\n+ // of signals to block.\n+ mask := ^set &^ UnblockableSignals\n+\n+ t.tg.signalHandlers.mu.Lock()\n+ defer t.tg.signalHandlers.mu.Unlock()\n+ if info := t.dequeueSignalLocked(mask); info != nil {\n+ return info, nil\n+ }\n+\n+ if timeout == 0 {\n+ return nil, syserror.EAGAIN\n+ }\n+\n+ // Unblock signals we're waiting for. 
Remember the original signal mask so\n+ // that Task.sendSignalTimerLocked doesn't discard ignored signals that\n+ // we're temporarily unblocking.\n+ t.realSignalMask = t.signalMask\n+ t.setSignalMaskLocked(t.signalMask & mask)\n+\n+ // Wait for a timeout or new signal.\n+ t.tg.signalHandlers.mu.Unlock()\n+ _, err := t.BlockWithTimeout(nil, true, timeout)\n+ t.tg.signalHandlers.mu.Lock()\n+\n+ // Restore the original signal mask.\n+ t.setSignalMaskLocked(t.realSignalMask)\n+ t.realSignalMask = 0\n+\n+ if info := t.dequeueSignalLocked(mask); info != nil {\n+ return info, nil\n+ }\n+ if err == syserror.ETIMEDOUT {\n+ return nil, syserror.EAGAIN\n+ }\n+ return nil, err\n+}\n+\n// SendSignal sends the given signal to t.\n//\n// The following errors may be returned:\n@@ -431,7 +461,7 @@ func (t *Task) sendSignalTimerLocked(info *arch.SignalInfo, group bool, timer *I\n// Linux's kernel/signal.c:__send_signal() => prepare_signal() =>\n// sig_ignored().\nignored := computeAction(sig, t.tg.signalHandlers.actions[sig]) == SignalActionIgnore\n- if linux.SignalSetOf(sig)&t.signalMask == 0 && ignored && !t.hasTracer() {\n+ if sigset := linux.SignalSetOf(sig); sigset&t.signalMask == 0 && sigset&t.realSignalMask == 0 && ignored && !t.hasTracer() {\nt.Debugf(\"Discarding ignored signal %d\", sig)\nif timer != nil {\ntimer.signalRejectedLocked()\n@@ -1010,7 +1040,7 @@ func (*runInterrupt) execute(t *Task) taskRunState {\n}\n// Are there signals pending?\n- if info := t.dequeueSignalLocked(); info != nil {\n+ if info := t.dequeueSignalLocked(t.signalMask); info != nil {\nif linux.SignalSetOf(linux.Signal(info.Signo))&StopSignals != 0 && t.tg.groupStopPhase == groupStopNone {\n// Indicate that we've dequeued a stop signal before\n// unlocking the signal mutex; initiateGroupStop will check\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"new_path": "pkg/sentry/syscalls/linux/sys_signal.go",
"diff": "@@ -343,44 +343,6 @@ func Pause(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn 0, nil, syserror.ConvertIntr(t.Block(nil), kernel.ERESTARTNOHAND)\n}\n-func sigtimedwait(t *kernel.Task, mask linux.SignalSet, timeout time.Duration) (*arch.SignalInfo, error) {\n- // Is it already pending?\n- if info := t.TakeSignal(^mask); info != nil {\n- return info, nil\n- }\n-\n- // No signals available immediately and asked not to wait.\n- if timeout == 0 {\n- return nil, syserror.EAGAIN\n- }\n-\n- // No signals available yet. Temporarily unblock the ones we are interested\n- // in then wait for either a timeout or a new signal.\n- oldmask := t.SignalMask()\n- t.SetSignalMask(oldmask &^ mask)\n- _, err := t.BlockWithTimeout(nil, true, timeout)\n- t.SetSignalMask(oldmask)\n-\n- // How did the wait go?\n- switch err {\n- case syserror.ErrInterrupted:\n- if info := t.TakeSignal(^mask); info != nil {\n- // Got one of the signals we were waiting for.\n- return info, nil\n- }\n- // Got a signal we weren't waiting for.\n- return nil, syserror.EINTR\n- case syserror.ETIMEDOUT:\n- // Timed out and still no signals.\n- return nil, syserror.EAGAIN\n- default:\n- // Some other error? Shouldn't be possible. The event channel\n- // passed to BlockWithTimeout was nil, so the only two ways the\n- // block could've ended are a timeout or an interrupt.\n- panic(\"unreachable\")\n- }\n-}\n-\n// RtSigpending implements linux syscall rt_sigpending(2).\nfunc RtSigpending(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\naddr := args[0].Pointer()\n@@ -415,12 +377,11 @@ func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne\ntimeout = time.Duration(math.MaxInt64)\n}\n- si, err := sigtimedwait(t, mask, timeout)\n+ si, err := t.Sigtimedwait(mask, timeout)\nif err != nil {\nreturn 0, nil, err\n}\n- if si != nil {\nif siginfo != 0 {\nsi.FixSignalCodeForUser()\nif _, err := t.CopyOut(siginfo, si); err != nil {\n@@ -430,10 +391,6 @@ func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne\nreturn uintptr(si.Signo), nil, nil\n}\n- // sigtimedwait's not supposed to return nil si and err...\n- return 0, nil, nil\n-}\n-\n// RtSigqueueinfo implements linux syscall rt_sigqueueinfo(2).\nfunc RtSigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\npid := kernel.ThreadID(args[0].Int())\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix interaction between rt_sigtimedwait and ignored signals.
PiperOrigin-RevId: 213011782
Change-Id: I716c6ea3c586b0c6c5a892b6390d2d11478bc5af |
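The heart of the fix is the discard predicate: an ignored, unblocked signal still may not be dropped if it only became unblocked because sigtimedwait saved the real mask aside. A small sketch of that predicate over 64-bit signal sets follows; the helper names are illustrative rather than the sentry's.

```go
package example

// SignalSet is a 64-bit signal bitmap, one bit per signal number.
type SignalSet uint64

// signalSetOf returns the singleton set containing sig (1-based, as on Linux).
func signalSetOf(sig int) SignalSet { return 1 << uint(sig-1) }

// canDiscard mirrors the fixed check: an ignored signal may be dropped only
// if it is neither blocked by the current mask nor part of the mask that
// sigtimedwait saved before temporarily unblocking it, and the task is not
// being traced.
func canDiscard(sig int, signalMask, realSignalMask SignalSet, ignored, traced bool) bool {
	s := signalSetOf(sig)
	return s&signalMask == 0 && s&realSignalMask == 0 && ignored && !traced
}
```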
259,881 | 14.09.2018 15:58:56 | 25,200 | 3aa50f18a4102429aa40f5d0e518357ceaed2373 | Reuse readlink parameter, add sockaddr max. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/socket.go",
"new_path": "pkg/abi/linux/socket.go",
"diff": "@@ -140,6 +140,10 @@ const (\nSO_TYPE = 3\n)\n+// SockAddrMax is the maximum size of a struct sockaddr, from\n+// uapi/linux/socket.h.\n+const SockAddrMax = 128\n+\n// SockAddrInt is struct sockaddr_in, from uapi/linux/in.h.\ntype SockAddrInet struct {\nFamily uint16\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/util_unsafe.go",
"new_path": "pkg/sentry/fs/host/util_unsafe.go",
"diff": "@@ -23,6 +23,9 @@ import (\nktime \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time\"\n)\n+// NulByte is a single NUL byte. It is passed to readlinkat as an empty string.\n+var NulByte byte = '\\x00'\n+\nfunc createLink(fd int, name string, linkName string) error {\nnamePtr, err := syscall.BytePtrFromString(name)\nif err != nil {\n@@ -50,7 +53,7 @@ func readLink(fd int) (string, error) {\nn, _, errno := syscall.Syscall6(\nsyscall.SYS_READLINKAT,\nuintptr(fd),\n- uintptr(unsafe.Pointer(syscall.StringBytePtr(\"\"))),\n+ uintptr(unsafe.Pointer(&NulByte)), // \"\"\nuintptr(unsafe.Pointer(&b[0])),\nuintptr(l),\n0, 0)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reuse readlink parameter, add sockaddr max.
PiperOrigin-RevId: 213058623
Change-Id: I522598c655d633b9330990951ff1c54d1023ec29 |
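Passing a pointer to a single NUL byte gives readlinkat an empty path, so the call resolves the symlink referred to by the fd itself (one opened with O_PATH|O_NOFOLLOW). A hedged sketch of that trick with a growing buffer; the loop structure and buffer sizes here are illustrative, not the sentry's exact code.

```go
package example

import (
	"syscall"
	"unsafe"
)

// nul is a single NUL byte; its address doubles as an empty C string, so no
// per-call string conversion is needed.
var nul byte = 0

// readLinkFD reads the target of the symlink referenced by fd, growing the
// buffer until the result fits.
func readLinkFD(fd int) (string, error) {
	for size := 256; ; size *= 2 {
		buf := make([]byte, size)
		n, _, errno := syscall.Syscall6(
			syscall.SYS_READLINKAT,
			uintptr(fd),
			uintptr(unsafe.Pointer(&nul)), // empty path: operate on fd itself
			uintptr(unsafe.Pointer(&buf[0])),
			uintptr(size),
			0, 0)
		if errno != 0 {
			return "", errno
		}
		if int(n) < size {
			return string(buf[:n]), nil
		}
	}
}
```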
259,891 | 17.09.2018 11:30:16 | 25,200 | 25add7b22b1b0b6a4bac1e72536d3f3a0c70f048 | runsc: Fix stdin/out/err in multi-container mode.
Stdin/out/err weren't being sent to the sentry. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -186,7 +186,9 @@ type StartArgs struct {\n// CID is the ID of the container to start.\nCID string\n- // FilePayload contains the file descriptor over which the sandbox will\n+ // FilePayload contains, in order:\n+ // * stdin, stdout, and stderr.\n+ // * the file descriptor over which the sandbox will\n// request files from its root filesystem.\nurpc.FilePayload\n}\n@@ -215,8 +217,8 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nif path.Clean(args.CID) != args.CID {\nreturn fmt.Errorf(\"container ID shouldn't contain directory traversals such as \\\"..\\\": %q\", args.CID)\n}\n- if len(args.FilePayload.Files) == 0 {\n- return fmt.Errorf(\"start arguments must contain at least one file for the container root\")\n+ if len(args.FilePayload.Files) < 4 {\n+ return fmt.Errorf(\"start arguments must contain stdin, stderr, and stdout followed by at least one file for the container root gofer\")\n}\nerr := cm.l.startContainer(cm.l.k, args.Spec, args.Conf, args.CID, args.FilePayload.Files)\n@@ -339,7 +341,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\ncm.l.k = k\n// Set up the restore environment.\n- fds := &fdDispenser{fds: cm.l.ioFDs}\n+ fds := &fdDispenser{fds: cm.l.goferFDs}\nrenv, err := createRestoreEnvironment(cm.l.spec, cm.l.conf, fds)\nif err != nil {\nreturn fmt.Errorf(\"error creating RestoreEnvironment: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fds.go",
"new_path": "runsc/boot/fds.go",
"diff": "@@ -16,7 +16,6 @@ package boot\nimport (\n\"fmt\"\n- \"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n@@ -28,15 +27,19 @@ import (\n// createFDMap creates an fd map that contains stdin, stdout, and stderr. If\n// console is true, then ioctl calls will be passed through to the host fd.\n-func createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool) (*kernel.FDMap, error) {\n+func createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool, stdioFDs []int) (*kernel.FDMap, error) {\n+ if len(stdioFDs) != 3 {\n+ return nil, fmt.Errorf(\"stdioFDs should contain exactly 3 FDs (stdin, stdout, and stderr), but %d FDs received\", len(stdioFDs))\n+ }\n+\nfdm := k.NewFDMap()\ndefer fdm.DecRef()\n// Maps sandbox fd to host fd.\nfdMap := map[int]int{\n- 0: syscall.Stdin,\n- 1: syscall.Stdout,\n- 2: syscall.Stderr,\n+ 0: stdioFDs[0],\n+ 1: stdioFDs[1],\n+ 2: stdioFDs[2],\n}\nmounter := fs.FileOwnerFromContext(ctx)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -82,7 +82,7 @@ func (f *fdDispenser) empty() bool {\n// createMountNamespace creates a mount namespace containing the root filesystem\n// and all mounts. 'rootCtx' is used to walk directories to find mount points.\n-func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, ioFDs []int) (*fs.MountNamespace, error) {\n+func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, goferFDs []int) (*fs.MountNamespace, error) {\nmounts := compileMounts(spec)\n// Create a tmpfs mount where we create and mount a root filesystem for\n// each child container.\n@@ -90,7 +90,7 @@ func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec\nType: tmpfs,\nDestination: childContainersDir,\n})\n- fds := &fdDispenser{fds: ioFDs}\n+ fds := &fdDispenser{fds: goferFDs}\nrootInode, err := createRootMount(rootCtx, spec, conf, fds, mounts)\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create root mount: %v\", err)\n@@ -595,13 +595,13 @@ func subtargets(root string, mnts []specs.Mount) []string {\n// setFileSystemForProcess is used to set up the file system and amend the procArgs accordingly.\n// procArgs are passed by reference and the FDMap field is modified.\n-func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, ioFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\n+func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, stdioFDs, goferFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\nctx := procArgs.NewContext(k)\n// Create the FD map, which will set stdin, stdout, and stderr. If\n// console is true, then ioctl calls will be passed through to the host\n// fd.\n- fdm, err := createFDMap(ctx, k, ls, console)\n+ fdm, err := createFDMap(ctx, k, ls, console, stdioFDs)\nif err != nil {\nreturn fmt.Errorf(\"error importing fds: %v\", err)\n}\n@@ -625,7 +625,7 @@ func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spe\nmns := k.RootMountNamespace()\nif mns == nil {\n// Create the virtual filesystem.\n- mns, err := createMountNamespace(ctx, rootCtx, spec, conf, ioFDs)\n+ mns, err := createMountNamespace(ctx, rootCtx, spec, conf, goferFDs)\nif err != nil {\nreturn fmt.Errorf(\"error creating mounts: %v\", err)\n}\n@@ -637,7 +637,7 @@ func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spe\n// Create the container's root filesystem mount.\nlog.Infof(\"Creating new process in child container.\")\n- fds := &fdDispenser{fds: append([]int{}, ioFDs...)}\n+ fds := &fdDispenser{fds: append([]int{}, goferFDs...)}\nrootInode, err := createRootMount(rootCtx, spec, conf, fds, nil)\nif err != nil {\nreturn fmt.Errorf(\"error creating filesystem for container: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -77,8 +77,11 @@ type Loader struct {\nwatchdog *watchdog.Watchdog\n- // ioFDs are the FDs that attach the sandbox to the gofers.\n- ioFDs []int\n+ // stdioFDs contains stdin, stdout, and stderr.\n+ stdioFDs []int\n+\n+ // goferFDs are the FDs that attach the sandbox to the gofers.\n+ goferFDs []int\n// spec is the base configuration for the root container.\nspec *specs.Spec\n@@ -121,7 +124,7 @@ func init() {\n// New initializes a new kernel loader configured by spec.\n// New also handles setting up a kernel for restoring a container.\n-func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, ioFDs []int, console bool) (*Loader, error) {\n+func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, console bool) (*Loader, error) {\n// Create kernel and platform.\np, err := createPlatform(conf, deviceFD)\nif err != nil {\n@@ -252,7 +255,8 @@ func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, ioFDs []int\nconf: conf,\nconsole: console,\nwatchdog: watchdog,\n- ioFDs: ioFDs,\n+ stdioFDs: []int{syscall.Stdin, syscall.Stdout, syscall.Stderr},\n+ goferFDs: goferFDs,\nspec: spec,\nstartSignalForwarding: startSignalForwarding,\nrootProcArgs: procArgs,\n@@ -364,7 +368,8 @@ func (l *Loader) run() error {\n&l.rootProcArgs,\nl.spec,\nl.conf,\n- l.ioFDs,\n+ l.stdioFDs,\n+ l.goferFDs,\nl.console,\nl.rootProcArgs.Credentials,\nl.rootProcArgs.Limits,\n@@ -446,7 +451,8 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\n&procArgs,\nspec,\nconf,\n- ioFDs,\n+ ioFDs[:3], // stdioFDs\n+ ioFDs[3:], // goferFDs\nfalse,\ncreds,\nprocArgs.Limits,\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -100,8 +100,8 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {\n}\n// Start starts running a non-root container inside the sandbox.\n-func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, ioFiles []*os.File) error {\n- for _, f := range ioFiles {\n+func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, goferFiles []*os.File) error {\n+ for _, f := range goferFiles {\ndefer f.Close()\n}\n@@ -112,12 +112,15 @@ func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, ioFiles\n}\ndefer sandboxConn.Close()\n+ // The payload must container stdin/stdout/stderr followed by gofer\n+ // files.\n+ files := append([]*os.File{os.Stdin, os.Stdout, os.Stderr}, goferFiles...)\n// Start running the container.\nargs := boot.StartArgs{\nSpec: spec,\nConf: conf,\nCID: cid,\n- FilePayload: urpc.FilePayload{Files: ioFiles},\n+ FilePayload: urpc.FilePayload{Files: files},\n}\nif err := sandboxConn.Call(boot.ContainerStart, &args, nil); err != nil {\nreturn fmt.Errorf(\"error starting non-root container %v: %v\", spec.Process.Args, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Fix stdin/out/err in multi-container mode.
Stdin/out/err weren't being sent to the sentry.
PiperOrigin-RevId: 213307171
Change-Id: Ie4b634a58b1b69aa934ce8597e5cc7a47a2bcda2 |
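The contract being fixed is simply the ordering of the file payload: the first three descriptors are the container's stdio and the rest are gofer FDs. A small sketch of both sides of that contract; the function names are illustrative, not runsc's.

```go
package example

import (
	"fmt"
	"os"
)

// buildStartFiles assembles the payload in the order the Start RPC expects:
// stdin, stdout, stderr, then one or more gofer files.
func buildStartFiles(goferFiles []*os.File) []*os.File {
	return append([]*os.File{os.Stdin, os.Stdout, os.Stderr}, goferFiles...)
}

// splitStartFDs is the receiving side of the same contract: the first three
// descriptors are the container's stdio and the rest belong to the gofer.
func splitStartFDs(fds []int) (stdioFDs, goferFDs []int, err error) {
	if len(fds) < 4 {
		return nil, nil, fmt.Errorf("expected stdin, stdout, stderr plus at least one gofer FD, got %d FDs", len(fds))
	}
	return fds[:3], fds[3:], nil
}
```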
259,881 | 17.09.2018 12:15:35 | 25,200 | d639c3d61bfdbd42eb809c21a15275cc75524b7e | Allow NULL data in mount(2) | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_mount.go",
"new_path": "pkg/sentry/syscalls/linux/sys_mount.go",
"diff": "@@ -46,14 +46,17 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn 0, nil, err\n}\n+ data := \"\"\n+ if dataAddr != 0 {\n// In Linux, a full page is always copied in regardless of null\n// character placement, and the address is passed to each file system.\n// Most file systems always treat this data as a string, though, and so\n// do all of the ones we implement.\n- data, err := t.CopyInString(dataAddr, usermem.PageSize)\n+ data, err = t.CopyInString(dataAddr, usermem.PageSize)\nif err != nil {\nreturn 0, nil, err\n}\n+ }\n// Ignore magic value that was required before Linux 2.4.\nif flags&linux.MS_MGC_MSK == linux.MS_MGC_VAL {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow NULL data in mount(2)
PiperOrigin-RevId: 213315267
Change-Id: I7562bcd81fb22e90aa9c7dd9eeb94803fcb8c5af |
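The change boils down to treating a NULL data pointer as "no filesystem-specific options" instead of attempting a page-sized copy from address zero. A tiny sketch of that guard, with copyInString standing in for the task's copy-in helper (an assumption made only to keep the sketch self-contained):

```go
package example

// copyInString is a stand-in for the real usermem copy-in helper.
type copyInString func(addr uintptr, max int) (string, error)

const pageSize = 4096

// mountData reproduces the guard added above: a NULL data pointer is legal
// and yields empty options, so the page copy only happens for non-zero
// addresses.
func mountData(dataAddr uintptr, copyIn copyInString) (string, error) {
	if dataAddr == 0 {
		return "", nil
	}
	return copyIn(dataAddr, pageSize)
}
```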
259,854 | 17.09.2018 13:35:00 | 25,200 | ab6fa44588233fa48d1ae0bf7d9b0d9e984a6af0 | Allow kernel.(*Task).Block to accept a receive-only channel | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/lock/lock.go",
"new_path": "pkg/sentry/fs/lock/lock.go",
"diff": "@@ -121,7 +121,7 @@ type Locks struct {\n// Blocker is the interface used for blocking locks. Passing a nil Blocker\n// will be treated as non-blocking.\ntype Blocker interface {\n- Block(C chan struct{}) error\n+ Block(C <-chan struct{}) error\n}\nconst (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_block.go",
"new_path": "pkg/sentry/kernel/task_block.go",
"diff": "@@ -95,7 +95,7 @@ func (t *Task) BlockWithDeadline(C chan struct{}, haveDeadline bool, deadline kt\n// Most clients should use BlockWithDeadline or BlockWithTimeout instead.\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) BlockWithTimer(C chan struct{}, tchan <-chan struct{}) error {\n+func (t *Task) BlockWithTimer(C <-chan struct{}, tchan <-chan struct{}) error {\nreturn t.block(C, tchan)\n}\n@@ -104,13 +104,13 @@ func (t *Task) BlockWithTimer(C chan struct{}, tchan <-chan struct{}) error {\n// is interrupted.\n//\n// Preconditions: The caller must be running on the task goroutine.\n-func (t *Task) Block(C chan struct{}) error {\n+func (t *Task) Block(C <-chan struct{}) error {\nreturn t.block(C, nil)\n}\n// block blocks a task on one of many events.\n// N.B. defer is too expensive to be used here.\n-func (t *Task) block(C chan struct{}, timerChan <-chan struct{}) error {\n+func (t *Task) block(C <-chan struct{}, timerChan <-chan struct{}) error {\n// Fast path if the request is already done.\nselect {\ncase <-C:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow kernel.(*Task).Block to accept a receive-only channel
PiperOrigin-RevId: 213328293
Change-Id: I4164133e6f709ecdb89ffbb5f7df3324c273860a |
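Widening the parameter to a receive-only channel is purely about what callers can pass: a bidirectional chan struct{} still converts implicitly, and a channel that is already receive-only now qualifies too. A minimal sketch:

```go
package example

// block stands in for kernel.(*Task).Block after the change: because the
// parameter is receive-only (<-chan struct{}), callers may pass either a
// bidirectional channel they own or a channel they can only receive from.
func block(C <-chan struct{}) {
	<-C
}

func example() {
	// A bidirectional channel converts implicitly to <-chan struct{} ...
	own := make(chan struct{})
	go func() { close(own) }()
	block(own)

	// ... and so does a channel that is already receive-only, e.g. one
	// handed out by another package; with a plain `chan struct{}` parameter
	// this call would not compile.
	var recvOnly <-chan struct{} = own
	block(recvOnly)
}
```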
259,891 | 17.09.2018 16:24:05 | 25,200 | bb88c187c5457df14fa78e5e6b6f48cbc90fb489 | runsc: Enable waiting on exited processes.
This makes `runsc wait` behave more like waitpid()/wait4() in that:
- Once a process has run to completion, you can wait on it and get its exit code.
- Processes not waited on will consume memory (like a zombie process) | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -87,7 +87,7 @@ type ExecArgs struct {\n// Exec runs a new task.\nfunc (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\n- newTG, err := proc.execAsync(args)\n+ newTG, _, err := proc.execAsync(args)\nif err != nil {\nreturn err\n}\n@@ -100,11 +100,13 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {\n// ExecAsync runs a new task, but doesn't wait for it to finish. It is defined\n// as a function rather than a method to avoid exposing execAsync as an RPC.\n-func ExecAsync(proc *Proc, args *ExecArgs) (*kernel.ThreadGroup, error) {\n+func ExecAsync(proc *Proc, args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadID, error) {\nreturn proc.execAsync(args)\n}\n-func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, error) {\n+// execAsync runs a new task, but doesn't wait for it to finish. It returns the\n+// newly created thread group and its PID.\n+func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadID, error) {\n// Import file descriptors.\nl := limits.NewLimitSet()\nfdm := proc.Kernel.NewFDMap()\n@@ -144,7 +146,7 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, error) {\npaths := fs.GetPath(initArgs.Envv)\nf, err := proc.Kernel.RootMountNamespace().ResolveExecutablePath(ctx, initArgs.WorkingDirectory, initArgs.Argv[0], paths)\nif err != nil {\n- return nil, fmt.Errorf(\"error finding executable %q in PATH %v: %v\", initArgs.Argv[0], paths, err)\n+ return nil, 0, fmt.Errorf(\"error finding executable %q in PATH %v: %v\", initArgs.Argv[0], paths, err)\n}\ninitArgs.Filename = f\n}\n@@ -156,7 +158,7 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, error) {\n// Import the given file FD. This dups the FD as well.\nfile, err := host.ImportFile(ctx, int(f.Fd()), mounter, enableIoctl)\nif err != nil {\n- return nil, err\n+ return nil, 0, err\n}\ndefer file.DecRef()\n@@ -164,7 +166,7 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, error) {\nf.Close()\nif err := fdm.NewFDAt(kdefs.FD(appFD), file, kernel.FDFlags{}, l); err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/kernel.go",
"new_path": "pkg/sentry/kernel/kernel.go",
"diff": "@@ -596,13 +596,13 @@ func (ctx *createProcessContext) Value(key interface{}) interface{} {\n//\n// CreateProcess has no analogue in Linux; it is used to create the initial\n// application task, as well as processes started by the control server.\n-func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\n+func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID, error) {\nk.extMu.Lock()\ndefer k.extMu.Unlock()\nlog.Infof(\"EXEC: %v\", args.Argv)\nif k.mounts == nil {\n- return nil, fmt.Errorf(\"no kernel MountNamespace\")\n+ return nil, 0, fmt.Errorf(\"no kernel MountNamespace\")\n}\ntg := NewThreadGroup(k.tasks.Root, NewSignalHandlers(), linux.SIGCHLD, args.Limits, k.monotonicClock)\n@@ -622,7 +622,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\nvar err error\nwd, err = k.mounts.FindInode(ctx, root, nil, args.WorkingDirectory, args.MaxSymlinkTraversals)\nif err != nil {\n- return nil, fmt.Errorf(\"failed to find initial working directory %q: %v\", args.WorkingDirectory, err)\n+ return nil, 0, fmt.Errorf(\"failed to find initial working directory %q: %v\", args.WorkingDirectory, err)\n}\ndefer wd.DecRef()\n}\n@@ -630,10 +630,10 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\nif args.Filename == \"\" {\n// Was anything provided?\nif len(args.Argv) == 0 {\n- return nil, fmt.Errorf(\"no filename or command provided\")\n+ return nil, 0, fmt.Errorf(\"no filename or command provided\")\n}\nif !filepath.IsAbs(args.Argv[0]) {\n- return nil, fmt.Errorf(\"'%s' is not an absolute path\", args.Argv[0])\n+ return nil, 0, fmt.Errorf(\"'%s' is not an absolute path\", args.Argv[0])\n}\nargs.Filename = args.Argv[0]\n}\n@@ -641,7 +641,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\n// Create a fresh task context.\ntc, err := k.LoadTaskImage(ctx, k.mounts, root, wd, args.MaxSymlinkTraversals, args.Filename, args.Argv, args.Envv, k.featureSet)\nif err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n// Take a reference on the FDMap, which will be transferred to\n@@ -663,17 +663,18 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) {\n}\nt, err := k.tasks.NewTask(config)\nif err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n// Success.\n+ tgid := k.tasks.Root.IDOfThreadGroup(tg)\nif k.started {\ntid := k.tasks.Root.IDOfTask(t)\nt.Start(tid)\n} else if k.globalInit == nil {\nk.globalInit = tg\n}\n- return tg, nil\n+ return tg, tgid, nil\n}\n// Start starts execution of all tasks in k.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -242,32 +242,11 @@ type ExecArgs struct {\n// returns the pid of the new process.\nfunc (cm *containerManager) ExecuteAsync(args *ExecArgs, pid *int32) error {\nlog.Debugf(\"containerManager.ExecuteAsync: %+v\", args)\n-\n- // Get the container Root Dirent from the Task, since we must run this\n- // process with the same Root.\n- cm.l.mu.Lock()\n- tg, ok := cm.l.containerRootTGs[args.CID]\n- cm.l.mu.Unlock()\n- if !ok {\n- return fmt.Errorf(\"cannot exec in container %q: no such container\", args.CID)\n- }\n- tg.Leader().WithMuLocked(func(t *kernel.Task) {\n- args.Root = t.FSContext().RootDirectory()\n- })\n- if args.Root != nil {\n- defer args.Root.DecRef()\n- }\n-\n- // Start the process.\n- proc := control.Proc{Kernel: cm.l.k}\n- newTG, err := control.ExecAsync(&proc, &args.ExecArgs)\n+ tgid, err := cm.l.executeAsync(&args.ExecArgs, args.CID)\nif err != nil {\n- return fmt.Errorf(\"error executing: %+v: %v\", args, err)\n+ return err\n}\n-\n- // Return the pid of the newly-created process.\n- ts := cm.l.k.TaskSet()\n- *pid = int32(ts.Root.IDOfThreadGroup(newTG))\n+ *pid = int32(tgid)\nreturn nil\n}\n@@ -409,12 +388,16 @@ type WaitPIDArgs struct {\n// CID is the container ID.\nCID string\n+\n+ // ClearStatus determines whether the exit status of the process should\n+ // be cleared when WaitPID returns.\n+ ClearStatus bool\n}\n// WaitPID waits for the process with PID 'pid' in the sandbox.\nfunc (cm *containerManager) WaitPID(args *WaitPIDArgs, waitStatus *uint32) error {\nlog.Debugf(\"containerManager.Wait\")\n- return cm.l.waitPID(kernel.ThreadID(args.PID), args.CID, waitStatus)\n+ return cm.l.waitPID(kernel.ThreadID(args.PID), args.CID, args.ClearStatus, waitStatus)\n}\n// SignalArgs are arguments to the Signal method.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/inet\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n@@ -103,7 +104,7 @@ type Loader struct {\n// sandboxID is the ID for the whole sandbox.\nsandboxID string\n- // mu guards containerRootTGs.\n+ // mu guards containerRootTGs and execProcesses.\nmu sync.Mutex\n// containerRootTGs maps container IDs to their root processes. It\n@@ -111,7 +112,24 @@ type Loader struct {\n// call methods on particular containers.\n//\n// containerRootTGs is guarded by mu.\n+ //\n+ // TODO: When containers are removed via `runsc delete`,\n+ // containerRootTGs should be cleaned up.\ncontainerRootTGs map[string]*kernel.ThreadGroup\n+\n+ // execProcesses maps each invocation of exec to the process it spawns.\n+ //\n+ // execProcesses is guardded by mu.\n+ //\n+ // TODO: When containers are removed via `runsc delete`,\n+ // execProcesses should be cleaned up.\n+ execProcesses map[execID]*kernel.ThreadGroup\n+}\n+\n+// execID uniquely identifies a sentry process.\n+type execID struct {\n+ cid string\n+ pid kernel.ThreadID\n}\nfunc init() {\n@@ -385,7 +403,8 @@ func (l *Loader) run() error {\n}\n// Create the root container init task.\n- if _, err := l.k.CreateProcess(l.rootProcArgs); err != nil {\n+ _, _, err := l.k.CreateProcess(l.rootProcArgs)\n+ if err != nil {\nreturn fmt.Errorf(\"failed to create init process: %v\", err)\n}\n@@ -393,6 +412,11 @@ func (l *Loader) run() error {\nl.rootProcArgs.FDMap.DecRef()\n}\n+ if l.execProcesses != nil {\n+ return fmt.Errorf(\"there shouldn't already be a cache of exec'd processes, but found: %v\", l.execProcesses)\n+ }\n+ l.execProcesses = make(map[execID]*kernel.ThreadGroup)\n+\n// Start signal forwarding only after an init process is created.\nl.stopSignalForwarding = l.startSignalForwarding()\n@@ -467,7 +491,7 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn fmt.Errorf(\"error setting executable path for %+v: %v\", procArgs, err)\n}\n- tg, err := l.k.CreateProcess(procArgs)\n+ tg, _, err := l.k.CreateProcess(procArgs)\nif err != nil {\nreturn fmt.Errorf(\"failed to create process in sentry: %v\", err)\n}\n@@ -482,6 +506,40 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn nil\n}\n+func (l *Loader) executeAsync(args *control.ExecArgs, cid string) (kernel.ThreadID, error) {\n+ // Get the container Root Dirent from the Task, since we must run this\n+ // process with the same Root.\n+ l.mu.Lock()\n+ tg, ok := l.containerRootTGs[cid]\n+ l.mu.Unlock()\n+ if !ok {\n+ return 0, fmt.Errorf(\"cannot exec in container %q: no such container\", cid)\n+ }\n+ tg.Leader().WithMuLocked(func(t *kernel.Task) {\n+ args.Root = t.FSContext().RootDirectory()\n+ })\n+ if args.Root != nil {\n+ defer args.Root.DecRef()\n+ }\n+\n+ // Start the process.\n+ proc := control.Proc{Kernel: l.k}\n+ tg, tgid, err := control.ExecAsync(&proc, args)\n+ if err != nil {\n+ return 0, fmt.Errorf(\"error executing: %+v: %v\", args, err)\n+ }\n+\n+ // Insert the process into execProcesses so that we can wait on it\n+ // later.\n+ l.mu.Lock()\n+ defer l.mu.Unlock()\n+ eid := execID{cid: cid, pid: tgid}\n+ l.execProcesses[eid] = tg\n+ log.Debugf(\"updated execProcesses: %v\", 
l.execProcesses)\n+\n+ return tgid, nil\n+}\n+\n// TODO: Per-container namespaces must be supported for -pid.\n// waitContainer waits for the root process of a container to exit.\n@@ -500,39 +558,59 @@ func (l *Loader) waitContainer(cid string, waitStatus *uint32) error {\n// consider the container exited.\n// TODO: Multiple calls to waitContainer() should return\n// the same exit status.\n- defer func() {\n+ ws := l.wait(tg)\n+ *waitStatus = ws\n+\nl.mu.Lock()\ndefer l.mu.Unlock()\n- // TODO: Containers don't map 1:1 with their root\n- // processes. Container exits should be managed explicitly\n- // rather than via PID.\ndelete(l.containerRootTGs, cid)\n- }()\n- l.wait(tg, waitStatus)\n+\nreturn nil\n}\n-func (l *Loader) waitPID(tgid kernel.ThreadID, cid string, waitStatus *uint32) error {\n+func (l *Loader) waitPID(tgid kernel.ThreadID, cid string, clearStatus bool, waitStatus *uint32) error {\n// TODO: Containers all currently share a PID namespace.\n// When per-container PID namespaces are supported, wait should use cid\n// to find the appropriate PID namespace.\n/*if cid != l.sandboxID {\nreturn errors.New(\"non-sandbox PID namespaces are not yet implemented\")\n}*/\n- // TODO: This won't work if the exec process already exited.\n- tg := l.k.TaskSet().Root.ThreadGroupWithID(kernel.ThreadID(tgid))\n+\n+ // If the process was started via runsc exec, it will have an\n+ // entry in l.execProcesses.\n+ l.mu.Lock()\n+ eid := execID{cid: cid, pid: tgid}\n+ tg, ok := l.execProcesses[eid]\n+ l.mu.Unlock()\n+ if ok {\n+ ws := l.wait(tg)\n+ *waitStatus = ws\n+ if clearStatus {\n+ // Remove tg from the cache.\n+ l.mu.Lock()\n+ delete(l.execProcesses, eid)\n+ log.Debugf(\"updated execProcesses (removal): %v\", l.execProcesses)\n+ l.mu.Unlock()\n+ }\n+ return nil\n+ }\n+\n+ // This process wasn't created by runsc exec or start, so just find it\n+ // by pid and hope it hasn't exited yet.\n+ tg = l.k.TaskSet().Root.ThreadGroupWithID(kernel.ThreadID(tgid))\nif tg == nil {\nreturn fmt.Errorf(\"no thread group with ID %d\", tgid)\n}\n- l.wait(tg, waitStatus)\n+ ws := l.wait(tg)\n+ *waitStatus = ws\nreturn nil\n}\n// wait waits for the process with TGID 'tgid' in a container's PID namespace\n// to exit.\n-func (l *Loader) wait(tg *kernel.ThreadGroup, waitStatus *uint32) {\n+func (l *Loader) wait(tg *kernel.ThreadGroup) uint32 {\ntg.WaitExited()\n- *waitStatus = tg.ExitStatus().Status()\n+ return tg.ExitStatus().Status()\n}\nfunc (l *Loader) setRootContainerID(cid string) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -111,11 +111,11 @@ func createLoader() (*Loader, func(), error) {\n// TestRun runs a simple application in a sandbox and checks that it succeeds.\nfunc TestRun(t *testing.T) {\n- s, cleanup, err := createLoader()\n+ l, cleanup, err := createLoader()\nif err != nil {\nt.Fatalf(\"error creating loader: %v\", err)\n}\n- defer s.Destroy()\n+ defer l.Destroy()\ndefer cleanup()\n// Start a goroutine to read the start chan result, otherwise Run will\n@@ -124,12 +124,13 @@ func TestRun(t *testing.T) {\nvar wg sync.WaitGroup\nwg.Add(1)\ngo func() {\n- resultChanErr = <-s.ctrl.manager.startResultChan\n+ resultChanErr = <-l.ctrl.manager.startResultChan\nwg.Done()\n}()\n- // Run the container..\n- if err := s.Run(); err != nil {\n+ // Run the container.\n+ l.setRootContainerID(\"foo\")\n+ if err := l.Run(); err != nil {\nt.Errorf(\"error running container: %v\", err)\n}\n@@ -140,7 +141,7 @@ func TestRun(t *testing.T) {\n}\n// Wait for the application to exit. It should succeed.\n- if status := s.WaitExit(); status.Code != 0 || status.Signo != 0 {\n+ if status := l.WaitExit(); status.Code != 0 || status.Signo != 0 {\nt.Errorf(\"application exited with status %+v, want 0\", status)\n}\n}\n@@ -148,24 +149,24 @@ func TestRun(t *testing.T) {\n// TestStartSignal tests that the controller Start message will cause\n// WaitForStartSignal to return.\nfunc TestStartSignal(t *testing.T) {\n- s, cleanup, err := createLoader()\n+ l, cleanup, err := createLoader()\nif err != nil {\nt.Fatalf(\"error creating loader: %v\", err)\n}\n- defer s.Destroy()\n+ defer l.Destroy()\ndefer cleanup()\n// We aren't going to wait on this application, so the control server\n// needs to be shut down manually.\n- defer s.ctrl.srv.Stop()\n+ defer l.ctrl.srv.Stop()\n// Start a goroutine that calls WaitForStartSignal and writes to a\n// channel when it returns.\nwaitFinished := make(chan struct{})\ngo func() {\n- s.WaitForStartSignal()\n+ l.WaitForStartSignal()\n// Pretend that Run() executed and returned no error.\n- s.ctrl.manager.startResultChan <- nil\n+ l.ctrl.manager.startResultChan <- nil\nwaitFinished <- struct{}{}\n}()\n@@ -181,7 +182,7 @@ func TestStartSignal(t *testing.T) {\n// Trigger the control server StartRoot method.\ncid := \"foo\"\n- if err := s.ctrl.manager.StartRoot(&cid, nil); err != nil {\n+ if err := l.ctrl.manager.StartRoot(&cid, nil); err != nil {\nt.Errorf(\"error calling StartRoot: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/exec.go",
"new_path": "runsc/cmd/exec.go",
"diff": "@@ -49,6 +49,7 @@ type Exec struct {\nextraKGIDs stringSlice\ncaps stringSlice\ndetach bool\n+ clearStatus bool\nprocessPath string\npidFile string\ninternalPidFile string\n@@ -100,6 +101,9 @@ func (ex *Exec) SetFlags(f *flag.FlagSet) {\nf.StringVar(&ex.pidFile, \"pid-file\", \"\", \"filename that the container pid will be written to\")\nf.StringVar(&ex.internalPidFile, \"internal-pid-file\", \"\", \"filename that the container-internal pid will be written to\")\nf.StringVar(&ex.consoleSocket, \"console-socket\", \"\", \"path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal\")\n+\n+ // clear-status is expected to only be set when we fork due to --detach being set.\n+ f.BoolVar(&ex.clearStatus, \"clear-status\", true, \"clear the status of the exec'd process upon completion\")\n}\n// Execute implements subcommands.Command.Execute. It starts a process in an\n@@ -163,7 +167,7 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n// Wait for the process to exit.\n- ws, err := c.WaitPID(pid)\n+ ws, err := c.WaitPID(pid, ex.clearStatus)\nif err != nil {\nFatalf(\"error waiting on pid %d: %v\", pid, err)\n}\n@@ -194,10 +198,16 @@ func (ex *Exec) execAndWait(waitStatus *syscall.WaitStatus) subcommands.ExitStat\n// Add the rest of the args, excluding the \"detach\" flag.\nfor _, a := range os.Args[1:] {\n- if !strings.Contains(a, \"detach\") {\n+ if strings.Contains(a, \"detach\") {\n+ // Replace with the \"clear-status\" flag, which tells\n+ // the new process it's a detached child and shouldn't\n+ // clear the exit status of the sentry process.\n+ args = append(args, \"--clear-status=false\")\n+ } else {\nargs = append(args, a)\n}\n}\n+\ncmd := exec.Command(binPath, args...)\n// Exec stdio defaults to current process stdio.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/wait.go",
"new_path": "runsc/cmd/wait.go",
"diff": "@@ -88,14 +88,14 @@ func (wt *Wait) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nwaitStatus = ws\n// Wait on a PID in the root PID namespace.\ncase wt.rootPID != unsetPID:\n- ws, err := c.WaitRootPID(int32(wt.rootPID))\n+ ws, err := c.WaitRootPID(int32(wt.rootPID), true /* clearStatus */)\nif err != nil {\nFatalf(\"error waiting on PID in root PID namespace %d in container %q: %v\", wt.rootPID, c.ID, err)\n}\nwaitStatus = ws\n// Wait on a PID in the container's PID namespace.\ncase wt.pid != unsetPID:\n- ws, err := c.WaitPID(int32(wt.pid))\n+ ws, err := c.WaitPID(int32(wt.pid), true /* clearStatus */)\nif err != nil {\nFatalf(\"error waiting on PID %d in container %q: %v\", wt.pid, c.ID, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -398,22 +398,22 @@ func (c *Container) Wait() (syscall.WaitStatus, error) {\n// WaitRootPID waits for process 'pid' in the sandbox's PID namespace and\n// returns its WaitStatus.\n-func (c *Container) WaitRootPID(pid int32) (syscall.WaitStatus, error) {\n+func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on pid %d in sandbox %q\", pid, c.Sandbox.ID)\nif c.Sandbox == nil || !c.Sandbox.IsRunning() {\nreturn 0, fmt.Errorf(\"container sandbox is not running\")\n}\n- return c.Sandbox.WaitPID(pid, c.Sandbox.ID)\n+ return c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)\n}\n// WaitPID waits for process 'pid' in the container's PID namespace and returns\n// its WaitStatus.\n-func (c *Container) WaitPID(pid int32) (syscall.WaitStatus, error) {\n+func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on pid %d in container %q\", pid, c.ID)\nif c.Sandbox == nil || !c.Sandbox.IsRunning() {\nreturn 0, fmt.Errorf(\"container sandbox is not running\")\n}\n- return c.Sandbox.WaitPID(pid, c.ID)\n+ return c.Sandbox.WaitPID(c.ID, pid, clearStatus)\n}\n// Signal sends the signal to the container.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -551,7 +551,7 @@ func TestExec(t *testing.T) {\nargs := &control.ExecArgs{\nFilename: \"/bin/sleep\",\n- Argv: []string{\"sleep\", \"5\"},\n+ Argv: []string{\"/bin/sleep\", \"5\"},\nWorkingDirectory: \"/\",\nKUID: uid,\n}\n@@ -1598,7 +1598,7 @@ func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus,\nif err != nil {\nreturn 0, fmt.Errorf(\"error executing: %v\", err)\n}\n- ws, err := cont.WaitPID(pid)\n+ ws, err := cont.WaitPID(pid, true /* clearStatus */)\nif err != nil {\nreturn 0, fmt.Errorf(\"error waiting: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -163,16 +163,15 @@ func TestMultiContainerWait(t *testing.T) {\ngo func(c *Container) {\ndefer wg.Done()\nconst pid = 2\n- if ws, err := c.WaitPID(pid); err != nil {\n+ if ws, err := c.WaitPID(pid, true /* clearStatus */); err != nil {\nt.Errorf(\"failed to wait for PID %d: %v\", pid, err)\n} else if es := ws.ExitStatus(); es != 0 {\nt.Errorf(\"PID %d exited with non-zero status %d\", pid, es)\n}\n- if _, err := c.WaitPID(pid); err == nil {\n+ if _, err := c.WaitPID(pid, true /* clearStatus */); err == nil {\nt.Errorf(\"wait for stopped PID %d should fail\", pid)\n}\n- // TODO: use 'container[1]' when PID namespace is supported.\n- }(containers[0])\n+ }(containers[1])\n}\nwg.Wait()\n@@ -184,6 +183,93 @@ func TestMultiContainerWait(t *testing.T) {\n}\n}\n+// TestExecWait ensures what we can wait containers and individual processes in the\n+// sandbox that have already exited.\n+func TestExecWait(t *testing.T) {\n+ rootDir, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+\n+ // The first container should run the entire duration of the test.\n+ cmd1 := []string{\"sleep\", \"100\"}\n+ // We'll wait on the second container, which is much shorter lived.\n+ cmd2 := []string{\"sleep\", \"1\"}\n+ specs, ids := createSpecs(cmd1, cmd2)\n+\n+ // Setup the containers.\n+ var containers []*Container\n+ for i, spec := range specs {\n+ conf := testutil.TestConfig()\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(bundleDir)\n+ cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ containers = append(containers, cont)\n+ }\n+\n+ // Check via ps that multiple processes are running.\n+ expectedPL := []*control.Process{\n+ {PID: 1, Cmd: \"sleep\"},\n+ {PID: 2, Cmd: \"sleep\"},\n+ }\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Fatalf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Wait for the second container to finish.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Fatalf(\"failed to wait for second container to stop: %v\", err)\n+ }\n+\n+ // Get the second container exit status.\n+ if ws, err := containers[1].Wait(); err != nil {\n+ t.Fatalf(\"failed to wait for process %s: %v\", containers[1].Spec.Process.Args, err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Fatalf(\"process %s exited with non-zero status %d\", containers[1].Spec.Process.Args, es)\n+ }\n+ if _, err := containers[1].Wait(); err == nil {\n+ t.Fatalf(\"wait for stopped process %s should fail\", containers[1].Spec.Process.Args)\n+ }\n+\n+ // Execute another process in the first container.\n+ args := &control.ExecArgs{\n+ Filename: \"/bin/sleep\",\n+ Argv: []string{\"/bin/sleep\", \"1\"},\n+ WorkingDirectory: \"/\",\n+ KUID: 0,\n+ }\n+ pid, err := containers[0].Execute(args)\n+ if err != nil {\n+ t.Fatalf(\"error executing: %v\", err)\n+ }\n+\n+ // Wait for the exec'd process to exit.\n+ if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {\n+ t.Fatalf(\"failed to wait for second container to stop: %v\", err)\n+ }\n+\n+ // Get the exit status from the exec'd process.\n+ if ws, err := containers[0].WaitPID(pid, 
true /* clearStatus */); err != nil {\n+ t.Fatalf(\"failed to wait for process %+v with pid %d: %v\", args, pid, err)\n+ } else if es := ws.ExitStatus(); es != 0 {\n+ t.Fatalf(\"process %+v exited with non-zero status %d\", args, es)\n+ }\n+ if _, err := containers[0].WaitPID(pid, true /* clearStatus */); err == nil {\n+ t.Fatalf(\"wait for stopped process %+v should fail\", args)\n+ }\n+}\n+\n// TestMultiContainerMount tests that bind mounts can be used with multiple\n// containers.\nfunc TestMultiContainerMount(t *testing.T) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -522,7 +522,7 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {\n// WaitPID waits for process 'pid' in the container's sandbox and returns its\n// WaitStatus.\n-func (s *Sandbox) WaitPID(pid int32, cid string) (syscall.WaitStatus, error) {\n+func (s *Sandbox) WaitPID(cid string, pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Waiting for PID %d in sandbox %q\", pid, s.ID)\nvar ws syscall.WaitStatus\nconn, err := s.sandboxConnect()\n@@ -534,6 +534,7 @@ func (s *Sandbox) WaitPID(pid int32, cid string) (syscall.WaitStatus, error) {\nargs := &boot.WaitPIDArgs{\nPID: pid,\nCID: cid,\n+ ClearStatus: clearStatus,\n}\nif err := conn.Call(boot.ContainerWaitPID, args, &ws); err != nil {\nreturn ws, fmt.Errorf(\"error waiting on PID %d in sandbox %q: %v\", pid, s.ID, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Enable waiting on exited processes.
This makes `runsc wait` behave more like waitpid()/wait4() in that:
- Once a process has run to completion, you can wait on it and get its exit
code.
- Processes not waited on will consume memory (like a zombie process)
PiperOrigin-RevId: 213358916
Change-Id: I5b5eca41ce71eea68e447380df8c38361a4d1558 |
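The waitPID change above works because exited processes stay cached in execProcesses until a waiter asks for their status with clearStatus set, much like zombie entries reaped by wait4(). A minimal Go sketch of that pattern follows; the registry, record, and wait names are illustrative stand-ins rather than the runsc API, and plain exit statuses replace the real *kernel.ThreadGroup values.

package main

import (
	"fmt"
	"sync"
)

// execID uniquely identifies a process by container ID and PID, mirroring
// the key used in the loader diff above.
type execID struct {
	cid string
	pid int
}

// registry caches exit statuses of finished processes so a later wait can
// still observe them (the "zombie" behavior described in the commit message).
type registry struct {
	mu       sync.Mutex
	statuses map[execID]uint32
}

func newRegistry() *registry {
	return &registry{statuses: make(map[execID]uint32)}
}

// record stores the exit status of a finished process.
func (r *registry) record(cid string, pid int, status uint32) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.statuses[execID{cid, pid}] = status
}

// wait returns the stored status. When clearStatus is true the entry is
// reaped, so a second wait on the same PID fails, as wait4() would.
func (r *registry) wait(cid string, pid int, clearStatus bool) (uint32, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	id := execID{cid, pid}
	ws, ok := r.statuses[id]
	if !ok {
		return 0, fmt.Errorf("no process with PID %d in container %q", pid, cid)
	}
	if clearStatus {
		delete(r.statuses, id)
	}
	return ws, nil
}

func main() {
	r := newRegistry()
	r.record("foo", 2, 0)
	if ws, err := r.wait("foo", 2, true /* clearStatus */); err == nil {
		fmt.Println("first wait, exit status:", ws)
	}
	if _, err := r.wait("foo", 2, true /* clearStatus */); err != nil {
		fmt.Println("second wait fails:", err) // entry was cleared by the first wait
	}
}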
259,992 | 17.09.2018 21:17:31 | 25,200 | 26b08e182cd08371f14dc58fd54ed7865452cea7 | Rename container in test
's' used to stand for sandbox, before container existed. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -192,16 +192,16 @@ func run(spec *specs.Spec, conf *boot.Config) error {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nreturn fmt.Errorf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\nreturn fmt.Errorf(\"error starting container: %v\", err)\n}\n- ws, err := s.Wait()\n+ ws, err := c.Wait()\nif err != nil {\nreturn fmt.Errorf(\"error waiting on container: %v\", err)\n}\n@@ -315,11 +315,11 @@ func TestLifecycle(t *testing.T) {\n}\n// Load the container from disk and check the status.\n- s, err := Load(rootDir, id)\n+ c, err := Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, Created; got != want {\n+ if got, want := c.Status, Created; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -333,21 +333,21 @@ func TestLifecycle(t *testing.T) {\n}\n// Start the container.\n- if err := s.Start(conf); err != nil {\n+ if err := c.Start(conf); err != nil {\nt.Fatalf(\"error starting container: %v\", err)\n}\n// Load the container from disk and check the status.\n- s, err = Load(rootDir, id)\n+ c, err = Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, Running; got != want {\n+ if got, want := c.Status, Running; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n// Verify that \"sleep 100\" is running.\n- if err := waitForProcessList(s, expectedPL); err != nil {\n+ if err := waitForProcessList(c, expectedPL); err != nil {\nt.Error(err)\n}\n@@ -357,7 +357,7 @@ func TestLifecycle(t *testing.T) {\nch := make(chan struct{})\ngo func() {\nch <- struct{}{}\n- ws, err := s.Wait()\n+ ws, err := c.Wait()\nif err != nil {\nt.Fatalf(\"error waiting on container: %v\", err)\n}\n@@ -372,7 +372,7 @@ func TestLifecycle(t *testing.T) {\n<-ch\ntime.Sleep(100 * time.Millisecond)\n// Send the container a SIGTERM which will cause it to stop.\n- if err := s.Signal(syscall.SIGTERM); err != nil {\n+ if err := c.Signal(syscall.SIGTERM); err != nil {\nt.Fatalf(\"error sending signal %v to container: %v\", syscall.SIGTERM, err)\n}\n// Wait for it to die.\n@@ -383,23 +383,23 @@ func TestLifecycle(t *testing.T) {\n// and init will reap the sandbox. 
However, in this case the\n// test runner is the parent and will not reap the sandbox\n// process, so we must do it ourselves.\n- reapWg, err := reapChildren(s)\n+ reapWg, err := reapChildren(c)\nif err != nil {\nt.Fatalf(\"error reaping children: %v\", err)\n}\nreapWg.Wait()\n// Load the container from disk and check the status.\n- s, err = Load(rootDir, id)\n+ c, err = Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, Stopped; got != want {\n+ if got, want := c.Status, Stopped; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n// Destroy the container.\n- if err := s.Destroy(); err != nil {\n+ if err := c.Destroy(); err != nil {\nt.Fatalf(\"error destroying container: %v\", err)\n}\n@@ -1160,7 +1160,7 @@ func TestConsoleSocket(t *testing.T) {\n// Create the container and pass the socket name.\nid := testutil.UniqueContainerID()\n- s, err := Create(id, spec, conf, bundleDir, socketRelPath, \"\")\n+ c, err := Create(id, spec, conf, bundleDir, socketRelPath, \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1197,12 +1197,12 @@ func TestConsoleSocket(t *testing.T) {\n}\n// Reap the sandbox process.\n- if _, err := reapChildren(s); err != nil {\n+ if _, err := reapChildren(c); err != nil {\nt.Fatalf(\"error reaping children: %v\", err)\n}\n// Shut it down.\n- if err := s.Destroy(); err != nil {\n+ if err := c.Destroy(); err != nil {\nt.Fatalf(\"error destroying container: %v\", err)\n}\n@@ -1288,16 +1288,16 @@ func TestReadonlyRoot(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\nt.Fatalf(\"error starting container: %v\", err)\n}\n- ws, err := s.Wait()\n+ ws, err := c.Wait()\nif err != nil {\nt.Fatalf(\"error waiting on container: %v\", err)\n}\n@@ -1332,16 +1332,16 @@ func TestReadonlyMount(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n- defer s.Destroy()\n- if err := s.Start(conf); err != nil {\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\nt.Fatalf(\"error starting container: %v\", err)\n}\n- ws, err := s.Wait()\n+ ws, err := c.Wait()\nif err != nil {\nt.Fatalf(\"error waiting on container: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Rename container in test
's' used to stand for sandbox, before container existed.
PiperOrigin-RevId: 213390641
Change-Id: I7bda94a50398c46721baa92227e32a7a1d817412 |
259,992 | 17.09.2018 21:33:51 | 25,200 | 5d9816be41a967fa1fa9bbbe0c638dd322c7c0b1 | Remove memory usage static init
panic() during init() can be hard to debug.
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/usage/memory.go",
"new_path": "pkg/sentry/usage/memory.go",
"diff": "@@ -117,15 +117,16 @@ type MemoryLocked struct {\nFile *os.File\n}\n-func newMemoryLocked() MemoryLocked {\n- name := \"memory-usage\"\n+// Init initializes global 'MemoryAccounting'.\n+func Init() error {\n+ const name = \"memory-usage\"\nfd, err := memutil.CreateMemFD(name, 0)\nif err != nil {\n- panic(\"error creating usage file: \" + err.Error())\n+ return fmt.Errorf(\"error creating usage file: %v\", err)\n}\nfile := os.NewFile(uintptr(fd), name)\nif err := file.Truncate(int64(RTMemoryStatsSize)); err != nil {\n- panic(\"error truncating usage file: \" + err.Error())\n+ return fmt.Errorf(\"error truncating usage file: %v\", err)\n}\n// Note: We rely on the returned page being initially zeroed. This will\n// always be the case for a newly mapped page from /dev/shm. If we obtain\n@@ -133,13 +134,14 @@ func newMemoryLocked() MemoryLocked {\n// explicitly zero the page.\nmmap, err := syscall.Mmap(int(file.Fd()), 0, int(RTMemoryStatsSize), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)\nif err != nil {\n- panic(\"error mapping usage file: \" + err.Error())\n+ return fmt.Errorf(\"error mapping usage file: %v\", err)\n}\n- return MemoryLocked{\n+ MemoryAccounting = &MemoryLocked{\nFile: file,\nRTMemoryStats: RTMemoryStatsPointer(mmap),\n}\n+ return nil\n}\n// MemoryAccounting is the global memory stats.\n@@ -147,7 +149,7 @@ func newMemoryLocked() MemoryLocked {\n// There is no need to save or restore the global memory accounting object,\n// because individual frame kinds are saved and charged only when they become\n// resident.\n-var MemoryAccounting = newMemoryLocked()\n+var MemoryAccounting *MemoryLocked\nfunc (m *MemoryLocked) incLocked(val uint64, kind MemoryKind) {\nswitch kind {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -42,6 +42,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/sighandling\"\nslinux \"gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usage\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/watchdog\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer\"\n@@ -143,6 +144,9 @@ func init() {\n// New initializes a new kernel loader configured by spec.\n// New also handles setting up a kernel for restoring a container.\nfunc New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, console bool) (*Loader, error) {\n+ if err := usage.Init(); err != nil {\n+ return nil, fmt.Errorf(\"Error setting up memory usage: %v\", err)\n+ }\n// Create kernel and platform.\np, err := createPlatform(conf, deviceFD)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove memory usage static init
panic() during init() can be hard to debug.
Updates #100
PiperOrigin-RevId: 213391932
Change-Id: Ic103f1981c5b48f1e12da3b42e696e84ffac02a9 |
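The pattern applied above, package-level state built by an explicit Init() that returns an error instead of a static initializer that can panic, is easy to reuse. A small sketch under stated assumptions: a temporary file stands in for the sentry's memfd-backed usage file, and the accounting type is illustrative only.

package main

import (
	"fmt"
	"os"
)

// accounting stands in for usage.MemoryLocked; the real type wraps an mmap'd
// memfd, but a plain temp file is enough to show the shape.
type accounting struct {
	file *os.File
}

// Global is populated by Init rather than by a static initializer, so a
// failure surfaces as an error at a known call site instead of a panic
// somewhere inside package init().
var Global *accounting

// Init builds the global state and reports failures to the caller.
func Init() error {
	f, err := os.CreateTemp("", "memory-usage")
	if err != nil {
		return fmt.Errorf("error creating usage file: %v", err)
	}
	Global = &accounting{file: f}
	return nil
}

func main() {
	if err := Init(); err != nil {
		fmt.Fprintf(os.Stderr, "error setting up memory usage: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("usage file:", Global.file.Name())
}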
259,992 | 18.09.2018 02:08:11 | 25,200 | da20559137ccbf7f27e6008472f4d9159306df4a | Provide better message when memfd_create fails with ENOSYS
Updates | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/filemem/filemem.go",
"new_path": "pkg/sentry/platform/filemem/filemem.go",
"diff": "@@ -233,6 +233,9 @@ func newFromFile(file *os.File) (*FileMem, error) {\nfunc New(name string) (*FileMem, error) {\nfd, err := memutil.CreateMemFD(name, 0)\nif err != nil {\n+ if e, ok := err.(syscall.Errno); ok && e == syscall.ENOSYS {\n+ return nil, fmt.Errorf(\"memfd_create(2) is not implemented. Check that you have Linux 3.17 or higher\")\n+ }\nreturn nil, err\n}\nreturn newFromFile(os.NewFile(uintptr(fd), name))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/usage/memory.go",
"new_path": "pkg/sentry/usage/memory.go",
"diff": "@@ -122,6 +122,9 @@ func Init() error {\nconst name = \"memory-usage\"\nfd, err := memutil.CreateMemFD(name, 0)\nif err != nil {\n+ if e, ok := err.(syscall.Errno); ok && e == syscall.ENOSYS {\n+ return fmt.Errorf(\"memfd_create(2) is not implemented. Check that you have Linux 3.17 or higher\")\n+ }\nreturn fmt.Errorf(\"error creating usage file: %v\", err)\n}\nfile := os.NewFile(uintptr(fd), name)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Provide better message when memfd_create fails with ENOSYS
Updates #100
PiperOrigin-RevId: 213414821
Change-Id: I90c2e6c18c54a6afcd7ad6f409f670aa31577d37 |
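The same errno translation can be tried outside the sentry. In this sketch createMemFD is a hypothetical wrapper that always fails with ENOSYS, standing in for memutil.CreateMemFD on a pre-3.17 kernel; only the error-inspection logic mirrors the diff above.

package main

import (
	"fmt"
	"syscall"
)

// createMemFD stands in for a memfd_create(2) wrapper; it reports ENOSYS so
// the translation path below is exercised without needing an old kernel.
func createMemFD(name string) (int, error) {
	return -1, syscall.ENOSYS
}

// newMemFile converts the raw errno into a message the user can act on.
func newMemFile(name string) (int, error) {
	fd, err := createMemFD(name)
	if err != nil {
		if e, ok := err.(syscall.Errno); ok && e == syscall.ENOSYS {
			return -1, fmt.Errorf("memfd_create(2) is not implemented. Check that you have Linux 3.17 or higher")
		}
		return -1, fmt.Errorf("error creating %q: %v", name, err)
	}
	return fd, nil
}

func main() {
	if _, err := newMemFile("memory-usage"); err != nil {
		fmt.Println(err)
	}
}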
260,013 | 18.09.2018 11:13:27 | 25,200 | ed08597d121a624592e5517a28ae40ddbcc59cb0 | Allow for MSG_CTRUNC in input flags for recv. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"new_path": "pkg/sentry/syscalls/linux/sys_socket.go",
"diff": "@@ -602,7 +602,7 @@ func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\n@@ -635,7 +635,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_TRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {\nreturn 0, nil, syscall.EINVAL\n}\n@@ -791,7 +791,7 @@ func recvFrom(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, f\n}\n// Reject flags that we don't handle yet.\n- if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CONFIRM) != 0 {\n+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CTRUNC|linux.MSG_CONFIRM) != 0 {\nreturn 0, syscall.EINVAL\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow for MSG_CTRUNC in input flags for recv.
PiperOrigin-RevId: 213481363
Change-Id: I8150ea20cebeb207afe031ed146244de9209e745 |
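The flag checks touched above all follow the same mask-and-reject shape: clear every supported bit from the caller's flags and fail with EINVAL if anything remains. A standalone, Linux-only version using the standard syscall package's MSG_* constants in place of the sentry's linux package:

package main

import (
	"fmt"
	"syscall"
)

// allowedRecvFlags mirrors the widened mask in the diff, with MSG_CTRUNC now
// included alongside the previously accepted flags.
const allowedRecvFlags = syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL |
	syscall.MSG_PEEK | syscall.MSG_TRUNC | syscall.MSG_CTRUNC |
	syscall.MSG_CMSG_CLOEXEC | syscall.MSG_ERRQUEUE

// checkRecvFlags returns EINVAL when any unsupported flag bit is set.
func checkRecvFlags(flags int) error {
	if flags&^allowedRecvFlags != 0 {
		return syscall.EINVAL
	}
	return nil
}

func main() {
	fmt.Println(checkRecvFlags(syscall.MSG_TRUNC | syscall.MSG_CTRUNC)) // <nil>
	fmt.Println(checkRecvFlags(syscall.MSG_OOB))                        // invalid argument
}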
259,992 | 18.09.2018 19:11:49 | 25,200 | 8aec7473a1cc106d1de2e6c072b84eecc1f239b5 | Added state machine checks for Container.Status
For my own sanity when thinking about possible transitions and state. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -92,7 +92,7 @@ type Container struct {\nStatus Status `json:\"status\"`\n// GoferPid is the pid of the gofer running along side the sandbox. May\n- // be 0 if the gofer has been killed or it's not being used.\n+ // be 0 if the gofer has been killed.\nGoferPid int `json:\"goferPid\"`\n// Sandbox is the sandbox this container is running in. It will be nil\n@@ -138,14 +138,13 @@ func Load(rootDir, id string) (*Container, error) {\n// Check if the sandbox process is still running.\nif !c.Sandbox.IsRunning() {\n// Sandbox no longer exists, so this container definitely does not exist.\n- c.Status = Stopped\n- c.Sandbox = nil\n+ c.changeStatus(Stopped)\n} else if c.Status == Running {\n// Container state should reflect the actual state of\n// the application, so we don't consider gofer process\n// here.\nif err := c.Signal(syscall.Signal(0)); err != nil {\n- c.Status = Stopped\n+ c.changeStatus(Stopped)\n}\n}\n}\n@@ -265,7 +264,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n}\nc.Sandbox = sb.Sandbox\n}\n- c.Status = Created\n+ c.changeStatus(Created)\n// Save the metadata file.\nif err := c.save(); err != nil {\n@@ -322,7 +321,7 @@ func (c *Container) Start(conf *boot.Config) error {\nexecuteHooksBestEffort(c.Spec.Hooks.Poststart, c.State())\n}\n- c.Status = Running\n+ c.changeStatus(Running)\nreturn c.save()\n}\n@@ -338,7 +337,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str\nif err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {\nreturn err\n}\n- c.Status = Running\n+ c.changeStatus(Running)\nreturn c.save()\n}\n@@ -447,7 +446,7 @@ func (c *Container) Pause() error {\nif err := c.Sandbox.Pause(c.ID); err != nil {\nreturn fmt.Errorf(\"error pausing container: %v\", err)\n}\n- c.Status = Paused\n+ c.changeStatus(Paused)\nreturn c.save()\ndefault:\nreturn fmt.Errorf(\"container %q not created or running, not pausing\", c.ID)\n@@ -463,7 +462,7 @@ func (c *Container) Resume() error {\nif err := c.Sandbox.Resume(c.ID); err != nil {\nreturn fmt.Errorf(\"error resuming container: %v\", err)\n}\n- c.Status = Running\n+ c.changeStatus(Running)\nreturn c.save()\ndefault:\nreturn fmt.Errorf(\"container %q not paused, not resuming\", c.ID)\n@@ -519,7 +518,7 @@ func (c *Container) Destroy() error {\nexecuteHooksBestEffort(c.Spec.Hooks.Poststop, c.State())\n}\n- c.Status = Stopped\n+ c.changeStatus(Stopped)\nreturn nil\n}\n@@ -583,6 +582,7 @@ func (c *Container) waitForStopped() error {\nif err := syscall.Kill(c.GoferPid, 0); err == nil {\nreturn fmt.Errorf(\"gofer is still running\")\n}\n+ c.GoferPid = 0\n}\nreturn nil\n}\n@@ -652,3 +652,47 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund\nc.GoferPid = cmd.Process.Pid\nreturn sandEnds, nil\n}\n+\n+// changeStatus transitions from one status to another ensuring that the\n+// transition is valid.\n+func (c *Container) changeStatus(s Status) {\n+ switch s {\n+ case Creating:\n+ // Initial state, never transitions to it.\n+ panic(fmt.Sprintf(\"invalid state transition: %v => %v\", c.Status, s))\n+\n+ case Created:\n+ if c.Status != Creating {\n+ panic(fmt.Sprintf(\"invalid state transition: %v => %v\", c.Status, s))\n+ }\n+ if c.Sandbox == nil {\n+ panic(\"sandbox cannot be nil\")\n+ }\n+\n+ case Paused:\n+ if c.Status != Running {\n+ panic(fmt.Sprintf(\"invalid state transition: %v => %v\", c.Status, s))\n+ }\n+ if c.Sandbox == nil {\n+ panic(\"sandbox cannot be nil\")\n+ }\n+\n+ case Running:\n+ if c.Status != Created && 
c.Status != Paused {\n+ panic(fmt.Sprintf(\"invalid state transition: %v => %v\", c.Status, s))\n+ }\n+ if c.Sandbox == nil {\n+ panic(\"sandbox cannot be nil\")\n+ }\n+\n+ case Stopped:\n+ if c.Status != Created && c.Status != Running && c.Status != Stopped {\n+ panic(fmt.Sprintf(\"invalid state transition: %v => %v\", c.Status, s))\n+ }\n+ c.Sandbox = nil\n+\n+ default:\n+ panic(fmt.Sprintf(\"invalid new state: %v\", s))\n+ }\n+ c.Status = s\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -378,12 +378,15 @@ func TestMultiContainerSignal(t *testing.T) {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n+ // goferPid is reset when container is destroyed.\n+ goferPid := containers[1].GoferPid\n+\n// Destroy container and ensure container's gofer process has exited.\nif err := containers[1].Destroy(); err != nil {\nt.Errorf(\"failed to destroy container: %v\", err)\n}\n_, _, err = testutil.RetryEintr(func() (uintptr, uintptr, error) {\n- cpid, err := syscall.Wait4(containers[1].GoferPid, nil, 0, nil)\n+ cpid, err := syscall.Wait4(goferPid, nil, 0, nil)\nreturn uintptr(cpid), 0, err\n})\nif err != nil && err != syscall.ECHILD {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added state machine checks for Container.Status
For my own sanity when thinking about possible transitions and state.
PiperOrigin-RevId: 213559482
Change-Id: I25588c86cf6098be4eda01f4e7321c102ceef33c |
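The changeStatus switch above encodes a transition table in code; the same checks can be expressed as a map from target state to allowed predecessors. The sketch below abbreviates the real Container states and drops the sandbox-pointer invariants, so it illustrates the idea rather than the runsc implementation.

package main

import "fmt"

type Status int

const (
	Creating Status = iota
	Created
	Running
	Paused
	Stopped
)

// validPrev lists which states may precede each new state. Creating has no
// entry because nothing ever transitions back into it.
var validPrev = map[Status][]Status{
	Created: {Creating},
	Running: {Created, Paused},
	Paused:  {Running},
	Stopped: {Created, Running, Stopped},
}

type Container struct {
	Status Status
}

// changeStatus enforces the table; invalid transitions are programming
// errors, so they panic rather than returning an error.
func (c *Container) changeStatus(s Status) {
	for _, prev := range validPrev[s] {
		if c.Status == prev {
			c.Status = s
			return
		}
	}
	panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
}

func main() {
	c := &Container{Status: Creating}
	c.changeStatus(Created)
	c.changeStatus(Running)
	c.changeStatus(Stopped)
	fmt.Println("final status:", c.Status)
}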
259,942 | 19.09.2018 13:42:55 | 25,200 | 2e497de2d9f6c410a214faae9962e762757b0648 | Pass local link address to DeliverNetworkPacket
This allows a NetworkDispatcher to implement transparent bridging,
assuming all implementations of LinkEndpoint.WritePacket call eth.Encode
with header.EthernetFields.SrcAddr set to the passed
Route.LocalLinkAddress, if it is provided. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/channel/channel.go",
"new_path": "pkg/tcpip/link/channel/channel.go",
"diff": "@@ -72,7 +72,7 @@ func (e *Endpoint) Inject(protocol tcpip.NetworkProtocolNumber, vv buffer.Vector\n// InjectLinkAddr injects an inbound packet with a remote link address.\nfunc (e *Endpoint) InjectLinkAddr(protocol tcpip.NetworkProtocolNumber, remoteLinkAddr tcpip.LinkAddress, vv buffer.VectorisedView) {\n- e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, protocol, vv.Clone(nil))\n+ e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, \"\" /* localLinkAddr */, protocol, vv.Clone(nil))\n}\n// Attach saves the stack network-layer dispatcher for use later when packets\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -164,17 +164,24 @@ func (e *endpoint) WritePacket(r *stack.Route, hdr buffer.Prependable, payload b\nviews[0] = hdr.View()\nviews = append(views, payload.Views()...)\nvv := buffer.NewVectorisedView(len(views[0])+payload.Size(), views)\n- e.dispatcher.DeliverNetworkPacket(e, r.RemoteLinkAddress, protocol, vv)\n+ e.dispatcher.DeliverNetworkPacket(e, r.RemoteLinkAddress, r.LocalLinkAddress, protocol, vv)\nreturn nil\n}\nif e.hdrSize > 0 {\n// Add ethernet header if needed.\neth := header.Ethernet(hdr.Prepend(header.EthernetMinimumSize))\n- eth.Encode(&header.EthernetFields{\n+ ethHdr := &header.EthernetFields{\nDstAddr: r.RemoteLinkAddress,\n- SrcAddr: e.addr,\nType: protocol,\n- })\n+ }\n+\n+ // Preserve the src address if it's set in the route.\n+ if r.LocalLinkAddress != \"\" {\n+ ethHdr.SrcAddr = r.LocalLinkAddress\n+ } else {\n+ ethHdr.SrcAddr = e.addr\n+ }\n+ eth.Encode(ethHdr)\n}\nif payload.Size() == 0 {\n@@ -223,12 +230,15 @@ func (e *endpoint) dispatch(largeV buffer.View) (bool, *tcpip.Error) {\nreturn false, nil\n}\n- var p tcpip.NetworkProtocolNumber\n- var addr tcpip.LinkAddress\n+ var (\n+ p tcpip.NetworkProtocolNumber\n+ remoteLinkAddr, localLinkAddr tcpip.LinkAddress\n+ )\nif e.hdrSize > 0 {\neth := header.Ethernet(e.views[0])\np = eth.Type()\n- addr = eth.SourceAddress()\n+ remoteLinkAddr = eth.SourceAddress()\n+ localLinkAddr = eth.DestinationAddress()\n} else {\n// We don't get any indication of what the packet is, so try to guess\n// if it's an IPv4 or IPv6 packet.\n@@ -246,7 +256,7 @@ func (e *endpoint) dispatch(largeV buffer.View) (bool, *tcpip.Error) {\nvv := buffer.NewVectorisedView(n, e.views[:used])\nvv.TrimFront(e.hdrSize)\n- e.dispatcher.DeliverNetworkPacket(e, addr, p, vv)\n+ e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, localLinkAddr, p, vv)\n// Prepare e.views for another packet: release used views.\nfor i := 0; i < used; i++ {\n@@ -287,7 +297,7 @@ func (e *InjectableEndpoint) Attach(dispatcher stack.NetworkDispatcher) {\n// Inject injects an inbound packet.\nfunc (e *InjectableEndpoint) Inject(protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n- e.dispatcher.DeliverNetworkPacket(e, \"\", protocol, vv)\n+ e.dispatcher.DeliverNetworkPacket(e, \"\" /* remoteLinkAddr */, \"\" /* localLinkAddr */, protocol, vv)\n}\n// NewInjectable creates a new fd-based InjectableEndpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint_test.go",
"diff": "@@ -31,6 +31,13 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n)\n+const (\n+ mtu = 1500\n+ laddr = tcpip.LinkAddress(\"\\x11\\x22\\x33\\x44\\x55\\x66\")\n+ raddr = tcpip.LinkAddress(\"\\x77\\x88\\x99\\xaa\\xbb\\xcc\")\n+ proto = 10\n+)\n+\ntype packetInfo struct {\nraddr tcpip.LinkAddress\nproto tcpip.NetworkProtocolNumber\n@@ -78,12 +85,11 @@ func (c *context) cleanup() {\nsyscall.Close(c.fds[1])\n}\n-func (c *context) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (c *context) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, localLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nc.ch <- packetInfo{remoteLinkAddr, protocol, vv.ToView()}\n}\nfunc TestNoEthernetProperties(t *testing.T) {\n- const mtu = 1500\nc := newContext(t, &Options{MTU: mtu})\ndefer c.cleanup()\n@@ -97,7 +103,6 @@ func TestNoEthernetProperties(t *testing.T) {\n}\nfunc TestEthernetProperties(t *testing.T) {\n- const mtu = 1500\nc := newContext(t, &Options{EthernetHeader: true, MTU: mtu})\ndefer c.cleanup()\n@@ -111,7 +116,6 @@ func TestEthernetProperties(t *testing.T) {\n}\nfunc TestAddress(t *testing.T) {\n- const mtu = 1500\naddrs := []tcpip.LinkAddress{\"\", \"abc\", \"def\"}\nfor _, a := range addrs {\nt.Run(fmt.Sprintf(\"Address: %q\", a), func(t *testing.T) {\n@@ -126,13 +130,6 @@ func TestAddress(t *testing.T) {\n}\nfunc TestWritePacket(t *testing.T) {\n- const (\n- mtu = 1500\n- laddr = tcpip.LinkAddress(\"\\x11\\x22\\x33\\x44\\x55\\x66\")\n- raddr = tcpip.LinkAddress(\"\\x77\\x88\\x99\\xaa\\xbb\\xcc\")\n- proto = 10\n- )\n-\nlengths := []int{0, 100, 1000}\neths := []bool{true, false}\n@@ -197,14 +194,40 @@ func TestWritePacket(t *testing.T) {\n}\n}\n-func TestDeliverPacket(t *testing.T) {\n- const (\n- mtu = 1500\n- laddr = tcpip.LinkAddress(\"\\x11\\x22\\x33\\x44\\x55\\x66\")\n- raddr = tcpip.LinkAddress(\"\\x77\\x88\\x99\\xaa\\xbb\\xcc\")\n- proto = 10\n- )\n+func TestPreserveSrcAddress(t *testing.T) {\n+ baddr := tcpip.LinkAddress(\"\\xcc\\xbb\\xaa\\x77\\x88\\x99\")\n+ c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: true})\n+ defer c.cleanup()\n+\n+ // Set LocalLinkAddress in route to the value of the bridged address.\n+ r := &stack.Route{\n+ RemoteLinkAddress: raddr,\n+ LocalLinkAddress: baddr,\n+ }\n+\n+ // WritePacket panics given a prependable with anything less than\n+ // the minimum size of the ethernet header.\n+ hdr := buffer.NewPrependable(header.EthernetMinimumSize)\n+ if err := c.ep.WritePacket(r, hdr, buffer.VectorisedView{}, proto); err != nil {\n+ t.Fatalf(\"WritePacket failed: %v\", err)\n+ }\n+\n+ // Read from the FD, then compare with what we wrote.\n+ b := make([]byte, mtu)\n+ n, err := syscall.Read(c.fds[0], b)\n+ if err != nil {\n+ t.Fatalf(\"Read failed: %v\", err)\n+ }\n+ b = b[:n]\n+ h := header.Ethernet(b)\n+\n+ if a := h.SourceAddress(); a != baddr {\n+ t.Fatalf(\"SourceAddress() = %v, want %v\", a, baddr)\n+ }\n+}\n+\n+func TestDeliverPacket(t *testing.T) {\nlengths := []int{100, 1000}\neths := []bool{true, false}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/loopback/loopback.go",
"new_path": "pkg/tcpip/link/loopback/loopback.go",
"diff": "@@ -77,7 +77,11 @@ func (e *endpoint) WritePacket(_ *stack.Route, hdr buffer.Prependable, payload b\nviews[0] = hdr.View()\nviews = append(views, payload.Views()...)\nvv := buffer.NewVectorisedView(len(views[0])+payload.Size(), views)\n- e.dispatcher.DeliverNetworkPacket(e, \"\", protocol, vv)\n+\n+ // Because we're immediately turning around and writing the packet back to the\n+ // rx path, we intentionally don't preserve the remote and local link\n+ // addresses from the stack.Route we're passed.\n+ e.dispatcher.DeliverNetworkPacket(e, \"\" /* remoteLinkAddr */, \"\" /* localLinkAddr */, protocol, vv)\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -187,11 +187,16 @@ func (e *endpoint) LinkAddress() tcpip.LinkAddress {\nfunc (e *endpoint) WritePacket(r *stack.Route, hdr buffer.Prependable, payload buffer.VectorisedView, protocol tcpip.NetworkProtocolNumber) *tcpip.Error {\n// Add the ethernet header here.\neth := header.Ethernet(hdr.Prepend(header.EthernetMinimumSize))\n- eth.Encode(&header.EthernetFields{\n+ ethHdr := &header.EthernetFields{\nDstAddr: r.RemoteLinkAddress,\n- SrcAddr: e.addr,\nType: protocol,\n- })\n+ }\n+ if r.LocalLinkAddress != \"\" {\n+ ethHdr.SrcAddr = r.LocalLinkAddress\n+ } else {\n+ ethHdr.SrcAddr = e.addr\n+ }\n+ eth.Encode(ethHdr)\nv := payload.ToView()\n// Transmit the packet.\n@@ -248,7 +253,7 @@ func (e *endpoint) dispatchLoop(d stack.NetworkDispatcher) {\n// Send packet up the stack.\neth := header.Ethernet(b)\n- d.DeliverNetworkPacket(e, eth.SourceAddress(), eth.Type(), buffer.View(b[header.EthernetMinimumSize:]).ToVectorisedView())\n+ d.DeliverNetworkPacket(e, eth.SourceAddress(), eth.DestinationAddress(), eth.Type(), buffer.View(b[header.EthernetMinimumSize:]).ToVectorisedView())\n}\n// Clean state.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem_test.go",
"diff": "package sharedmem\nimport (\n+ \"bytes\"\n\"io/ioutil\"\n\"math/rand\"\n\"os\"\n\"reflect\"\n+ \"strings\"\n\"sync\"\n\"syscall\"\n\"testing\"\n@@ -129,10 +131,10 @@ func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress\nreturn c\n}\n-func (c *testContext) DeliverNetworkPacket(_ stack.LinkEndpoint, remoteAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (c *testContext) DeliverNetworkPacket(_ stack.LinkEndpoint, remoteLinkAddr, localLinkAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nc.mu.Lock()\nc.packets = append(c.packets, packetInfo{\n- addr: remoteAddr,\n+ addr: remoteLinkAddr,\nproto: proto,\nvv: vv.Clone(nil),\n})\n@@ -259,6 +261,7 @@ func TestSimpleSend(t *testing.T) {\n}\nfor iters := 1000; iters > 0; iters-- {\n+ func() {\n// Prepare and send packet.\nn := rand.Intn(10000)\nhdr := buffer.NewPrependable(n + int(c.ep.MaxHeaderLength()))\n@@ -277,6 +280,9 @@ func TestSimpleSend(t *testing.T) {\n// Receive packet.\ndesc := c.txq.tx.Pull()\npi := queue.DecodeTxPacketHeader(desc)\n+ if pi.Reserved != 0 {\n+ t.Fatalf(\"Reserved value is non-zero: 0x%x\", pi.Reserved)\n+ }\ncontents := make([]byte, 0, pi.Size)\nfor i := 0; i < pi.BufferCount; i++ {\nbi := queue.DecodeTxBufferHeader(desc, i)\n@@ -284,18 +290,21 @@ func TestSimpleSend(t *testing.T) {\n}\nc.txq.tx.Flush()\n- if pi.Reserved != 0 {\n- t.Fatalf(\"Reserved value is non-zero: 0x%x\", pi.Reserved)\n- }\n+ defer func() {\n+ // Tell the endpoint about the completion of the write.\n+ b := c.txq.rx.Push(8)\n+ queue.EncodeTxCompletion(b, pi.ID)\n+ c.txq.rx.Flush()\n+ }()\n- // Check the thernet header.\n+ // Check the ethernet header.\nethTemplate := make(header.Ethernet, header.EthernetMinimumSize)\nethTemplate.Encode(&header.EthernetFields{\nSrcAddr: localLinkAddr,\nDstAddr: remoteLinkAddr,\nType: proto,\n})\n- if got := contents[:header.EthernetMinimumSize]; !reflect.DeepEqual(got, []byte(ethTemplate)) {\n+ if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) {\nt.Fatalf(\"Bad ethernet header in packet: got %x, want %x\", got, ethTemplate)\n}\n@@ -307,14 +316,65 @@ func TestSimpleSend(t *testing.T) {\n}\ncontents = contents[:pi.Size][header.EthernetMinimumSize:]\n- if !reflect.DeepEqual(contents, merged) {\n+ if !bytes.Equal(contents, merged) {\nt.Fatalf(\"Buffers are different: got %x (%v bytes), want %x (%v bytes)\", contents, len(contents), merged, len(merged))\n}\n+ }()\n+ }\n+}\n+\n+// TestPreserveSrcAddressInSend calls WritePacket once with LocalLinkAddress\n+// set in Route (using much of the same code as TestSimpleSend), then checks\n+// that the encoded ethernet header received includes the correct SrcAddr.\n+func TestPreserveSrcAddressInSend(t *testing.T) {\n+ c := newTestContext(t, 20000, 1500, localLinkAddr)\n+ defer c.cleanup()\n+\n+ newLocalLinkAddress := tcpip.LinkAddress(strings.Repeat(\"0xFE\", 6))\n+ // Set both remote and local link address in route.\n+ r := stack.Route{\n+ RemoteLinkAddress: remoteLinkAddr,\n+ LocalLinkAddress: newLocalLinkAddress,\n+ }\n+ // WritePacket panics given a prependable with anything less than\n+ // the minimum size of the ethernet header.\n+ hdr := buffer.NewPrependable(header.EthernetMinimumSize)\n+\n+ proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))\n+ if err := c.ep.WritePacket(&r, hdr, buffer.VectorisedView{}, proto); err != nil {\n+ t.Fatalf(\"WritePacket failed: %v\", err)\n+ }\n+\n+ // Receive packet.\n+ desc := 
c.txq.tx.Pull()\n+ pi := queue.DecodeTxPacketHeader(desc)\n+ if pi.Reserved != 0 {\n+ t.Fatalf(\"Reserved value is non-zero: 0x%x\", pi.Reserved)\n+ }\n+ contents := make([]byte, 0, pi.Size)\n+ for i := 0; i < pi.BufferCount; i++ {\n+ bi := queue.DecodeTxBufferHeader(desc, i)\n+ contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...)\n+ }\n+ c.txq.tx.Flush()\n+\n+ defer func() {\n// Tell the endpoint about the completion of the write.\nb := c.txq.rx.Push(8)\nqueue.EncodeTxCompletion(b, pi.ID)\nc.txq.rx.Flush()\n+ }()\n+\n+ // Check that the ethernet header contains the expected SrcAddr.\n+ ethTemplate := make(header.Ethernet, header.EthernetMinimumSize)\n+ ethTemplate.Encode(&header.EthernetFields{\n+ SrcAddr: newLocalLinkAddress,\n+ DstAddr: remoteLinkAddr,\n+ Type: proto,\n+ })\n+ if got := contents[:header.EthernetMinimumSize]; !bytes.Equal(got, []byte(ethTemplate)) {\n+ t.Fatalf(\"Bad ethernet header in packet: got %x, want %x\", got, ethTemplate)\n}\n}\n@@ -583,7 +643,7 @@ func TestSimpleReceive(t *testing.T) {\nc.mu.Unlock()\ncontents = contents[header.EthernetMinimumSize:]\n- if !reflect.DeepEqual(contents, rcvd) {\n+ if !bytes.Equal(contents, rcvd) {\nt.Fatalf(\"Unexpected buffer contents: got %x, want %x\", rcvd, contents)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sniffer/sniffer.go",
"new_path": "pkg/tcpip/link/sniffer/sniffer.go",
"diff": "@@ -116,7 +116,7 @@ func NewWithFile(lower tcpip.LinkEndpointID, file *os.File, snapLen uint32) (tcp\n// DeliverNetworkPacket implements the stack.NetworkDispatcher interface. It is\n// called by the link-layer endpoint being wrapped when a packet arrives, and\n// logs the packet before forwarding to the actual dispatcher.\n-func (e *endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (e *endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr, localLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nif atomic.LoadUint32(&LogPackets) == 1 && e.file == nil {\nlogPacket(\"recv\", protocol, vv.First())\n}\n@@ -147,7 +147,7 @@ func (e *endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAdd\npanic(err)\n}\n}\n- e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, protocol, vv)\n+ e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, localLinkAddr, protocol, vv)\n}\n// Attach implements the stack.LinkEndpoint interface. It saves the dispatcher\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable.go",
"new_path": "pkg/tcpip/link/waitable/waitable.go",
"diff": "@@ -51,12 +51,12 @@ func New(lower tcpip.LinkEndpointID) (tcpip.LinkEndpointID, *Endpoint) {\n// It is called by the link-layer endpoint being wrapped when a packet arrives,\n// and only forwards to the actual dispatcher if Wait or WaitDispatch haven't\n// been called.\n-func (e *Endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (e *Endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr, localLinkAddress tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nif !e.dispatchGate.Enter() {\nreturn\n}\n- e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, protocol, vv)\n+ e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, localLinkAddress, protocol, vv)\ne.dispatchGate.Leave()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/waitable/waitable_test.go",
"new_path": "pkg/tcpip/link/waitable/waitable_test.go",
"diff": "@@ -35,7 +35,7 @@ type countedEndpoint struct {\ndispatcher stack.NetworkDispatcher\n}\n-func (e *countedEndpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (e *countedEndpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr, localLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\ne.dispatchCount++\n}\n@@ -106,21 +106,21 @@ func TestWaitDispatch(t *testing.T) {\n}\n// Dispatch and check that it goes through.\n- ep.dispatcher.DeliverNetworkPacket(ep, \"\", 0, buffer.VectorisedView{})\n+ ep.dispatcher.DeliverNetworkPacket(ep, \"\", \"\", 0, buffer.VectorisedView{})\nif want := 1; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n// Wait on writes, then try to dispatch. It must go through.\nwep.WaitWrite()\n- ep.dispatcher.DeliverNetworkPacket(ep, \"\", 0, buffer.VectorisedView{})\n+ ep.dispatcher.DeliverNetworkPacket(ep, \"\", \"\", 0, buffer.VectorisedView{})\nif want := 2; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n// Wait on dispatches, then try to dispatch. It must not go through.\nwep.WaitDispatch()\n- ep.dispatcher.DeliverNetworkPacket(ep, \"\", 0, buffer.VectorisedView{})\n+ ep.dispatcher.DeliverNetworkPacket(ep, \"\", \"\", 0, buffer.VectorisedView{})\nif want := 2; ep.dispatchCount != want {\nt.Fatalf(\"Unexpected dispatchCount: got=%v, want=%v\", ep.dispatchCount, want)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -391,7 +391,7 @@ func (n *NIC) RemoveAddress(addr tcpip.Address) *tcpip.Error {\n// Note that the ownership of the slice backing vv is retained by the caller.\n// This rule applies only to the slice itself, not to the items of the slice;\n// the ownership of the items is not retained by the caller.\n-func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\n+func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr, localLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView) {\nnetProto, ok := n.stack.networkProtocols[protocol]\nif !ok {\nn.stack.stats.UnknownProtocolRcvdPackets.Increment()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -196,7 +196,7 @@ type NetworkProtocol interface {\ntype NetworkDispatcher interface {\n// DeliverNetworkPacket finds the appropriate network protocol\n// endpoint and hands the packet over for further processing.\n- DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView)\n+ DeliverNetworkPacket(linkEP LinkEndpoint, dstLinkAddr, srcLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv buffer.VectorisedView)\n}\n// LinkEndpointCapabilities is the type associated with the capabilities\n@@ -238,6 +238,10 @@ type LinkEndpoint interface {\n// WritePacket writes a packet with the given protocol through the given\n// route.\n+ //\n+ // To participate in transparent bridging, a LinkEndpoint implementation\n+ // should call eth.Encode with header.EthernetFields.SrcAddr set to\n+ // r.LocalLinkAddress if it is provided.\nWritePacket(r *Route, hdr buffer.Prependable, payload buffer.VectorisedView, protocol tcpip.NetworkProtocolNumber) *tcpip.Error\n// Attach attaches the data link layer endpoint to the network-layer\n"
}
] | Go | Apache License 2.0 | google/gvisor | Pass local link address to DeliverNetworkPacket
This allows a NetworkDispatcher to implement transparent bridging,
assuming all implementations of LinkEndpoint.WritePacket call eth.Encode
with header.EthernetFields.SrcAddr set to the passed
Route.LocalLinkAddress, if it is provided.
PiperOrigin-RevId: 213686651
Change-Id: I446a4ac070970202f0724ef796ff1056ae4dd72a |
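The WritePacket changes in this commit reduce to one decision when the ethernet header is encoded: use the route's LocalLinkAddress when it is set, otherwise fall back to the endpoint's own address. A dependency-free sketch of that fallback follows; LinkAddress, Route, and EthernetFields here are simplified stand-ins for the tcpip, stack, and header types, not the real API.

package main

import "fmt"

type LinkAddress string

type Route struct {
	RemoteLinkAddress LinkAddress
	LocalLinkAddress  LinkAddress // set by a bridge that wants to preserve the src
}

type EthernetFields struct {
	SrcAddr, DstAddr LinkAddress
	Type             uint16
}

// outgoingHeader mirrors the WritePacket logic: a non-empty LocalLinkAddress
// on the route wins over the endpoint's configured address.
func outgoingHeader(r *Route, endpointAddr LinkAddress, proto uint16) EthernetFields {
	hdr := EthernetFields{DstAddr: r.RemoteLinkAddress, Type: proto}
	if r.LocalLinkAddress != "" {
		hdr.SrcAddr = r.LocalLinkAddress
	} else {
		hdr.SrcAddr = endpointAddr
	}
	return hdr
}

func main() {
	ep := LinkAddress("\x11\x22\x33\x44\x55\x66")
	bridged := LinkAddress("\xcc\xbb\xaa\x77\x88\x99")
	remote := LinkAddress("\x77\x88\x99\xaa\xbb\xcc")

	direct := outgoingHeader(&Route{RemoteLinkAddress: remote}, ep, 0x0800)
	viaBridge := outgoingHeader(&Route{RemoteLinkAddress: remote, LocalLinkAddress: bridged}, ep, 0x0800)

	fmt.Printf("direct src:  % x\n", string(direct.SrcAddr))
	fmt.Printf("bridged src: % x\n", string(viaBridge.SrcAddr))
}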
259,992 | 19.09.2018 17:14:20 | 25,200 | e3952733011df912ecaa48974832a054a45c345a | Fix sandbox and gofer capabilities
Capabilities.Set() adds capabilities,
but doesn't remove existing ones that might have been loaded. Fixed
the code and added tests. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -428,13 +428,13 @@ func parseAndFilterOptions(opts []string, allowedKeys ...string) ([]string, erro\nkv := strings.Split(o, \"=\")\nswitch len(kv) {\ncase 1:\n- if contains(allowedKeys, o) {\n+ if specutils.ContainsStr(allowedKeys, o) {\nout = append(out, o)\ncontinue\n}\nlog.Warningf(\"ignoring unsupported key %q\", kv)\ncase 2:\n- if contains(allowedKeys, kv[0]) {\n+ if specutils.ContainsStr(allowedKeys, kv[0]) {\nout = append(out, o)\ncontinue\n}\n@@ -540,15 +540,6 @@ func mountFlags(opts []string) fs.MountSourceFlags {\nreturn mf\n}\n-func contains(strs []string, str string) bool {\n- for _, s := range strs {\n- if s == str {\n- return true\n- }\n- }\n- return false\n-}\n-\nfunc mustFindFilesystem(name string) fs.Filesystem {\nfs, ok := fs.FindFilesystem(name)\nif !ok {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -55,18 +55,27 @@ go_test(\nname = \"cmd_test\",\nsize = \"small\",\nsrcs = [\n+ \"capability_test.go\",\n\"delete_test.go\",\n\"exec_test.go\",\n],\n+ data = [\n+ \"//runsc\",\n+ ],\nembed = [\":cmd\"],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/log\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/urpc\",\n\"//runsc/boot\",\n+ \"//runsc/container\",\n+ \"//runsc/specutils\",\n+ \"//runsc/test/testutil\",\n\"@com_github_google_go-cmp//cmp:go_default_library\",\n\"@com_github_google_go-cmp//cmp/cmpopts:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n+ \"@com_github_syndtr_gocapability//capability:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/capability.go",
"new_path": "runsc/cmd/capability.go",
"diff": "@@ -16,56 +16,67 @@ package cmd\nimport (\n\"fmt\"\n- \"os\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"github.com/syndtr/gocapability/capability\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n)\n+var allCapTypes = []capability.CapType{\n+ capability.BOUNDS,\n+ capability.EFFECTIVE,\n+ capability.PERMITTED,\n+ capability.INHERITABLE,\n+ capability.AMBIENT,\n+}\n+\n// applyCaps applies the capabilities in the spec to the current thread.\n//\n// Note that it must be called with current thread locked.\nfunc applyCaps(caps *specs.LinuxCapabilities) error {\n- setter, err := capability.NewPid2(os.Getpid())\n+ // Load current capabilities to trim the ones not permitted.\n+ curCaps, err := capability.NewPid2(0)\nif err != nil {\nreturn err\n}\n- if err := setter.Load(); err != nil {\n+ if err := curCaps.Load(); err != nil {\nreturn err\n}\n- bounding, err := trimCaps(caps.Bounding, setter)\n+ // Create an empty capability set to populate.\n+ newCaps, err := capability.NewPid2(0)\nif err != nil {\nreturn err\n}\n- setter.Set(capability.BOUNDS, bounding...)\n- effective, err := trimCaps(caps.Effective, setter)\n- if err != nil {\n- return err\n+ for _, c := range allCapTypes {\n+ if !newCaps.Empty(c) {\n+ panic(\"unloaded capabilities must be empty\")\n}\n- setter.Set(capability.EFFECTIVE, effective...)\n-\n- permitted, err := trimCaps(caps.Permitted, setter)\n+ set, err := trimCaps(getCaps(c, caps), curCaps)\nif err != nil {\nreturn err\n}\n- setter.Set(capability.PERMITTED, permitted...)\n-\n- inheritable, err := trimCaps(caps.Inheritable, setter)\n- if err != nil {\n- return err\n+ newCaps.Set(c, set...)\n}\n- setter.Set(capability.INHERITABLE, inheritable...)\n- ambient, err := trimCaps(caps.Ambient, setter)\n- if err != nil {\n- return err\n+ return newCaps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS)\n}\n- setter.Set(capability.AMBIENT, ambient...)\n- return setter.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS)\n+func getCaps(which capability.CapType, caps *specs.LinuxCapabilities) []string {\n+ switch which {\n+ case capability.BOUNDS:\n+ return caps.Bounding\n+ case capability.EFFECTIVE:\n+ return caps.Effective\n+ case capability.PERMITTED:\n+ return caps.Permitted\n+ case capability.INHERITABLE:\n+ return caps.Inheritable\n+ case capability.AMBIENT:\n+ return caps.Ambient\n+ }\n+ panic(fmt.Sprint(\"invalid capability type:\", which))\n}\nfunc trimCaps(names []string, setter capability.Capabilities) ([]capability.Cap, error) {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/capability_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"fmt\"\n+ \"os\"\n+ \"testing\"\n+\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"github.com/syndtr/gocapability/capability\"\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n+ \"gvisor.googlesource.com/gvisor/runsc/container\"\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+func init() {\n+ log.SetLevel(log.Debug)\n+ if err := testutil.ConfigureExePath(); err != nil {\n+ panic(err.Error())\n+ }\n+}\n+\n+func checkProcessCaps(pid int, wantCaps *specs.LinuxCapabilities) error {\n+ curCaps, err := capability.NewPid2(pid)\n+ if err != nil {\n+ return fmt.Errorf(\"capability.NewPid2(%d) failed: %v\", pid, err)\n+ }\n+ if err := curCaps.Load(); err != nil {\n+ return fmt.Errorf(\"unable to load capabilities: %v\", err)\n+ }\n+ fmt.Printf(\"Capabilities (PID: %d): %v\\n\", pid, curCaps)\n+\n+ for _, c := range allCapTypes {\n+ if err := checkCaps(c, curCaps, wantCaps); err != nil {\n+ return err\n+ }\n+ }\n+ return nil\n+}\n+\n+func checkCaps(which capability.CapType, curCaps capability.Capabilities, wantCaps *specs.LinuxCapabilities) error {\n+ wantNames := getCaps(which, wantCaps)\n+ for name, c := range capFromName {\n+ want := specutils.ContainsStr(wantNames, name)\n+ got := curCaps.Get(which, c)\n+ if want != got {\n+ if want {\n+ return fmt.Errorf(\"capability %v:%s should be set\", which, name)\n+ }\n+ return fmt.Errorf(\"capability %v:%s should NOT be set\", which, name)\n+ }\n+ }\n+ return nil\n+}\n+\n+func TestCapabilities(t *testing.T) {\n+ stop := testutil.StartReaper()\n+ defer stop()\n+\n+ spec := testutil.NewSpecWithArgs(\"/bin/sleep\", \"10000\")\n+ caps := []string{\n+ \"CAP_CHOWN\",\n+ \"CAP_SYS_PTRACE\", // ptrace is added due to the platform choice.\n+ }\n+ spec.Process.Capabilities = &specs.LinuxCapabilities{\n+ Permitted: caps,\n+ Bounding: caps,\n+ Effective: caps,\n+ Inheritable: caps,\n+ }\n+\n+ conf := testutil.TestConfig()\n+\n+ // Use --network=host to make sandbox use spec's capabilities.\n+ conf.Network = boot.NetworkHost\n+\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ c, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ // Check that sandbox and gofer have the proper capabilities.\n+ if err := checkProcessCaps(c.Sandbox.Pid, spec.Process.Capabilities); err != nil {\n+ t.Error(err)\n+ }\n+ if err := checkProcessCaps(c.GoferPid, goferCaps); err != nil 
{\n+ t.Error(err)\n+ }\n+}\n+\n+func TestMain(m *testing.M) {\n+ testutil.RunAsRoot()\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -31,6 +31,23 @@ import (\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n+var caps = []string{\n+ \"CAP_CHOWN\",\n+ \"CAP_DAC_OVERRIDE\",\n+ \"CAP_DAC_READ_SEARCH\",\n+ \"CAP_FOWNER\",\n+ \"CAP_FSETID\",\n+ \"CAP_SYS_CHROOT\",\n+}\n+\n+// goferCaps is the minimal set of capabilities needed by the Gofer to operate\n+// on files.\n+var goferCaps = &specs.LinuxCapabilities{\n+ Bounding: caps,\n+ Effective: caps,\n+ Permitted: caps,\n+}\n+\n// Gofer implements subcommands.Command for the \"gofer\" command, which starts a\n// filesystem gofer. This command should not be called directly.\ntype Gofer struct {\n@@ -72,25 +89,11 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\nif g.applyCaps {\n- // Minimal set of capabilities needed by the Gofer to operate on files.\n- caps := []string{\n- \"CAP_CHOWN\",\n- \"CAP_DAC_OVERRIDE\",\n- \"CAP_DAC_READ_SEARCH\",\n- \"CAP_FOWNER\",\n- \"CAP_FSETID\",\n- }\n- lc := &specs.LinuxCapabilities{\n- Bounding: caps,\n- Effective: caps,\n- Permitted: caps,\n- }\n-\n// Disable caps when calling myself again.\n// Note: minimal argument handling for the default case to keep it simple.\nargs := os.Args\nargs = append(args, \"--apply-caps=false\")\n- if err := setCapsAndCallSelf(args, lc); err != nil {\n+ if err := setCapsAndCallSelf(args, goferCaps); err != nil {\nFatalf(\"Unable to apply caps: %v\", err)\n}\npanic(\"unreachable\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -392,3 +392,13 @@ func Mount(src, dst, typ string, flags uint32) error {\n}\nreturn nil\n}\n+\n+// ContainsStr returns true if 'str' is inside 'strs'.\n+func ContainsStr(strs []string, str string) bool {\n+ for _, s := range strs {\n+ if s == str {\n+ return true\n+ }\n+ }\n+ return false\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix sandbox and gofer capabilities
Capabilities.Set() adds capabilities,
but doesn't remove existing ones that might have been loaded. Fixed
the code and added tests.
PiperOrigin-RevId: 213726369
Change-Id: Id7fa6fce53abf26c29b13b9157bb4c6616986fba |
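A minimal, hypothetical Go sketch of the pitfall this commit describes: with gocapability, Load() fills the sets with whatever the process already holds, and Set() only adds on top of that, so stale capabilities survive unless the sets are cleared (or built from scratch) first. The helper name applyExactly is invented for illustration and is not the runsc code.

package main

import (
	"log"
	"os"

	"github.com/syndtr/gocapability/capability"
)

// applyExactly makes the process's capability sets contain exactly 'want',
// clearing whatever Load() picked up before calling Set().
func applyExactly(want ...capability.Cap) error {
	caps, err := capability.NewPid2(os.Getpid())
	if err != nil {
		return err
	}
	if err := caps.Load(); err != nil {
		return err
	}
	// Without this Clear, Set() would merely add to the loaded sets and
	// previously held capabilities would remain in place.
	caps.Clear(capability.CAPS | capability.BOUNDS)
	caps.Set(capability.CAPS|capability.BOUNDS, want...)
	return caps.Apply(capability.CAPS | capability.BOUNDS)
}

func main() {
	if err := applyExactly(capability.CAP_CHOWN, capability.CAP_SYS_CHROOT); err != nil {
		log.Fatalf("applying capabilities: %v", err)
	}
	log.Print("capabilities applied")
}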
259,854 | 19.09.2018 17:48:24 | 25,200 | 117ac8bc5b4a98cd74c68ac0feed02b5bb4b78b1 | Fix data race on tcp.endpoint.hardError in tcp.(*endpoint).Read
tcp.endpoint.hardError is protected by tcp.endpoint.mu. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -127,7 +127,7 @@ type endpoint struct {\n// hardError is meaningful only when state is stateError, it stores the\n// error to be returned when read/write syscalls are called and the\n- // endpoint is in this state.\n+ // endpoint is in this state. hardError is protected by mu.\nhardError *tcpip.Error `state:\".(string)\"`\n// workerRunning specifies if a worker goroutine is running.\n@@ -447,9 +447,10 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages,\nbufUsed := e.rcvBufUsed\nif s := e.state; s != stateConnected && s != stateClosed && bufUsed == 0 {\ne.rcvListMu.Unlock()\n+ he := e.hardError\ne.mu.RUnlock()\nif s == stateError {\n- return buffer.View{}, tcpip.ControlMessages{}, e.hardError\n+ return buffer.View{}, tcpip.ControlMessages{}, he\n}\nreturn buffer.View{}, tcpip.ControlMessages{}, tcpip.ErrInvalidEndpointState\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix data race on tcp.endpoint.hardError in tcp.(*endpoint).Read
tcp.endpoint.hardError is protected by tcp.endpoint.mu.
PiperOrigin-RevId: 213730698
Change-Id: I4e4f322ac272b145b500b1a652fbee0c7b985be2 |
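A self-contained sketch of the locking pattern the fix applies (the endpoint, mu, and hardError names mirror the diff; everything else is simplified): copy the mu-protected field into a local variable while the lock is still held, and only use the copy after unlocking.

package main

import (
	"errors"
	"fmt"
	"sync"
)

const stateError = 1

type endpoint struct {
	mu        sync.RWMutex
	state     int
	hardError error // protected by mu
}

// Read copies hardError while holding mu; a writer may change the field
// after RUnlock, but the local copy was taken under the lock, so there is
// no racy access to the shared field.
func (e *endpoint) Read() error {
	e.mu.RLock()
	s := e.state
	he := e.hardError // copied while mu is still held
	e.mu.RUnlock()

	if s == stateError {
		return he
	}
	return nil
}

func main() {
	e := &endpoint{state: stateError, hardError: errors.New("connection reset")}
	fmt.Println(e.Read())
}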
259,992 | 19.09.2018 18:16:18 | 25,200 | b873e388f36ee7251059d54a963c27a55be50ab5 | Update gocapability commit to get bug fix | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "@@ -79,5 +79,5 @@ go_repository(\ngo_repository(\nname = \"com_github_syndtr_gocapability\",\nimportpath = \"github.com/syndtr/gocapability\",\n- commit = \"33e07d32887e1e06b7c025f27ce52f62c7990bc0\",\n+ commit = \"d98352740cb2c55f81556b63d4a1ec64c5a319c2\",\n)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update gocapability commit to get bug fix
PiperOrigin-RevId: 213734203
Change-Id: I9cf5d3885fb88b41444c686168d4cab00f09988a |
259,891 | 19.09.2018 22:19:10 | 25,200 | ffb5fdd69021713e88ec965e77487b7fc28bc104 | runsc: Fix stdin/stdout/stderr in multi-container mode.
The issue with the previous change was that the stdin/stdout/stderr passed to
the sentry were dup'd by host.ImportFile. This left a dangling FD that was
never closed, causing containerd to time out while waiting for the container to stop. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -193,7 +193,9 @@ type StartArgs struct {\n// CID is the ID of the container to start.\nCID string\n- // FilePayload contains the file descriptor over which the sandbox will\n+ // FilePayload contains, in order:\n+ // * stdin, stdout, and stderr.\n+ // * the file descriptor over which the sandbox will\n// request files from its root filesystem.\nurpc.FilePayload\n}\n@@ -222,8 +224,8 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\nif path.Clean(args.CID) != args.CID {\nreturn fmt.Errorf(\"container ID shouldn't contain directory traversals such as \\\"..\\\": %q\", args.CID)\n}\n- if len(args.FilePayload.Files) == 0 {\n- return fmt.Errorf(\"start arguments must contain at least one file for the container root\")\n+ if len(args.FilePayload.Files) < 4 {\n+ return fmt.Errorf(\"start arguments must contain stdin, stderr, and stdout followed by at least one file for the container root gofer\")\n}\nerr := cm.l.startContainer(cm.l.k, args.Spec, args.Conf, args.CID, args.FilePayload.Files)\n@@ -408,7 +410,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\ncm.l.k = k\n// Set up the restore environment.\n- fds := &fdDispenser{fds: cm.l.ioFDs}\n+ fds := &fdDispenser{fds: cm.l.goferFDs}\nrenv, err := createRestoreEnvironment(cm.l.spec, cm.l.conf, fds)\nif err != nil {\nreturn fmt.Errorf(\"error creating RestoreEnvironment: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fds.go",
"new_path": "runsc/boot/fds.go",
"diff": "@@ -16,7 +16,6 @@ package boot\nimport (\n\"fmt\"\n- \"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n@@ -28,15 +27,20 @@ import (\n// createFDMap creates an fd map that contains stdin, stdout, and stderr. If\n// console is true, then ioctl calls will be passed through to the host fd.\n-func createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool) (*kernel.FDMap, error) {\n+// Upon success, createFDMap dups then closes stdioFDs.\n+func createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool, stdioFDs []int) (*kernel.FDMap, error) {\n+ if len(stdioFDs) != 3 {\n+ return nil, fmt.Errorf(\"stdioFDs should contain exactly 3 FDs (stdin, stdout, and stderr), but %d FDs received\", len(stdioFDs))\n+ }\n+\nfdm := k.NewFDMap()\ndefer fdm.DecRef()\n// Maps sandbox fd to host fd.\nfdMap := map[int]int{\n- 0: syscall.Stdin,\n- 1: syscall.Stdout,\n- 2: syscall.Stderr,\n+ 0: stdioFDs[0],\n+ 1: stdioFDs[1],\n+ 2: stdioFDs[2],\n}\nmounter := fs.FileOwnerFromContext(ctx)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -82,7 +82,7 @@ func (f *fdDispenser) empty() bool {\n// createMountNamespace creates a mount namespace containing the root filesystem\n// and all mounts. 'rootCtx' is used to walk directories to find mount points.\n-func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, ioFDs []int) (*fs.MountNamespace, error) {\n+func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, goferFDs []int) (*fs.MountNamespace, error) {\nmounts := compileMounts(spec)\nif conf.MultiContainer {\n// Create a tmpfs mount where we create and mount a root filesystem for\n@@ -92,7 +92,7 @@ func createMountNamespace(userCtx context.Context, rootCtx context.Context, spec\nDestination: ChildContainersDir,\n})\n}\n- fds := &fdDispenser{fds: ioFDs}\n+ fds := &fdDispenser{fds: goferFDs}\nrootInode, err := createRootMount(rootCtx, spec, conf, fds, mounts)\nif err != nil {\nreturn nil, fmt.Errorf(\"failed to create root mount: %v\", err)\n@@ -587,14 +587,14 @@ func subtargets(root string, mnts []specs.Mount) []string {\n}\n// setFileSystemForProcess is used to set up the file system and amend the procArgs accordingly.\n-// procArgs are passed by reference and the FDMap field is modified.\n-func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, ioFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\n+// procArgs are passed by reference and the FDMap field is modified. It dups stdioFDs.\n+func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, stdioFDs, goferFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\nctx := procArgs.NewContext(k)\n// Create the FD map, which will set stdin, stdout, and stderr. If\n// console is true, then ioctl calls will be passed through to the host\n// fd.\n- fdm, err := createFDMap(ctx, k, ls, console)\n+ fdm, err := createFDMap(ctx, k, ls, console, stdioFDs)\nif err != nil {\nreturn fmt.Errorf(\"error importing fds: %v\", err)\n}\n@@ -618,7 +618,7 @@ func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spe\nmns := k.RootMountNamespace()\nif mns == nil {\n// Create the virtual filesystem.\n- mns, err := createMountNamespace(ctx, rootCtx, spec, conf, ioFDs)\n+ mns, err := createMountNamespace(ctx, rootCtx, spec, conf, goferFDs)\nif err != nil {\nreturn fmt.Errorf(\"error creating mounts: %v\", err)\n}\n@@ -630,7 +630,7 @@ func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spe\n// Create the container's root filesystem mount.\nlog.Infof(\"Creating new process in child container.\")\n- fds := &fdDispenser{fds: append([]int{}, ioFDs...)}\n+ fds := &fdDispenser{fds: append([]int{}, goferFDs...)}\nrootInode, err := createRootMount(rootCtx, spec, conf, fds, nil)\nif err != nil {\nreturn fmt.Errorf(\"error creating filesystem for container: %v\", err)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -78,8 +78,11 @@ type Loader struct {\nwatchdog *watchdog.Watchdog\n- // ioFDs are the FDs that attach the sandbox to the gofers.\n- ioFDs []int\n+ // stdioFDs contains stdin, stdout, and stderr.\n+ stdioFDs []int\n+\n+ // goferFDs are the FDs that attach the sandbox to the gofers.\n+ goferFDs []int\n// spec is the base configuration for the root container.\nspec *specs.Spec\n@@ -139,7 +142,7 @@ func init() {\n// New initializes a new kernel loader configured by spec.\n// New also handles setting up a kernel for restoring a container.\n-func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, ioFDs []int, console bool) (*Loader, error) {\n+func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, console bool) (*Loader, error) {\nif err := usage.Init(); err != nil {\nreturn nil, fmt.Errorf(\"Error setting up memory usage: %v\", err)\n}\n@@ -278,7 +281,8 @@ func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, ioFDs []int\nconf: conf,\nconsole: console,\nwatchdog: watchdog,\n- ioFDs: ioFDs,\n+ stdioFDs: []int{syscall.Stdin, syscall.Stdout, syscall.Stderr},\n+ goferFDs: goferFDs,\nspec: spec,\nstartSignalForwarding: startSignalForwarding,\nrootProcArgs: procArgs,\n@@ -390,7 +394,8 @@ func (l *Loader) run() error {\n&l.rootProcArgs,\nl.spec,\nl.conf,\n- l.ioFDs,\n+ l.stdioFDs,\n+ l.goferFDs,\nl.console,\nl.rootProcArgs.Credentials,\nl.rootProcArgs.Limits,\n@@ -474,11 +479,14 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nioFDs = append(ioFDs, fd)\n}\n+ stdioFDs := ioFDs[:3]\n+ goferFDs := ioFDs[3:]\nif err := setFileSystemForProcess(\n&procArgs,\nspec,\nconf,\n- ioFDs,\n+ stdioFDs,\n+ goferFDs,\nfalse,\ncreds,\nprocArgs.Limits,\n@@ -487,6 +495,13 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn fmt.Errorf(\"failed to create new process: %v\", err)\n}\n+ // setFileSystemForProcess dup'd stdioFDs, so we can close them.\n+ for i, fd := range stdioFDs {\n+ if err := syscall.Close(fd); err != nil {\n+ return fmt.Errorf(\"failed to close stdioFD #%d: %v\", i, fd)\n+ }\n+ }\n+\nctx := procArgs.NewContext(l.k)\nmns := k.RootMountNamespace()\nif err := setExecutablePath(ctx, mns, &procArgs); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -101,8 +101,8 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {\n}\n// Start starts running a non-root container inside the sandbox.\n-func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, ioFiles []*os.File) error {\n- for _, f := range ioFiles {\n+func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, goferFiles []*os.File) error {\n+ for _, f := range goferFiles {\ndefer f.Close()\n}\n@@ -113,12 +113,15 @@ func (s *Sandbox) Start(spec *specs.Spec, conf *boot.Config, cid string, ioFiles\n}\ndefer sandboxConn.Close()\n+ // The payload must container stdin/stdout/stderr followed by gofer\n+ // files.\n+ files := append([]*os.File{os.Stdin, os.Stdout, os.Stderr}, goferFiles...)\n// Start running the container.\nargs := boot.StartArgs{\nSpec: spec,\nConf: conf,\nCID: cid,\n- FilePayload: urpc.FilePayload{Files: ioFiles},\n+ FilePayload: urpc.FilePayload{Files: files},\n}\nif err := sandboxConn.Call(boot.ContainerStart, &args, nil); err != nil {\nreturn fmt.Errorf(\"error starting non-root container %v: %v\", spec.Process.Args, err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Fix stdin/stdout/stderr in multi-container mode.
The issue with the previous change was that the stdin/stdout/stderr passed to
the sentry were dup'd by host.ImportFile. This left a dangling FD that was
never closed, causing containerd to time out while waiting for the container to stop.
PiperOrigin-RevId: 213753032
Change-Id: Ia5e4c0565c42c8610d3b59f65599a5643b0901e4 |
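A rough illustration of the dangling-FD symptom the commit describes, using an ordinary pipe instead of the sentry's imported stdio; the Dup call stands in for whatever the import step does. The point is that every duplicate of the write end must be closed before the reader sees EOF.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Stand-in for the import step: the receiver dup()s the FD it was handed.
	dupFD, err := syscall.Dup(int(w.Fd()))
	if err != nil {
		panic(err)
	}
	dup := os.NewFile(uintptr(dupFD), "dup-of-write-end")

	fmt.Fprintln(dup, "hello")

	// Both copies must go away; leaving either one open keeps the pipe alive
	// and the ReadAll below would block forever waiting for EOF.
	w.Close()   // original
	dup.Close() // duplicate

	out, _ := ioutil.ReadAll(r)
	fmt.Printf("reader got: %q\n", out)
}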
259,992 | 20.09.2018 18:53:02 | 25,200 | b63c4bfe02d1b88eb12d75d0c7051a006d5cbe7d | Set Sandbox.Chroot so it gets cleaned up upon destruction
I've made several attempts to create a test, but the lack of
permission from the test user makes it nearly impossible to
test anything useful. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -95,8 +95,8 @@ type Container struct {\n// be 0 if the gofer has been killed.\nGoferPid int `json:\"goferPid\"`\n- // Sandbox is the sandbox this container is running in. It will be nil\n- // if the container is not in state Running or Created.\n+ // Sandbox is the sandbox this container is running in. It's set when the\n+ // container is created and reset when the sandbox is destroyed.\nSandbox *sandbox.Sandbox `json:\"sandbox\"`\n}\n@@ -136,14 +136,12 @@ func Load(rootDir, id string) (*Container, error) {\n// This is inherently racey.\nif c.Status == Running || c.Status == Created {\n// Check if the sandbox process is still running.\n- if !c.Sandbox.IsRunning() {\n+ if !c.isSandboxRunning() {\n// Sandbox no longer exists, so this container definitely does not exist.\nc.changeStatus(Stopped)\n- c.Sandbox = nil\n} else if c.Status == Running {\n- // Container state should reflect the actual state of\n- // the application, so we don't consider gofer process\n- // here.\n+ // Container state should reflect the actual state of the application, so\n+ // we don't consider gofer process here.\nif err := c.Signal(syscall.Signal(0)); err != nil {\nc.changeStatus(Stopped)\n}\n@@ -288,8 +286,8 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n// Start starts running the containerized process inside the sandbox.\nfunc (c *Container) Start(conf *boot.Config) error {\nlog.Debugf(\"Start container %q\", c.ID)\n- if c.Status != Created {\n- return fmt.Errorf(\"cannot start container in state %s\", c.Status)\n+ if err := c.requireStatus(\"start\", Created); err != nil {\n+ return err\n}\n// \"If any prestart hook fails, the runtime MUST generate an error,\n@@ -330,11 +328,9 @@ func (c *Container) Start(conf *boot.Config) error {\n// to restore a container from its state file.\nfunc (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {\nlog.Debugf(\"Restore container %q\", c.ID)\n-\n- if c.Status != Created {\n- return fmt.Errorf(\"cannot restore container in state %s\", c.Status)\n+ if err := c.requireStatus(\"restore\", Created); err != nil {\n+ return err\n}\n-\nif err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {\nreturn err\n}\n@@ -361,8 +357,8 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke\n// the newly created process.\nfunc (c *Container) Execute(args *control.ExecArgs) (int32, error) {\nlog.Debugf(\"Execute in container %q, args: %+v\", c.ID, args)\n- if c.Status != Created && c.Status != Running {\n- return 0, fmt.Errorf(\"cannot exec in container in state %s\", c.Status)\n+ if err := c.requireStatus(\"execute in\", Created, Running); err != nil {\n+ return 0, err\n}\nreturn c.Sandbox.Execute(c.ID, args)\n}\n@@ -370,8 +366,8 @@ func (c *Container) Execute(args *control.ExecArgs) (int32, error) {\n// Event returns events for the container.\nfunc (c *Container) Event() (*boot.Event, error) {\nlog.Debugf(\"Getting events for container %q\", c.ID)\n- if c.Status != Running && c.Status != Created {\n- return nil, fmt.Errorf(\"cannot get events for container in state: %s\", c.Status)\n+ if err := c.requireStatus(\"get events for\", Created, Running, Paused); err != nil {\n+ return nil, err\n}\nreturn c.Sandbox.Event(c.ID)\n}\n@@ -379,7 +375,7 @@ func (c *Container) Event() (*boot.Event, error) {\n// Pid returns the Pid of the sandbox the container is running in, or -1 if the\n// container is not running.\nfunc (c *Container) Pid() int {\n- if c.Status != Running && 
c.Status != Created && c.Status != Paused {\n+ if err := c.requireStatus(\"pid\", Created, Running, Paused); err != nil {\nreturn -1\n}\nreturn c.Sandbox.Pid\n@@ -390,8 +386,8 @@ func (c *Container) Pid() int {\n// and wait returns immediately.\nfunc (c *Container) Wait() (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on container %q\", c.ID)\n- if c.Sandbox == nil || !c.Sandbox.IsRunning() {\n- return 0, fmt.Errorf(\"container sandbox is not running\")\n+ if !c.isSandboxRunning() {\n+ return 0, fmt.Errorf(\"container is not running\")\n}\nreturn c.Sandbox.Wait(c.ID)\n}\n@@ -400,8 +396,8 @@ func (c *Container) Wait() (syscall.WaitStatus, error) {\n// returns its WaitStatus.\nfunc (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on pid %d in sandbox %q\", pid, c.Sandbox.ID)\n- if c.Sandbox == nil || !c.Sandbox.IsRunning() {\n- return 0, fmt.Errorf(\"container sandbox is not running\")\n+ if !c.isSandboxRunning() {\n+ return 0, fmt.Errorf(\"container is not running\")\n}\nreturn c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)\n}\n@@ -410,8 +406,8 @@ func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus\n// its WaitStatus.\nfunc (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {\nlog.Debugf(\"Wait on pid %d in container %q\", pid, c.ID)\n- if c.Sandbox == nil || !c.Sandbox.IsRunning() {\n- return 0, fmt.Errorf(\"container sandbox is not running\")\n+ if !c.isSandboxRunning() {\n+ return 0, fmt.Errorf(\"container is not running\")\n}\nreturn c.Sandbox.WaitPID(c.ID, pid, clearStatus)\n}\n@@ -421,8 +417,8 @@ func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, er\n// TODO: Distinguish different error types.\nfunc (c *Container) Signal(sig syscall.Signal) error {\nlog.Debugf(\"Signal container %q: %v\", c.ID, sig)\n- if c.Status == Stopped {\n- return fmt.Errorf(\"container sandbox is stopped\")\n+ if err := c.requireStatus(\"running\", Running); err != nil {\n+ return err\n}\n// TODO: Query the container for its state, then save it.\nreturn c.Sandbox.Signal(c.ID, sig)\n@@ -432,8 +428,8 @@ func (c *Container) Signal(sig syscall.Signal) error {\n// The statefile will be written to f, the file at the specified image-path.\nfunc (c *Container) Checkpoint(f *os.File) error {\nlog.Debugf(\"Checkpoint container %q\", c.ID)\n- if c.Status == Stopped {\n- return fmt.Errorf(\"container sandbox is stopped\")\n+ if err := c.requireStatus(\"checkpoint\", Created, Running, Paused); err != nil {\n+ return err\n}\nreturn c.Sandbox.Checkpoint(c.ID, f)\n}\n@@ -484,8 +480,8 @@ func (c *Container) State() specs.State {\n// Processes retrieves the list of processes and associated metadata inside a\n// container.\nfunc (c *Container) Processes() ([]*control.Process, error) {\n- if c.Status != Running && c.Status != Paused {\n- return nil, fmt.Errorf(\"cannot get processes of container %q because it isn't running. It is in state %v\", c.ID, c.Status)\n+ if err := c.requireStatus(\"get processes of\", Running, Paused); err != nil {\n+ return nil, err\n}\nreturn c.Sandbox.Processes(c.ID)\n}\n@@ -544,11 +540,13 @@ func (c *Container) save() error {\n// root containers), and waits for the container or sandbox and the gofer\n// to stop. 
If any of them doesn't stop before timeout, an error is returned.\nfunc (c *Container) stop() error {\n- if c.Sandbox != nil && c.Sandbox.IsRunning() {\n+ if c.Sandbox != nil {\nlog.Debugf(\"Destroying container %q\", c.ID)\nif err := c.Sandbox.DestroyContainer(c.ID); err != nil {\nreturn fmt.Errorf(\"error destroying container %q: %v\", c.ID, err)\n}\n+ // Only set sandbox to nil after it has been told to destroy the container.\n+ c.Sandbox = nil\n}\n// Try killing gofer if it does not exit with container.\n@@ -567,7 +565,7 @@ func (c *Container) waitForStopped() error {\ndefer cancel()\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\nop := func() error {\n- if c.Sandbox != nil && c.Sandbox.IsRunning() {\n+ if c.isSandboxRunning() {\nif err := c.Signal(syscall.Signal(0)); err == nil {\nreturn fmt.Errorf(\"container is still running\")\n}\n@@ -689,3 +687,16 @@ func (c *Container) changeStatus(s Status) {\n}\nc.Status = s\n}\n+\n+func (c *Container) isSandboxRunning() bool {\n+ return c.Sandbox != nil && c.Sandbox.IsRunning()\n+}\n+\n+func (c *Container) requireStatus(action string, statuses ...Status) error {\n+ for _, s := range statuses {\n+ if c.Status == s {\n+ return nil\n+ }\n+ }\n+ return fmt.Errorf(\"cannot %s container %q in state %s\", action, c.ID, c.Status)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/chroot.go",
"new_path": "runsc/sandbox/chroot.go",
"diff": "@@ -74,6 +74,8 @@ func setUpChroot() (string, error) {\n// tearDownChroot unmounts /proc and /runsc from the chroot before deleting the\n// directory.\nfunc tearDownChroot(chroot string) error {\n+ log.Debugf(\"Removing chroot mounts %q\", chroot)\n+\n// Unmount /proc.\nproc := filepath.Join(chroot, \"proc\")\nif err := syscall.Unmount(proc, 0); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -451,6 +451,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nif err != nil {\nreturn fmt.Errorf(\"error setting up chroot: %v\", err)\n}\n+ s.Chroot = chroot // Remember path so it can cleaned up.\ncmd.SysProcAttr.Chroot = chroot\ncmd.Args[0] = \"/runsc\"\ncmd.Path = \"/runsc\"\n@@ -549,9 +550,9 @@ func (s *Sandbox) IsRootContainer(cid string) bool {\nreturn s.ID == cid\n}\n-// Destroy frees all resources associated with the sandbox.\n-// Destroy returns error if any step fails, and the function can be safely retried.\n-func (s *Sandbox) Destroy() error {\n+// Destroy frees all resources associated with the sandbox. It fails fast and\n+// is idempotent.\n+func (s *Sandbox) destroy() error {\nlog.Debugf(\"Destroy sandbox %q\", s.ID)\nif s.Pid != 0 {\nlog.Debugf(\"Killing sandbox %q\", s.ID)\n@@ -674,7 +675,12 @@ func (s *Sandbox) Stacks() (string, error) {\nfunc (s *Sandbox) DestroyContainer(cid string) error {\nif s.IsRootContainer(cid) {\nlog.Debugf(\"Destroying root container %q by destroying sandbox\", cid)\n- return s.Destroy()\n+ return s.destroy()\n+ }\n+\n+ if !s.IsRunning() {\n+ // Sandbox isn't running anymore, container is already destroyed.\n+ return nil\n}\nlog.Debugf(\"Destroying container %q in sandbox %q\", cid, s.ID)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Set Sandbox.Chroot so it gets cleaned up upon destruction
I've made several attempts to create a test, but the lack of
permission from the test user makes it nearly impossible to
test anything useful.
PiperOrigin-RevId: 213922174
Change-Id: I5b502ca70cb7a6645f8836f028fb203354b4c625 |
259,992 | 26.09.2018 21:58:54 | 25,200 | fca9a390db4c965b4606dd85838460841bd4ab14 | Return correct parent PID
The old code was returning the ID of the thread that created
the child process. It should be returning the ID of
the parent process instead. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/control/proc.go",
"new_path": "pkg/sentry/control/proc.go",
"diff": "@@ -269,11 +269,14 @@ func Processes(k *kernel.Kernel, out *[]*Process) error {\ncontinue\n}\n+ ppid := kernel.ThreadID(0)\n+ if tg.Leader().Parent() != nil {\n+ ppid = ts.Root.IDOfThreadGroup(tg.Leader().Parent().ThreadGroup())\n+ }\n*out = append(*out, &Process{\nUID: tg.Leader().Credentials().EffectiveKUID,\nPID: pid,\n- // If Parent is null (i.e. tg is the init process), PPID will be 0.\n- PPID: ts.Root.IDOfTask(tg.Leader().Parent()),\n+ PPID: ppid,\nSTime: formatStartTime(now, tg.Leader().StartTime()),\nC: percentCPU(tg.CPUStats(), tg.Leader().StartTime(), now),\nTime: tg.CPUStats().SysTime.String(),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Return correct parent PID
The old code was returning the ID of the thread that created
the child process. It should be returning the ID of
the parent process instead.
PiperOrigin-RevId: 214720910
Change-Id: I95715c535bcf468ecf1ae771cccd04a4cd345b36 |
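A toy model of the distinction the fix makes; the types and IDs below are invented for illustration and are not the sentry's real kernel structures. The reported PPID is the thread-group (process) ID of the leader's parent, not the ID of the particular thread that spawned the child, and 0 when there is no parent.

package main

import "fmt"

type ThreadGroup struct {
	id     int
	leader *Task
}

type Task struct {
	id     int
	tg     *ThreadGroup
	parent *Task
}

// ppidOf returns the thread-group ID of the leader's parent, or 0 for init.
func ppidOf(tg *ThreadGroup) int {
	parent := tg.leader.parent
	if parent == nil {
		return 0
	}
	return parent.tg.id
}

func main() {
	initTG := &ThreadGroup{id: 1}
	initTG.leader = &Task{id: 1, tg: initTG}

	// A non-leader thread of init spawns the child.
	secondThread := &Task{id: 7, tg: initTG, parent: initTG.leader}

	child := &ThreadGroup{id: 42}
	child.leader = &Task{id: 42, tg: child, parent: secondThread}

	fmt.Println(ppidOf(initTG))         // 0: init has no parent
	fmt.Println(ppidOf(child))          // 1: the parent process's thread-group ID
	fmt.Println(child.leader.parent.id) // 7: the creating thread's ID, which the old code reported
}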
259,992 | 27.09.2018 08:57:32 | 25,200 | 6910ff36434f4bc5aa8c6b3094b617c7c92a9803 | Move uds_test_app to common test_app
This was done so it's easier to add more functionality
to this file for other tests. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -2,13 +2,6 @@ package(licenses = [\"notice\"]) # Apache 2.0\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\", \"go_library\", \"go_test\")\n-go_binary(\n- name = \"uds_test_app\",\n- srcs = [\n- \"uds_test_app.go\",\n- ],\n-)\n-\ngo_library(\nname = \"container\",\nsrcs = [\n@@ -42,7 +35,7 @@ go_test(\n\"multi_container_test.go\",\n],\ndata = [\n- \":uds_test_app\",\n+ \":test_app\",\n\"//runsc\",\n],\nembed = [\":container\"],\n@@ -64,3 +57,9 @@ go_test(\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n+\n+go_binary(\n+ name = \"test_app\",\n+ srcs = [\"test_app.go\"],\n+ deps = [\"@com_github_google_subcommands//:go_default_library\"],\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -722,15 +722,15 @@ func TestUnixDomainSockets(t *testing.T) {\n}\ndefer outputFile.Close()\n- app, err := testutil.FindFile(\"runsc/container/uds_test_app\")\n+ app, err := testutil.FindFile(\"runsc/container/test_app\")\nif err != nil {\n- t.Fatal(\"error finding uds_test_app:\", err)\n+ t.Fatal(\"error finding test_app:\", err)\n}\nsocketPath := filepath.Join(dir, \"uds_socket\")\ndefer os.Remove(socketPath)\n- spec := testutil.NewSpecWithArgs(app, \"--file\", outputPath, \"--socket\", socketPath)\n+ spec := testutil.NewSpecWithArgs(app, \"uds\", \"--file\", outputPath, \"--socket\", socketPath)\nspec.Process.User = specs.User{\nUID: uint32(os.Getuid()),\nGID: uint32(os.Getgid()),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/container/test_app.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Binary test_app is like a swiss knife for tests that need to run anything\n+// inside the sandbox. New functionality can be added with new commands.\n+package main\n+\n+import (\n+ \"context\"\n+ \"fmt\"\n+ \"log\"\n+ \"net\"\n+ \"os\"\n+ \"strconv\"\n+ \"time\"\n+\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+)\n+\n+func main() {\n+ subcommands.Register(subcommands.HelpCommand(), \"\")\n+ subcommands.Register(subcommands.FlagsCommand(), \"\")\n+ subcommands.Register(new(uds), \"\")\n+\n+ flag.Parse()\n+\n+ exitCode := subcommands.Execute(context.Background())\n+ os.Exit(int(exitCode))\n+}\n+\n+type uds struct {\n+ fileName string\n+ socketPath string\n+}\n+\n+// Name implements subcommands.Command.Name.\n+func (*uds) Name() string {\n+ return \"uds\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsys.\n+func (*uds) Synopsis() string {\n+ return \"creates unix domain socket client and server. Client sends a contant flow of sequential numbers. Server prints them to --file\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*uds) Usage() string {\n+ return \"uds <flags>\"\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (c *uds) SetFlags(f *flag.FlagSet) {\n+ f.StringVar(&c.fileName, \"file\", \"\", \"name of output file\")\n+ f.StringVar(&c.socketPath, \"socket\", \"\", \"path to socket\")\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (c *uds) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ if c.fileName == \"\" || c.socketPath == \"\" {\n+ log.Fatal(\"Flags cannot be empty, given: fileName: %q, socketPath: %q\", c.fileName, c.socketPath)\n+ return subcommands.ExitFailure\n+ }\n+ outputFile, err := os.OpenFile(c.fileName, os.O_WRONLY|os.O_CREATE, 0666)\n+ if err != nil {\n+ log.Fatal(\"error opening output file:\", err)\n+ }\n+\n+ defer os.Remove(c.socketPath)\n+\n+ listener, err := net.Listen(\"unix\", c.socketPath)\n+ if err != nil {\n+ log.Fatal(\"error listening on socket %q:\", c.socketPath, err)\n+ }\n+\n+ go server(listener, outputFile)\n+ for i := 0; ; i++ {\n+ conn, err := net.Dial(\"unix\", c.socketPath)\n+ if err != nil {\n+ log.Fatal(\"error dialing:\", err)\n+ }\n+ if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {\n+ log.Fatal(\"error writing:\", err)\n+ }\n+ conn.Close()\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+}\n+\n+func server(listener net.Listener, out *os.File) {\n+ buf := make([]byte, 16)\n+\n+ for {\n+ c, err := listener.Accept()\n+ if err != nil {\n+ log.Fatal(\"error accepting connection:\", err)\n+ }\n+ nr, err := c.Read(buf)\n+ if err != nil {\n+ log.Fatal(\"error reading from buf:\", err)\n+ }\n+ data := buf[0:nr]\n+ fmt.Fprint(out, string(data)+\"\\n\")\n+ }\n+}\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/container/uds_test_app.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Binary uds-test-app opens a socket and reads a series of numbers\n-// which are then written to an output file.\n-package main\n-\n-import (\n- \"flag\"\n- \"fmt\"\n- \"log\"\n- \"net\"\n- \"os\"\n- \"strconv\"\n- \"time\"\n-)\n-\n-var (\n- fileName = flag.String(\"file\", \"\", \"name of output file\")\n- socketPath = flag.String(\"socket\", \"\", \"path to socket\")\n-)\n-\n-func server(listener net.Listener, f *os.File) {\n- buf := make([]byte, 16)\n-\n- for {\n- c, err := listener.Accept()\n- if err != nil {\n- log.Fatal(\"error accepting connection:\", err)\n- }\n- nr, err := c.Read(buf)\n- if err != nil {\n- log.Fatal(\"error reading from buf:\", err)\n- }\n- data := buf[0:nr]\n- fmt.Fprintf(f, string(data)+\"\\n\")\n- }\n-}\n-\n-func main() {\n- flag.Parse()\n- if *fileName == \"\" || *socketPath == \"\" {\n- log.Fatalf(\"Flags cannot be empty, given: fileName=%s, socketPath=%s\", *fileName, *socketPath)\n- }\n- outputFile, err := os.OpenFile(*fileName, os.O_WRONLY|os.O_CREATE, 0666)\n- if err != nil {\n- log.Fatal(\"error opening output file:\", err)\n- }\n-\n- socket := *socketPath\n- defer os.Remove(socket)\n-\n- listener, err := net.Listen(\"unix\", socket)\n- if err != nil {\n- log.Fatal(\"error listening on socket:\", err)\n- }\n-\n- go server(listener, outputFile)\n- for i := 0; ; i++ {\n-\n- conn, err := net.Dial(\"unix\", socket)\n- if err != nil {\n- log.Fatal(\"error dialing:\", err)\n- }\n- if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {\n- log.Fatal(\"error writing:\", err)\n- }\n- conn.Close()\n- time.Sleep(100 * time.Millisecond)\n- }\n-\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move uds_test_app to common test_app
This was done so it's easier to add more functionality
to this file for other tests.
PiperOrigin-RevId: 214782043
Change-Id: I1f38b9ee1219b3ce7b789044ada8e52bdc1e6279 |
259,992 | 27.09.2018 10:25:19 | 25,200 | b514ab05897bca53c1d4f71c912f2977b3134daf | Refactor 'runsc boot' to take container ID as argument
This makes the flow slightly simpler (no need to call
Loader.SetRootContainer). And this is a required change to tag
tasks with container ID inside the Sentry. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -171,7 +171,6 @@ func (cm *containerManager) StartRoot(cid *string, _ *struct{}) error {\nif err := <-cm.startResultChan; err != nil {\nreturn fmt.Errorf(\"failed to start sandbox: %v\", err)\n}\n- cm.l.setRootContainerID(*cid)\nreturn nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -142,7 +142,7 @@ func init() {\n// New initializes a new kernel loader configured by spec.\n// New also handles setting up a kernel for restoring a container.\n-func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, console bool) (*Loader, error) {\n+func New(id string, spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, console bool) (*Loader, error) {\nif err := usage.Init(); err != nil {\nreturn nil, fmt.Errorf(\"Error setting up memory usage: %v\", err)\n}\n@@ -286,6 +286,9 @@ func New(spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []\nspec: spec,\nstartSignalForwarding: startSignalForwarding,\nrootProcArgs: procArgs,\n+ sandboxID: id,\n+ containerRootTGs: make(map[string]*kernel.ThreadGroup),\n+ execProcesses: make(map[execID]*kernel.ThreadGroup),\n}\nctrl.manager.l = l\nreturn l, nil\n@@ -420,10 +423,9 @@ func (l *Loader) run() error {\nl.rootProcArgs.FDMap.DecRef()\n}\n- if l.execProcesses != nil {\n- return fmt.Errorf(\"there shouldn't already be a cache of exec'd processes, but found: %v\", l.execProcesses)\n- }\n- l.execProcesses = make(map[execID]*kernel.ThreadGroup)\n+ l.mu.Lock()\n+ l.containerRootTGs[l.sandboxID] = l.k.GlobalInit()\n+ l.mu.Unlock()\n// Start signal forwarding only after an init process is created.\nl.stopSignalForwarding = l.startSignalForwarding()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader_test.go",
"new_path": "runsc/boot/loader_test.go",
"diff": "@@ -101,7 +101,7 @@ func createLoader() (*Loader, func(), error) {\nreturn nil, nil, err\n}\n- l, err := New(spec, conf, fd, -1 /* device fd */, []int{sandEnd}, false)\n+ l, err := New(\"foo\", spec, conf, fd, -1 /* device fd */, []int{sandEnd}, false)\nif err != nil {\ncleanup()\nreturn nil, nil, err\n@@ -129,7 +129,6 @@ func TestRun(t *testing.T) {\n}()\n// Run the container.\n- l.setRootContainerID(\"foo\")\nif err := l.Run(); err != nil {\nt.Errorf(\"error running container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -69,7 +69,7 @@ func (*Boot) Synopsis() string {\n// Usage implements subcommands.Command.Usage.\nfunc (*Boot) Usage() string {\n- return `boot [flags]`\n+ return `boot [flags] <container id>`\n}\n// SetFlags implements subcommands.Command.SetFlags.\n@@ -86,7 +86,7 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {\n// Execute implements subcommands.Command.Execute. It starts a sandbox in a\n// waiting state.\nfunc (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n- if b.specFD == -1 || b.controllerFD == -1 || f.NArg() != 0 {\n+ if b.specFD == -1 || b.controllerFD == -1 || f.NArg() != 1 {\nf.Usage()\nreturn subcommands.ExitUsageError\n}\n@@ -138,7 +138,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\n// Create the loader.\n- l, err := boot.New(spec, conf, b.controllerFD, b.deviceFD, b.ioFDs.GetArray(), b.console)\n+ l, err := boot.New(f.Arg(0), spec, conf, b.controllerFD, b.deviceFD, b.ioFDs.GetArray(), b.console)\nif err != nil {\nFatalf(\"error creating loader: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -460,6 +460,9 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n}\n}\n+ // Add container as the last argument.\n+ cmd.Args = append(cmd.Args, s.ID)\n+\n// Log the fds we are donating to the sandbox process.\nfor i, f := range cmd.ExtraFiles {\nlog.Debugf(\"Donating FD %d: %q\", i+3, f.Name())\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor 'runsc boot' to take container ID as argument
This makes the flow slightly simpler (no need to call
Loader.SetRootContainer). And this is a required change to tag
tasks with container ID inside the Sentry.
PiperOrigin-RevId: 214795210
Change-Id: I6ff4af12e73bb07157f7058bb15fd5bb88760884 |
259,948 | 27.09.2018 10:41:28 | 25,200 | 234f36b6f2cb0db74d119079e5244619d6ea38ad | sentry: export cpuTime function. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/state/state_metadata.go",
"new_path": "pkg/sentry/state/state_metadata.go",
"diff": "@@ -28,7 +28,7 @@ const (\n)\nfunc addSaveMetadata(m map[string]string) {\n- t, err := cpuTime()\n+ t, err := CPUTime()\nif err != nil {\nlog.Warningf(\"Error getting cpu time: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/state/state_unsafe.go",
"new_path": "pkg/sentry/state/state_unsafe.go",
"diff": "@@ -23,7 +23,8 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n)\n-func cpuTime() (time.Duration, error) {\n+// CPUTime returns the CPU time usage by Sentry and app.\n+func CPUTime() (time.Duration, error) {\nvar ts syscall.Timespec\n_, _, errno := syscall.RawSyscall(syscall.SYS_CLOCK_GETTIME, uintptr(linux.CLOCK_PROCESS_CPUTIME_ID), uintptr(unsafe.Pointer(&ts)), 0)\nif errno != 0 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: export cpuTime function.
PiperOrigin-RevId: 214798278
Change-Id: Id59d1ceb35037cda0689d3a1c4844e96c6957615 |
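A hedged sketch of what the newly exported CPUTime measures: the whole process's consumed CPU time via CLOCK_PROCESS_CPUTIME_ID. This version goes through golang.org/x/sys/unix rather than the raw syscall in the diff, so treat it as an approximation of the idea, not the sentry code.

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// processCPUTime returns the user + system CPU time consumed by this process.
func processCPUTime() (time.Duration, error) {
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
		return 0, err
	}
	return time.Duration(ts.Nano()), nil
}

func main() {
	// Burn a little CPU so there is something to measure.
	sum := 0
	for i := 0; i < 10000000; i++ {
		sum += i
	}
	d, err := processCPUTime()
	if err != nil {
		panic(err)
	}
	fmt.Printf("sum=%d cpu=%v\n", sum, d)
}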
259,992 | 27.09.2018 22:52:25 | 25,200 | 1166c088fc51c83af3198e25d5e774103ae976fc | Move common test code to function | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -62,7 +62,8 @@ func waitForProcessList(cont *Container, want []*control.Process) error {\n}\nreturn nil\n}\n- return testutil.Poll(cb, 5*time.Second)\n+ // Gives plenty of time as tests can run slow under --race.\n+ return testutil.Poll(cb, 30*time.Second)\n}\nfunc waitForProcessCount(cont *Container, want int) error {\n@@ -77,7 +78,8 @@ func waitForProcessCount(cont *Container, want int) error {\n}\nreturn nil\n}\n- return testutil.Poll(cb, 5*time.Second)\n+ // Gives plenty of time as tests can run slow under --race.\n+ return testutil.Poll(cb, 30*time.Second)\n}\n// procListsEqual is used to check whether 2 Process lists are equal for all\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "package container\nimport (\n+ \"fmt\"\n\"io/ioutil\"\n\"math\"\n\"os\"\n@@ -56,38 +57,60 @@ func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {\nreturn specs, ids\n}\n-// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n-// containers in the same sandbox.\n-func TestMultiContainerSanity(t *testing.T) {\n- for _, conf := range configs(all...) {\n- t.Logf(\"Running test with conf: %+v\", conf)\n-\n+func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {\nrootDir, err := testutil.SetupRootDir()\nif err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n+ return nil, nil, fmt.Errorf(\"error creating root dir: %v\", err)\n}\n- defer os.RemoveAll(rootDir)\n- // Setup the containers.\n- sleep := []string{\"sleep\", \"100\"}\n- specs, ids := createSpecs(sleep, sleep)\nvar containers []*Container\n+ var bundles []string\n+ cleanup := func() {\n+ for _, c := range containers {\n+ c.Destroy()\n+ }\n+ for _, b := range bundles {\n+ os.RemoveAll(b)\n+ }\n+ os.RemoveAll(rootDir)\n+ }\nfor i, spec := range specs {\nbundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n+ cleanup()\n+ return nil, nil, fmt.Errorf(\"error setting up container: %v\", err)\n}\n- defer os.RemoveAll(bundleDir)\n+ bundles = append(bundles, bundleDir)\n+\ncont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\nif err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n+ cleanup()\n+ return nil, nil, fmt.Errorf(\"error creating container: %v\", err)\n}\n- defer cont.Destroy()\n+ containers = append(containers, cont)\n+\nif err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n+ cleanup()\n+ return nil, nil, fmt.Errorf(\"error starting container: %v\", err)\n}\n- containers = append(containers, cont)\n}\n+ return containers, cleanup, nil\n+}\n+\n+// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n+// containers in the same sandbox.\n+func TestMultiContainerSanity(t *testing.T) {\n+ for _, conf := range configs(all...) 
{\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\n+ // Setup the containers.\n+ sleep := []string{\"sleep\", \"100\"}\n+ specs, ids := createSpecs(sleep, sleep)\n+ containers, cleanup, err := startContainers(conf, specs, ids)\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup()\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n@@ -106,37 +129,18 @@ func TestMultiContainerSanity(t *testing.T) {\n}\nfunc TestMultiContainerWait(t *testing.T) {\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n// The first container should run the entire duration of the test.\ncmd1 := []string{\"sleep\", \"100\"}\n// We'll wait on the second container, which is much shorter lived.\ncmd2 := []string{\"sleep\", \"1\"}\nspecs, ids := createSpecs(cmd1, cmd2)\n- // Setup the containers.\n- var containers []*Container\n- for i, spec := range specs {\nconf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n@@ -206,26 +210,12 @@ func TestExecWait(t *testing.T) {\n// We'll wait on the second container, which is much shorter lived.\ncmd2 := []string{\"sleep\", \"1\"}\nspecs, ids := createSpecs(cmd1, cmd2)\n-\n- // Setup the containers.\n- var containers []*Container\n- for i, spec := range specs {\nconf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Check via ps that process is running.\nexpectedPL := []*control.Process{\n@@ -284,12 +274,6 @@ func TestExecWait(t *testing.T) {\n// TestMultiContainerMount tests that bind mounts can be used with multiple\n// containers.\nfunc TestMultiContainerMount(t *testing.T) {\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\ncmd1 := []string{\"sleep\", \"100\"}\n// 'src != dst' ensures that 'dst' doesn't exist in the host and must be\n@@ -309,24 +293,12 @@ func TestMultiContainerMount(t *testing.T) {\n})\n// Setup the containers.\n- var containers []*Container\n- for i, spec := range sps {\nconf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ containers, cleanup, err := 
startContainers(conf, sps, ids)\nif err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\nws, err := containers[1].Wait()\nif err != nil {\n@@ -343,32 +315,14 @@ func TestMultiContainerSignal(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n// Setup the containers.\nsleep := []string{\"sleep\", \"100\"}\nspecs, ids := createSpecs(sleep, sleep)\n- var containers []*Container\n- for i, spec := range specs {\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Check via ps that container 1 process is running.\nexpectedPL := []*control.Process{\n@@ -452,34 +406,14 @@ func TestMultiContainerDestroy(t *testing.T) {\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n// Two containers that will run for a long time. 
We will\n// destroy the second one.\nspecs, ids := createSpecs([]string{\"sleep\", \"100\"}, []string{\"sleep\", \"100\"})\n-\n- // Setup the containers.\n- var containers []*Container\n- for i, spec := range specs {\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Exec in the root container to check for the existence of the\n// second containers root filesystem directory.\n@@ -519,31 +453,12 @@ func TestMultiContainerProcesses(t *testing.T) {\nspecs, ids := createSpecs(\n[]string{\"sleep\", \"100\"},\n[]string{\"sh\", \"-c\", \"while true; do sleep 100; done\"})\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n- var containers []*Container\n- for i, spec := range specs {\nconf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Check root's container process list doesn't include other containers.\nexpectedPL0 := []*control.Process{\n@@ -592,31 +507,12 @@ func TestMultiContainerKillAll(t *testing.T) {\nspecs, ids := createSpecs(\n[]string{app, \"task-tree\", \"--depth=2\", \"--width=2\"},\n[]string{app, \"task-tree\", \"--depth=4\", \"--width=2\"})\n-\n- rootDir, err := testutil.SetupRootDir()\n- if err != nil {\n- t.Fatalf(\"error creating root dir: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n-\n- var containers []*Container\n- for i, spec := range specs {\nconf := testutil.TestConfig()\n- bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(bundleDir)\n- cont, err := Create(ids[i], spec, conf, bundleDir, \"\", \"\")\n+ containers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer cont.Destroy()\n- if err := cont.Start(conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n- containers = append(containers, cont)\n+ t.Fatalf(\"error starting containers: %v\", err)\n}\n+ defer cleanup()\n// Wait until all processes are created.\nrootProcCount := int(math.Pow(2, 3) - 1)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move common test code to function
PiperOrigin-RevId: 214890335
Change-Id: I42743f0ce46a5a42834133bce2f32d187194fc87 |
259,992 | 28.09.2018 09:43:13 | 25,200 | cf226d48ce8c49409049e03ed405366db9fc2a04 | Switch to root in userns when CAP_SYS_CHROOT is also missing
Some tests check current capabilities and re-run the tests as root inside
userns if required capabilities are missing. It was checking for
CAP_SYS_ADMIN only; CAP_SYS_CHROOT is also required now. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/capability.go",
"new_path": "runsc/cmd/capability.go",
"diff": "@@ -60,7 +60,11 @@ func applyCaps(caps *specs.LinuxCapabilities) error {\nnewCaps.Set(c, set...)\n}\n- return newCaps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS)\n+ if err := newCaps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {\n+ return err\n+ }\n+ log.Infof(\"Capabilities applied: %+v\", newCaps)\n+ return nil\n}\nfunc getCaps(which capability.CapType, caps *specs.LinuxCapabilities) []string {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/cmd.go",
"new_path": "runsc/cmd/cmd.go",
"diff": "@@ -85,7 +85,6 @@ func setCapsAndCallSelf(args []string, caps *specs.LinuxCapabilities) error {\nreturn err\n}\n- log.Infof(\"Capabilities applied: %+v\", caps)\nlog.Infof(\"Execve %q again, bye!\", binPath)\nsyscall.Exec(binPath, args, []string{})\npanic(\"unreachable\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/BUILD",
"new_path": "runsc/sandbox/BUILD",
"diff": "@@ -25,6 +25,7 @@ go_library(\n\"//runsc/specutils\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n+ \"@com_github_syndtr_gocapability//capability:go_default_library\",\n\"@com_github_vishvananda_netlink//:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -26,6 +26,7 @@ import (\n\"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"github.com/syndtr/gocapability/capability\"\n\"gvisor.googlesource.com/gvisor/pkg/control/client\"\n\"gvisor.googlesource.com/gvisor/pkg/control/server\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n@@ -415,7 +416,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// as user nobody.\nif conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {\nlog.Warningf(\"Running sandbox in test mode as current user (uid=%d gid=%d). This is only safe in tests!\", os.Getuid(), os.Getgid())\n- } else if specutils.CanSetUIDGID() {\n+ } else if specutils.HasCapabilities(capability.CAP_SETUID, capability.CAP_SETGID) {\n// Map nobody in the new namespace to nobody in the parent namespace.\nconst nobody = 65534\ncmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{\n@@ -442,7 +443,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// bind-mount the executable inside it.\nif conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {\nlog.Warningf(\"Running sandbox in test mode without chroot. This is only safe in tests!\")\n- } else if specutils.HasCapSysAdmin() {\n+ } else if specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_SYS_CHROOT) {\nlog.Infof(\"Sandbox will be started in minimal chroot\")\nchroot, err := setUpChroot()\nif err != nil {\n@@ -453,7 +454,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.Args[0] = \"/runsc\"\ncmd.Path = \"/runsc\"\n} else {\n- return fmt.Errorf(\"can't run sandbox process in minimal chroot since we don't have CAP_SYS_ADMIN\")\n+ return fmt.Errorf(\"can't run sandbox process in minimal chroot since we don't have CAP_SYS_ADMIN and CAP_SYS_CHROOT\")\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/namespace.go",
"new_path": "runsc/specutils/namespace.go",
"diff": "@@ -204,8 +204,8 @@ func SetUIDGIDMappings(cmd *exec.Cmd, s *specs.Spec) {\n}\n}\n-// CanSetUIDGID returns true if the user has SETUID and SETGID capabilities.\n-func CanSetUIDGID() bool {\n+// HasCapabilities returns true if the user has all capabilties in 'cs'.\n+func HasCapabilities(cs ...capability.Cap) bool {\ncaps, err := capability.NewPid2(os.Getpid())\nif err != nil {\nreturn false\n@@ -213,18 +213,10 @@ func CanSetUIDGID() bool {\nif err := caps.Load(); err != nil {\nreturn false\n}\n- return caps.Get(capability.EFFECTIVE, capability.CAP_SETUID) &&\n- caps.Get(capability.EFFECTIVE, capability.CAP_SETGID)\n-}\n-\n-// HasCapSysAdmin returns true if the user has CAP_SYS_ADMIN capability.\n-func HasCapSysAdmin() bool {\n- caps, err := capability.NewPid2(os.Getpid())\n- if err != nil {\n+ for _, c := range cs {\n+ if !caps.Get(capability.EFFECTIVE, c) {\nreturn false\n}\n- if err := caps.Load(); err != nil {\n- return false\n}\n- return caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)\n+ return true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/BUILD",
"new_path": "runsc/test/testutil/BUILD",
"diff": "@@ -18,5 +18,6 @@ go_library(\n\"//runsc/specutils\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n+ \"@com_github_syndtr_gocapability//capability:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -32,6 +32,7 @@ import (\n\"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"github.com/syndtr/gocapability/capability\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n@@ -234,12 +235,12 @@ func WaitForHTTP(port int, timeout time.Duration) error {\nreturn Poll(cb, timeout)\n}\n-// RunAsRoot ensures the test runs with CAP_SYS_ADMIN. If need it will create\n-// a new user namespace and reexecute the test as root inside of the namespace.\n-// This functionr returns when it's running as root. If it needs to create\n-// another process, it will exit from there and not return.\n+// RunAsRoot ensures the test runs with CAP_SYS_ADMIN and CAP_SYS_CHROOT. If\n+// need it will create a new user namespace and reexecute the test as root\n+// inside of the namespace. This functionr returns when it's running as root. If\n+// it needs to create another process, it will exit from there and not return.\nfunc RunAsRoot() {\n- if specutils.HasCapSysAdmin() {\n+ if specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_SYS_CHROOT) {\nreturn\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Switch to root in userns when CAP_SYS_CHROOT is also missing
Some tests check current capabilities and re-run the tests as root inside
userns if required capabibilities are missing. It was checking for
CAP_SYS_ADMIN only, CAP_SYS_CHROOT is also required now.
PiperOrigin-RevId: 214949226
Change-Id: Ic81363969fa76c04da408fae8ea7520653266312 |
259,881 | 28.09.2018 11:02:11 | 25,200 | 3ff24b4f2c6d5a7a872a744150bbfca795afdbfc | Require AF_UNIX sockets from the gofer
host.endpoint already has the check, but it is missing from
host.ConnectedEndpoint. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/socket.go",
"new_path": "pkg/sentry/fs/gofer/socket.go",
"diff": "package gofer\nimport (\n+ \"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/p9\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/host\"\n@@ -101,6 +102,7 @@ func (e *endpoint) BidirectionalConnect(ce unix.ConnectingEndpoint, returnConnec\nc, terr := host.NewConnectedEndpoint(hostFile, ce.WaiterQueue(), e.path)\nif terr != nil {\nce.Unlock()\n+ log.Warningf(\"Gofer returned invalid host socket for BidirectionalConnect; file %+v flags %+v: %v\", e.file, cf, terr)\nreturn terr\n}\n@@ -120,6 +122,7 @@ func (e *endpoint) UnidirectionalConnect() (unix.ConnectedEndpoint, *tcpip.Error\nc, terr := host.NewConnectedEndpoint(hostFile, &waiter.Queue{}, e.path)\nif terr != nil {\n+ log.Warningf(\"Gofer returned invalid host socket for UnidirectionalConnect; file %+v: %v\", e.file, terr)\nreturn nil, terr\n}\nc.Init()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/socket.go",
"new_path": "pkg/sentry/fs/host/socket.go",
"diff": "@@ -35,6 +35,8 @@ import (\n// endpoint encapsulates the state needed to represent a host Unix socket.\n//\n+// TODO: Remove/merge with ConnectedEndpoint.\n+//\n// +stateify savable\ntype endpoint struct {\nqueue waiter.Queue `state:\"zerovalue\"`\n@@ -288,13 +290,23 @@ func recvMsg(fd int, data [][]byte, numRights uintptr, peek bool, addr *tcpip.Fu\nreturn rl, ml, control.New(nil, nil, newSCMRights(fds)), nil\n}\n-// NewConnectedEndpoint creates a new ConnectedEndpoint backed by\n-// a host FD that will pretend to be bound at a given sentry path.\n+// NewConnectedEndpoint creates a new ConnectedEndpoint backed by a host FD\n+// that will pretend to be bound at a given sentry path.\n//\n-// The caller is responsible for calling Init(). Additionaly, Release needs\n-// to be called twice because host.ConnectedEndpoint is both a\n-// unix.Receiver and unix.ConnectedEndpoint.\n+// The caller is responsible for calling Init(). Additionaly, Release needs to\n+// be called twice because host.ConnectedEndpoint is both a unix.Receiver and\n+// unix.ConnectedEndpoint.\nfunc NewConnectedEndpoint(file *fd.FD, queue *waiter.Queue, path string) (*ConnectedEndpoint, *tcpip.Error) {\n+ family, err := syscall.GetsockoptInt(file.FD(), syscall.SOL_SOCKET, syscall.SO_DOMAIN)\n+ if err != nil {\n+ return nil, translateError(err)\n+ }\n+\n+ if family != syscall.AF_UNIX {\n+ // We only allow Unix sockets.\n+ return nil, tcpip.ErrInvalidEndpointState\n+ }\n+\ne := &ConnectedEndpoint{path: path, queue: queue, file: file}\n// AtomicRefCounters start off with a single reference. We need two.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Require AF_UNIX sockets from the gofer
host.endpoint already has the check, but it is missing from
host.ConnectedEndpoint.
PiperOrigin-RevId: 214962762
Change-Id: I88bb13a5c5871775e4e7bf2608433df8a3d348e6 |
259,992 | 28.09.2018 12:20:56 | 25,200 | 2496d9b4b6343154525f73e9583a4a60bebcfa30 | Make runsc kill and delete more conformant to the "spec" | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -21,10 +21,8 @@ import (\n\"path\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n- \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/control/server\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n@@ -32,7 +30,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/state\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/time\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/watchdog\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/urpc\"\n)\n@@ -247,91 +244,7 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {\n// filesystem.\nfunc (cm *containerManager) Destroy(cid *string, _ *struct{}) error {\nlog.Debugf(\"containerManager.destroy %q\", *cid)\n- cm.l.mu.Lock()\n- defer cm.l.mu.Unlock()\n-\n- key := execID{cid: *cid}\n- if tg, ok := cm.l.processes[key]; ok {\n- // Send SIGKILL to threadgroup.\n- if err := tg.SendSignal(&arch.SignalInfo{\n- Signo: int32(linux.SIGKILL),\n- Code: arch.SignalInfoUser,\n- }); err == nil {\n- // SIGKILL sent. Now wait for it to exit.\n- log.Debugf(\"Waiting for container process to exit.\")\n- tg.WaitExited()\n- log.Debugf(\"Container process exited.\")\n- } else if err != syserror.ESRCH {\n- return fmt.Errorf(\"error sending SIGKILL to container %q: %v\", *cid, err)\n- }\n-\n- // Remove the container thread group from the map.\n- delete(cm.l.processes, key)\n- }\n-\n- // Clean up the filesystem by unmounting all mounts for this container\n- // and deleting the container root directory.\n-\n- // First get a reference to the container root directory.\n- mns := cm.l.k.RootMountNamespace()\n- mnsRoot := mns.Root()\n- defer mnsRoot.DecRef()\n- ctx := cm.l.rootProcArgs.NewContext(cm.l.k)\n- containerRoot := path.Join(ChildContainersDir, *cid)\n- containerRootDirent, err := mns.FindInode(ctx, mnsRoot, nil, containerRoot, linux.MaxSymlinkTraversals)\n- if err == syserror.ENOENT {\n- // Container must have been destroyed already. That's fine.\n- return nil\n- }\n- if err != nil {\n- return fmt.Errorf(\"error finding container root directory %q: %v\", containerRoot, err)\n- }\n- defer containerRootDirent.DecRef()\n-\n- // Iterate through all submounts and unmount them. 
We unmount lazily by\n- // setting detach=true, so we can unmount in any order.\n- for _, m := range containerRootDirent.Inode.MountSource.Submounts() {\n- root := m.Root()\n- defer root.DecRef()\n-\n- // Do a best-effort unmount by flushing the refs and unmount\n- // with \"detach only = true\".\n- log.Debugf(\"Unmounting container submount %q\", root.BaseName())\n- m.FlushDirentRefs()\n- if err := mns.Unmount(ctx, root, true /* detach only */); err != nil {\n- return fmt.Errorf(\"error unmounting container submount %q: %v\", root.BaseName(), err)\n- }\n- }\n-\n- // Unmount the container root itself.\n- log.Debugf(\"Unmounting container root %q\", containerRoot)\n- containerRootDirent.Inode.MountSource.FlushDirentRefs()\n- if err := mns.Unmount(ctx, containerRootDirent, true /* detach only */); err != nil {\n- return fmt.Errorf(\"error unmounting container root mount %q: %v\", containerRootDirent.BaseName(), err)\n- }\n-\n- // Get a reference to the parent directory and remove the root\n- // container directory.\n- containersDirDirent, err := mns.FindInode(ctx, mnsRoot, nil, ChildContainersDir, linux.MaxSymlinkTraversals)\n- if err != nil {\n- return fmt.Errorf(\"error finding containers directory %q: %v\", ChildContainersDir, err)\n- }\n- defer containersDirDirent.DecRef()\n- log.Debugf(\"Deleting container root %q\", containerRoot)\n- if err := containersDirDirent.RemoveDirectory(ctx, mnsRoot, *cid); err != nil {\n- return fmt.Errorf(\"error removing directory %q: %v\", containerRoot, err)\n- }\n-\n- // Flushing dirent references triggers many async close operations. We\n- // must wait for those to complete before returning, otherwise the\n- // caller may kill the gofer before they complete, causing a cascade of\n- // failing RPCs.\n- log.Infof(\"Waiting for async filesystem operations to complete\")\n- fs.AsyncBarrier()\n-\n- // We made it!\n- log.Debugf(\"Destroyed container %q\", *cid)\n- return nil\n+ return cm.l.destroyContainer(*cid)\n}\n// ExecuteAsync starts running a command on a created or running sandbox. It\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -16,6 +16,7 @@ package boot\nimport (\n\"fmt\"\n+ \"path\"\n\"path/filepath\"\n\"strconv\"\n\"strings\"\n@@ -576,9 +577,9 @@ func subtargets(root string, mnts []specs.Mount) []string {\nreturn targets\n}\n-// setFileSystemForProcess is used to set up the file system and amend the procArgs accordingly.\n+// setupContainerFS is used to set up the file system and amend the procArgs accordingly.\n// procArgs are passed by reference and the FDMap field is modified. It dups stdioFDs.\n-func setFileSystemForProcess(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, stdioFDs, goferFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\n+func setupContainerFS(procArgs *kernel.CreateProcessArgs, spec *specs.Spec, conf *Config, stdioFDs, goferFDs []int, console bool, creds *auth.Credentials, ls *limits.LimitSet, k *kernel.Kernel, cid string) error {\nctx := procArgs.NewContext(k)\n// Create the FD map, which will set stdin, stdout, and stderr. If\n@@ -676,3 +677,65 @@ func setExecutablePath(ctx context.Context, mns *fs.MountNamespace, procArgs *ke\nprocArgs.Filename = f\nreturn nil\n}\n+\n+// destroyContainerFS cleans up the filesystem by unmounting all mounts for the\n+// given container and deleting the container root directory.\n+func destroyContainerFS(ctx context.Context, cid string, k *kernel.Kernel) error {\n+ // First get a reference to the container root directory.\n+ mns := k.RootMountNamespace()\n+ mnsRoot := mns.Root()\n+ defer mnsRoot.DecRef()\n+ containerRoot := path.Join(ChildContainersDir, cid)\n+ containerRootDirent, err := mns.FindInode(ctx, mnsRoot, nil, containerRoot, linux.MaxSymlinkTraversals)\n+ if err == syserror.ENOENT {\n+ // Container must have been destroyed already. That's fine.\n+ return nil\n+ }\n+ if err != nil {\n+ return fmt.Errorf(\"error finding container root directory %q: %v\", containerRoot, err)\n+ }\n+ defer containerRootDirent.DecRef()\n+\n+ // Iterate through all submounts and unmount them. 
We unmount lazily by\n+ // setting detach=true, so we can unmount in any order.\n+ for _, m := range containerRootDirent.Inode.MountSource.Submounts() {\n+ root := m.Root()\n+ defer root.DecRef()\n+\n+ // Do a best-effort unmount by flushing the refs and unmount\n+ // with \"detach only = true\".\n+ log.Debugf(\"Unmounting container submount %q\", root.BaseName())\n+ m.FlushDirentRefs()\n+ if err := mns.Unmount(ctx, root, true /* detach only */); err != nil {\n+ return fmt.Errorf(\"error unmounting container submount %q: %v\", root.BaseName(), err)\n+ }\n+ }\n+\n+ // Unmount the container root itself.\n+ log.Debugf(\"Unmounting container root %q\", containerRoot)\n+ containerRootDirent.Inode.MountSource.FlushDirentRefs()\n+ if err := mns.Unmount(ctx, containerRootDirent, true /* detach only */); err != nil {\n+ return fmt.Errorf(\"error unmounting container root mount %q: %v\", containerRootDirent.BaseName(), err)\n+ }\n+\n+ // Get a reference to the parent directory and remove the root\n+ // container directory.\n+ containersDirDirent, err := mns.FindInode(ctx, mnsRoot, nil, ChildContainersDir, linux.MaxSymlinkTraversals)\n+ if err != nil {\n+ return fmt.Errorf(\"error finding containers directory %q: %v\", ChildContainersDir, err)\n+ }\n+ defer containersDirDirent.DecRef()\n+ log.Debugf(\"Deleting container root %q\", containerRoot)\n+ if err := containersDirDirent.RemoveDirectory(ctx, mnsRoot, cid); err != nil {\n+ return fmt.Errorf(\"error removing directory %q: %v\", containerRoot, err)\n+ }\n+\n+ // Flushing dirent references triggers many async close operations. We\n+ // must wait for those to complete before returning, otherwise the\n+ // caller may kill the gofer before they complete, causing a cascade of\n+ // failing RPCs.\n+ log.Infof(\"Waiting for async filesystem operations to complete\")\n+ fs.AsyncBarrier()\n+\n+ return nil\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -112,9 +112,6 @@ type Loader struct {\n// have the corresponding pid set.\n//\n// processes is guardded by mu.\n- //\n- // TODO: When containers are removed via `runsc delete`,\n- // processes should be cleaned up.\nprocesses map[execID]*kernel.ThreadGroup\n}\n@@ -385,7 +382,7 @@ func (l *Loader) run() error {\n// If we are restoring, we do not want to create a process.\n// l.restore is set by the container manager when a restore call is made.\nif !l.restore {\n- if err := setFileSystemForProcess(\n+ if err := setupContainerFS(\n&l.rootProcArgs,\nl.spec,\nl.conf,\n@@ -476,7 +473,7 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nstdioFDs := ioFDs[:3]\ngoferFDs := ioFDs[3:]\n- if err := setFileSystemForProcess(\n+ if err := setupContainerFS(\n&procArgs,\nspec,\nconf,\n@@ -519,6 +516,34 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\nreturn nil\n}\n+// destroyContainer stops a container if it is still running and cleans up its\n+// filesystem.\n+func (l *Loader) destroyContainer(cid string) error {\n+ // First kill and wait for all processes in the container.\n+ if err := l.signal(cid, int32(linux.SIGKILL), true /*all*/); err != nil {\n+ return fmt.Errorf(\"failed to SIGKILL all container processes: %v\", err)\n+ }\n+\n+ l.mu.Lock()\n+ defer l.mu.Unlock()\n+\n+ // Remove all container thread groups from the map.\n+ for key := range l.processes {\n+ if key.cid == cid {\n+ delete(l.processes, key)\n+ }\n+ }\n+\n+ ctx := l.rootProcArgs.NewContext(l.k)\n+ if err := destroyContainerFS(ctx, cid, l.k); err != nil {\n+ return fmt.Errorf(\"failed to destroy filesystem for container %q: %v\", cid, err)\n+ }\n+\n+ // We made it!\n+ log.Debugf(\"Container destroyed %q\", cid)\n+ return nil\n+}\n+\nfunc (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {\n// Get the container Root Dirent from the Task, since we must run this\n// process with the same Root.\n@@ -669,13 +694,27 @@ func (l *Loader) signal(cid string, signo int32, all bool) error {\n}\nsi := arch.SignalInfo{Signo: signo}\n- if all {\n+ if !all {\n+ return tg.Leader().SendSignal(&si)\n+ }\n+\n// Pause the kernel to prevent new processes from being created while\n// the signal is delivered. This prevents process leaks when SIGKILL is\n// sent to the entire container.\nl.k.Pause()\n- defer l.k.Unpause()\n- return l.k.SendContainerSignal(cid, &si)\n+ if err := l.k.SendContainerSignal(cid, &si); err != nil {\n+ l.k.Unpause()\n+ return err\n}\n- return tg.Leader().SendSignal(&si)\n+ l.k.Unpause()\n+\n+ // If killing all processes, wait for them to exit.\n+ if all && linux.Signal(signo) == linux.SIGKILL {\n+ for _, t := range l.k.TaskSet().Root.Tasks() {\n+ if t.ContainerID() == cid {\n+ t.ThreadGroup().WaitExited()\n+ }\n+ }\n+ }\n+ return nil\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/boot.go",
"new_path": "runsc/cmd/boot.go",
"diff": "@@ -142,6 +142,8 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"error creating loader: %v\", err)\n}\n+ // Fatalf exits the process and doesn't run defers. 'l' must be destroyed\n+ // explicitly!\n// Notify other processes the loader has been created.\nl.NotifyLoaderCreated()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -72,6 +72,21 @@ func validateID(id string) error {\n// Containers must write their metadata files after any change to their internal\n// states. The entire container directory is deleted when the container is\n// destroyed.\n+//\n+// When the container is stopped, all processes that belong to the container\n+// must be stopped before Destroy() returns. containerd makes roughly the\n+// following calls to stop a container:\n+// - First it attempts to kill the container process with\n+// 'runsc kill SIGTERM'. After some time, it escalates to SIGKILL. In a\n+// separate thread, it's waiting on the container. As soon as the wait\n+// returns, it moves on to the next step:\n+// - It calls 'runsc kill --all SIGKILL' to stop every process that belongs to\n+// the container. 'kill --all SIGKILL' waits for all processes before\n+// returning.\n+// - Containerd waits for stdin, stdout and stderr to drain and be closed.\n+// - It calls 'runsc delete'. runc implementation kills --all SIGKILL once\n+// again just to be sure, waits, and then proceeds with remaining teardown.\n+//\ntype Container struct {\n// ID is the container ID.\nID string `json:\"id\"`\n@@ -451,7 +466,8 @@ func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, er\nreturn c.Sandbox.WaitPID(c.ID, pid, clearStatus)\n}\n-// Signal sends the signal to the container.\n+// Signal sends the signal to the container. If all is true and signal is\n+// SIGKILL, then waits for all processes to exit before returning.\n// Signal returns an error if the container is already stopped.\n// TODO: Distinguish different error types.\nfunc (c *Container) Signal(sig syscall.Signal, all bool) error {\n@@ -534,8 +550,8 @@ func (c *Container) Processes() ([]*control.Process, error) {\nreturn c.Sandbox.Processes(c.ID)\n}\n-// Destroy frees all resources associated with the container. It fails fast and\n-// is idempotent.\n+// Destroy stops all processes and frees all resources associated with the\n+// container. It fails fast and is idempotent.\nfunc (c *Container) Destroy() error {\nlog.Debugf(\"Destroy container %q\", c.ID)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -25,6 +25,7 @@ import (\n\"sync\"\n\"syscall\"\n\"testing\"\n+ \"time\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n@@ -403,12 +404,18 @@ func TestMultiContainerSignal(t *testing.T) {\n// TestMultiContainerDestroy checks that container are properly cleaned-up when\n// they are destroyed.\nfunc TestMultiContainerDestroy(t *testing.T) {\n+ app, err := testutil.FindFile(\"runsc/container/test_app\")\n+ if err != nil {\n+ t.Fatal(\"error finding test_app:\", err)\n+ }\n+\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- // Two containers that will run for a long time. We will\n- // destroy the second one.\n- specs, ids := createSpecs([]string{\"sleep\", \"100\"}, []string{\"sleep\", \"100\"})\n+ // First container will remain intact while the second container is killed.\n+ specs, ids := createSpecs(\n+ []string{app, \"reaper\"},\n+ []string{app, \"fork-bomb\"})\ncontainers, cleanup, err := startContainers(conf, specs, ids)\nif err != nil {\nt.Fatalf(\"error starting containers: %v\", err)\n@@ -416,26 +423,48 @@ func TestMultiContainerDestroy(t *testing.T) {\ndefer cleanup()\n// Exec in the root container to check for the existence of the\n- // second containers root filesystem directory.\n+ // second container's root filesystem directory.\ncontDir := path.Join(boot.ChildContainersDir, containers[1].ID)\n- args := &control.ExecArgs{\n+ dirArgs := &control.ExecArgs{\nFilename: \"/usr/bin/test\",\nArgv: []string{\"test\", \"-d\", contDir},\n}\n- if ws, err := containers[0].executeSync(args); err != nil {\n- t.Fatalf(\"error executing %+v: %v\", args, err)\n+ if ws, err := containers[0].executeSync(dirArgs); err != nil {\n+ t.Fatalf(\"error executing %+v: %v\", dirArgs, err)\n} else if ws.ExitStatus() != 0 {\nt.Errorf(\"exec 'test -f %q' got exit status %d, wanted 0\", contDir, ws.ExitStatus())\n}\n- // Destory the second container.\n+ // Exec more processes to ensure signal all works for exec'd processes too.\n+ args := &control.ExecArgs{\n+ Filename: app,\n+ Argv: []string{app, \"fork-bomb\"},\n+ }\n+ if _, err := containers[1].Execute(args); err != nil {\n+ t.Fatalf(\"error exec'ing: %v\", err)\n+ }\n+\n+ // Let it brew...\n+ time.Sleep(500 * time.Millisecond)\n+\nif err := containers[1].Destroy(); err != nil {\nt.Fatalf(\"error destroying container: %v\", err)\n}\n+ // Check that destroy killed all processes belonging to the container and\n+ // waited for them to exit before returning.\n+ pss, err := containers[0].Sandbox.Processes(\"\")\n+ if err != nil {\n+ t.Fatalf(\"error getting process data from sandbox: %v\", err)\n+ }\n+ expectedPL := []*control.Process{{PID: 1, Cmd: \"test_app\"}}\n+ if !procListsEqual(pss, expectedPL) {\n+ t.Errorf(\"container got process list: %s, want: %s\", procListToString(pss), procListToString(expectedPL))\n+ }\n+\n// Now the container dir should be gone.\n- if ws, err := containers[0].executeSync(args); err != nil {\n- t.Fatalf(\"error executing %+v: %v\", args, err)\n+ if ws, err := containers[0].executeSync(dirArgs); err != nil {\n+ t.Fatalf(\"error executing %+v: %v\", dirArgs, err)\n} else if ws.ExitStatus() == 0 {\nt.Errorf(\"exec 'test -f %q' got exit status 0, wanted non-zero\", contDir)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/test_app.go",
"new_path": "runsc/container/test_app.go",
"diff": "@@ -36,6 +36,8 @@ func main() {\nsubcommands.Register(subcommands.FlagsCommand(), \"\")\nsubcommands.Register(new(uds), \"\")\nsubcommands.Register(new(taskTree), \"\")\n+ subcommands.Register(new(forkBomb), \"\")\n+ subcommands.Register(new(reaper), \"\")\nflag.Parse()\n@@ -151,9 +153,7 @@ func (c *taskTree) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa\nif c.depth == 0 {\nlog.Printf(\"Child sleeping, PID: %d\\n\", os.Getpid())\n- for {\n- time.Sleep(24 * time.Hour)\n- }\n+ select {}\n}\nlog.Printf(\"Parent %d sleeping, PID: %d\\n\", c.depth, os.Getpid())\n@@ -177,3 +177,67 @@ func (c *taskTree) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa\n}\nreturn subcommands.ExitSuccess\n}\n+\n+type forkBomb struct {\n+ delay time.Duration\n+}\n+\n+// Name implements subcommands.Command.\n+func (*forkBomb) Name() string {\n+ return \"fork-bomb\"\n+}\n+\n+// Synopsis implements subcommands.Command.\n+func (*forkBomb) Synopsis() string {\n+ return \"creates child process until the end of times\"\n+}\n+\n+// Usage implements subcommands.Command.\n+func (*forkBomb) Usage() string {\n+ return \"fork-bomb <flags>\"\n+}\n+\n+// SetFlags implements subcommands.Command.\n+func (c *forkBomb) SetFlags(f *flag.FlagSet) {\n+ f.DurationVar(&c.delay, \"delay\", 100*time.Millisecond, \"amount of time to delay creation of child\")\n+}\n+\n+// Execute implements subcommands.Command.\n+func (c *forkBomb) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ time.Sleep(c.delay)\n+\n+ cmd := exec.Command(\"/proc/self/exe\", c.Name())\n+ cmd.Stdout = os.Stdout\n+ cmd.Stderr = os.Stderr\n+ if err := cmd.Run(); err != nil {\n+ log.Fatal(\"failed to call self:\", err)\n+ }\n+ return subcommands.ExitSuccess\n+}\n+\n+type reaper struct{}\n+\n+// Name implements subcommands.Command.\n+func (*reaper) Name() string {\n+ return \"reaper\"\n+}\n+\n+// Synopsis implements subcommands.Command.\n+func (*reaper) Synopsis() string {\n+ return \"reaps all children in a loop\"\n+}\n+\n+// Usage implements subcommands.Command.\n+func (*reaper) Usage() string {\n+ return \"reaper <flags>\"\n+}\n+\n+// SetFlags implements subcommands.Command.\n+func (*reaper) SetFlags(*flag.FlagSet) {}\n+\n+// Execute implements subcommands.Command.\n+func (c *reaper) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ stop := testutil.StartReaper()\n+ defer stop()\n+ select {}\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -572,7 +572,8 @@ func (s *Sandbox) destroy() error {\nreturn nil\n}\n-// Signal sends the signal to a container in the sandbox.\n+// Signal sends the signal to a container in the sandbox. If all is true and\n+// signal is SIGKILL, then waits for all processes to exit before returning.\nfunc (s *Sandbox) Signal(cid string, sig syscall.Signal, all bool) error {\nlog.Debugf(\"Signal sandbox %q\", s.ID)\nconn, err := s.sandboxConnect()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make runsc kill and delete more conformant to the "spec"
PiperOrigin-RevId: 214976251
Change-Id: I631348c3886f41f63d0e77e7c4f21b3ede2ab521 |
259,992 | 28.09.2018 15:51:36 | 25,200 | 49ff81a42b51a3fa2ee139e1e86179fa0c427a86 | Add ruby image tests | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "@@ -11,6 +11,8 @@ go_test(\ndata = [\n\"latin10k.txt\",\n\"mysql.sql\",\n+ \"ruby.rb\",\n+ \"ruby.sh\",\n],\nembed = [\":image\"],\ntags = [\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -30,6 +30,7 @@ import (\n\"io/ioutil\"\n\"net/http\"\n\"os\"\n+ \"path/filepath\"\n\"strings\"\n\"testing\"\n\"time\"\n@@ -256,6 +257,54 @@ func TestTomcat(t *testing.T) {\n}\n}\n+func TestRuby(t *testing.T) {\n+ if err := testutil.Pull(\"ruby\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := testutil.MakeDocker(\"ruby-test\")\n+\n+ dir, err := testutil.PrepareFiles(\"ruby.rb\", \"ruby.sh\")\n+ if err != nil {\n+ t.Fatalf(\"PrepareFiles() failed: %v\", err)\n+ }\n+ if err := os.Chmod(filepath.Join(dir, \"ruby.sh\"), 0333); err != nil {\n+ t.Fatalf(\"os.Chmod(%q, 0333) failed: %v\", dir, err)\n+ }\n+\n+ if _, err := d.Run(\"-p\", \"8080\", \"-v\", testutil.MountArg(dir, \"/src:ro\"), \"ruby\", \"/src/ruby.sh\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.FindPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running, 'gem install' can take some time.\n+ if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Ensure that content is being served.\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Errorf(\"error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+ body, err := ioutil.ReadAll(resp.Body)\n+ if err != nil {\n+ t.Fatalf(\"error reading body: %v\", err)\n+ }\n+ if got, want := string(body), \"Hello World\"; !strings.Contains(got, want) {\n+ t.Errorf(\"invalid body content, got: %q, want: %q\", got, want)\n+ }\n+}\n+\nfunc MainTest(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/ruby.rb",
"diff": "+# Copyright 2018 Google Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+require 'sinatra'\n+\n+set :bind, \"0.0.0.0\"\n+set :port, 8080\n+\n+get '/' do\n+ 'Hello World'\n+end\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/ruby.sh",
"diff": "+#!/bin/bash\n+\n+# Copyright 2018 Google Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+set -e\n+\n+gem install sinatra\n+ruby /src/ruby.rb\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -162,6 +162,11 @@ func (d *Docker) Run(args ...string) (string, error) {\nreturn do(a...)\n}\n+// Logs calls 'docker logs'.\n+func (d *Docker) Logs() (string, error) {\n+ return do(\"logs\", d.Name)\n+}\n+\n// Exec calls 'docker exec' with the arguments provided.\nfunc (d *Docker) Exec(args ...string) (string, error) {\na := []string{\"exec\", d.Name}\n@@ -193,12 +198,14 @@ func (d *Docker) Remove() error {\nreturn nil\n}\n-// CleanUp kills and deletes the container.\n-func (d *Docker) CleanUp() error {\n+// CleanUp kills and deletes the container (best effort).\n+func (d *Docker) CleanUp() {\nif _, err := do(\"kill\", d.Name); err != nil {\n- return fmt.Errorf(\"error killing container %q: %v\", d.Name, err)\n+ log.Printf(\"error killing container %q: %v\", d.Name, err)\n+ }\n+ if err := d.Remove(); err != nil {\n+ log.Print(err)\n}\n- return d.Remove()\n}\n// FindPort returns the host port that is mapped to 'sandboxPort'. This calls\n@@ -223,7 +230,7 @@ func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, e\nvar out string\nfor exp := time.Now().Add(timeout); time.Now().Before(exp); {\nvar err error\n- out, err = do(\"logs\", d.Name)\n+ out, err = d.Logs()\nif err != nil {\nreturn \"\", err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add ruby image tests
PiperOrigin-RevId: 215009066
Change-Id: I54ab920fa649cf4d0817f7cb8ea76f9126523330 |
259,992 | 28.09.2018 17:47:22 | 25,200 | cfdd418fe23880cad88639596c1171cbe7ad6ffb | Made a few changes to make testutil.Docker easier to use | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -40,7 +40,7 @@ import (\nfunc TestHelloWorld(t *testing.T) {\nd := testutil.MakeDocker(\"hello-test\")\n- if _, err := d.Run(\"hello-world\"); err != nil {\n+ if err := d.Run(\"hello-world\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -94,7 +94,8 @@ func TestHttpd(t *testing.T) {\n}\n// Start the container.\n- if _, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/local/apache2/htdocs:ro\"), \"httpd\"); err != nil {\n+ mountArg := testutil.MountArg(dir, \"/usr/local/apache2/htdocs\", testutil.ReadOnly)\n+ if err := d.Run(\"-p\", \"80\", mountArg, \"httpd\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -127,7 +128,8 @@ func TestNginx(t *testing.T) {\n}\n// Start the container.\n- if _, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/share/nginx/html:ro\"), \"nginx\"); err != nil {\n+ mountArg := testutil.MountArg(dir, \"/usr/share/nginx/html\", testutil.ReadOnly)\n+ if err := d.Run(\"-p\", \"80\", mountArg, \"nginx\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -155,7 +157,7 @@ func TestMysql(t *testing.T) {\nd := testutil.MakeDocker(\"mysql-test\")\n// Start the container.\n- if _, err := d.Run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\n+ if err := d.Run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -174,12 +176,12 @@ func TestMysql(t *testing.T) {\n// Tell mysql client to connect to the server and execute the file in verbose\n// mode to verify the output.\nargs := []string{\n- \"--link\", testutil.LinkArg(&d, \"mysql\"),\n- \"-v\", testutil.MountArg(dir, \"/sql\"),\n+ testutil.LinkArg(&d, \"mysql\"),\n+ testutil.MountArg(dir, \"/sql\", testutil.ReadWrite),\n\"mysql\",\n\"mysql\", \"-hmysql\", \"-uroot\", \"-pfoobar123\", \"-v\", \"-e\", \"source /sql/mysql.sql\",\n}\n- if _, err := client.Run(args...); err != nil {\n+ if err := client.Run(args...); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer client.CleanUp()\n@@ -198,7 +200,7 @@ func TestPythonHello(t *testing.T) {\nt.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"python-hello-test\")\n- if _, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ if err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -230,7 +232,7 @@ func TestTomcat(t *testing.T) {\nt.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"tomcat-test\")\n- if _, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n+ if err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/exec_test.go",
"new_path": "runsc/test/integration/exec_test.go",
"diff": "@@ -40,7 +40,7 @@ func TestExecCapabilities(t *testing.T) {\nd := testutil.MakeDocker(\"exec-test\")\n// Start the container.\n- if _, err := d.Run(\"alpine\", \"sh\", \"-c\", \"cat /proc/self/status; sleep 100\"); err != nil {\n+ if err := d.Run(\"alpine\", \"sh\", \"-c\", \"cat /proc/self/status; sleep 100\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "@@ -98,8 +98,8 @@ func TestPauseResume(t *testing.T) {\nt.Fatal(\"docker pull failed:\", err)\n}\nd := testutil.MakeDocker(\"pause-resume-test\")\n- if out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -157,7 +157,7 @@ func TestConnectToSelf(t *testing.T) {\n// Creates server that replies \"server\" and exists. Sleeps at the end because\n// 'docker exec' gets killed if the init process exists before it can finish.\n- if _, err := d.Run(\"ubuntu:trusty\", \"/bin/sh\", \"-c\", \"echo server | nc -l -p 8080 && sleep 1\"); err != nil {\n+ if err := d.Run(\"ubuntu:trusty\", \"/bin/sh\", \"-c\", \"echo server | nc -l -p 8080 && sleep 1\"); err != nil {\nt.Fatal(\"docker run failed:\", err)\n}\ndefer d.CleanUp()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -65,14 +65,35 @@ func EnsureSupportedDockerVersion() {\n}\n}\n+// MountMode describes if the mount should be ro or rw.\n+type MountMode int\n+\n+const (\n+ // ReadOnly is what the name says.\n+ ReadOnly MountMode = iota\n+ // ReadWrite is what the name says.\n+ ReadWrite\n+)\n+\n+// String returns the mount mode argument for this MountMode.\n+func (m MountMode) String() string {\n+ switch m {\n+ case ReadOnly:\n+ return \"ro\"\n+ case ReadWrite:\n+ return \"rw\"\n+ }\n+ panic(fmt.Sprintf(\"invalid mode: %d\", m))\n+}\n+\n// MountArg formats the volume argument to mount in the container.\n-func MountArg(source, target string) string {\n- return fmt.Sprintf(\"%s:%s\", source, target)\n+func MountArg(source, target string, mode MountMode) string {\n+ return fmt.Sprintf(\"-v=%s:%s:%v\", source, target, mode)\n}\n// LinkArg formats the link argument.\nfunc LinkArg(source *Docker, target string) string {\n- return fmt.Sprintf(\"%s:%s\", source.Name, target)\n+ return fmt.Sprintf(\"--link=%s:%s\", source.Name, target)\n}\n// PrepareFiles creates temp directory to copy files there. The sandbox doesn't\n@@ -155,11 +176,13 @@ func (d *Docker) Stop() error {\nreturn nil\n}\n-// Run calls 'docker run' with the arguments provided.\n-func (d *Docker) Run(args ...string) (string, error) {\n+// Run calls 'docker run' with the arguments provided. The container starts\n+// running in the backgroud and the call returns immediately.\n+func (d *Docker) Run(args ...string) error {\na := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-d\"}\na = append(a, args...)\n- return do(a...)\n+ _, err := do(a...)\n+ return err\n}\n// Logs calls 'docker logs'.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Made a few changes to make testutil.Docker easier to use
PiperOrigin-RevId: 215023376
Change-Id: I139569bd15c013e5dd0f60d0c98a64eaa0ba9e8e |
259,992 | 28.09.2018 18:14:59 | 25,200 | 50c283b9f56bb7200938d9e207355f05f79f0d17 | Add test for 'signall --all' with stopped container | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/multi_container_test.go",
"new_path": "runsc/container/multi_container_test.go",
"diff": "@@ -527,6 +527,12 @@ func TestMultiContainerProcesses(t *testing.T) {\n// TestMultiContainerKillAll checks that all process that belong to a container\n// are killed when SIGKILL is sent to *all* processes in that container.\nfunc TestMultiContainerKillAll(t *testing.T) {\n+ for _, tc := range []struct {\n+ killContainer bool\n+ }{\n+ {killContainer: true},\n+ {killContainer: false},\n+ } {\napp, err := testutil.FindFile(\"runsc/container/test_app\")\nif err != nil {\nt.Fatal(\"error finding test_app:\", err)\n@@ -566,8 +572,33 @@ func TestMultiContainerKillAll(t *testing.T) {\nt.Fatal(err)\n}\n+ if tc.killContainer {\n+ // First kill the init process to make the container be stopped with\n+ // processes still running inside.\n+ containers[1].Signal(syscall.SIGKILL, false)\n+ op := func() error {\n+ c, err := Load(conf.RootDir, ids[1])\n+ if err != nil {\n+ return err\n+ }\n+ if c.Status != Stopped {\n+ return fmt.Errorf(\"container is not stopped\")\n+ }\n+ return nil\n+ }\n+ if err := testutil.Poll(op, 5*time.Second); err != nil {\n+ t.Fatalf(\"container did not stop %q: %v\", containers[1].ID, err)\n+ }\n+ }\n+\n+ c, err := Load(conf.RootDir, ids[1])\n+ if err != nil {\n+ t.Fatalf(\"failed to load child container %q: %v\", c.ID, err)\n+ }\n// Kill'Em All\n- containers[1].Signal(syscall.SIGKILL, true)\n+ if err := c.Signal(syscall.SIGKILL, true); err != nil {\n+ t.Fatalf(\"failed to send SIGKILL to container %q: %v\", c.ID, err)\n+ }\n// Check that all processes are gone.\nif err := waitForProcessCount(containers[1], 0); err != nil {\n@@ -578,3 +609,4 @@ func TestMultiContainerKillAll(t *testing.T) {\nt.Fatal(err)\n}\n}\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add test for 'signall --all' with stopped container
PiperOrigin-RevId: 215025517
Change-Id: I04b9d8022b3d9dfe279e466ddb91310b9860b9af |
259,992 | 30.09.2018 22:21:34 | 25,200 | 9c7eb13079e65100b69b41536a51d2433b05637b | Removed duplicate/stale TODOs | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/kill.go",
"new_path": "runsc/cmd/kill.go",
"diff": "@@ -80,8 +80,6 @@ func (k *Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"%v\", err)\n}\n- // TODO: Distinguish between already-exited containers and\n- // genuine errors.\nif err := c.Signal(sig, k.all); err != nil {\nFatalf(\"%v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -483,7 +483,6 @@ func (c *Container) Signal(sig syscall.Signal, all bool) error {\nif !c.isSandboxRunning() {\nreturn fmt.Errorf(\"container is not running\")\n}\n- // TODO: Query the container for its state, then save it.\nreturn c.Sandbox.Signal(c.ID, sig, all)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Removed duplicate/stale TODOs
PiperOrigin-RevId: 215162121
Change-Id: I35f06ac3235cf31c9e8a158dcf6261a7ded6c4c4 |
259,992 | 30.09.2018 23:22:13 | 25,200 | 43e6aff50e23763d12c71b054f100fd91da46736 | Don't fail if Root is readonly and is not a mount point
This makes runsc more friendly to run without docker or K8s. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -1556,6 +1556,27 @@ func TestGoferExits(t *testing.T) {\n}\n}\n+func TestRootNotMount(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"/bin/true\")\n+\n+ root, err := ioutil.TempDir(testutil.TmpDir(), \"root\")\n+ if err != nil {\n+ t.Fatalf(\"failure to create tmp dir: %v\", err)\n+ }\n+ spec.Root.Path = root\n+ spec.Root.Readonly = true\n+ spec.Mounts = []specs.Mount{\n+ {Destination: \"/bin\", Source: \"/bin\", Type: \"bind\", Options: []string{\"ro\"}},\n+ {Destination: \"/lib\", Source: \"/lib\", Type: \"bind\", Options: []string{\"ro\"}},\n+ {Destination: \"/lib64\", Source: \"/lib64\", Type: \"bind\", Options: []string{\"ro\"}},\n+ }\n+\n+ conf := testutil.TestConfig()\n+ if err := run(spec, conf); err != nil {\n+ t.Fatalf(\"error running sandbox: %v\", err)\n+ }\n+}\n+\n// executeSync synchronously executes a new process.\nfunc (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {\npid, err := cont.Execute(args)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/fs.go",
"new_path": "runsc/container/fs.go",
"diff": "package container\nimport (\n+ \"bufio\"\n\"fmt\"\n\"os\"\n\"path/filepath\"\n@@ -100,18 +101,72 @@ func setupFS(spec *specs.Spec, conf *boot.Config, bundleDir string) error {\n}\n}\n- // Remount root as readonly after setup is done, if requested.\n+ // If root is read only, check if it needs to be remounted as readonly.\nif spec.Root.Readonly {\n+ isMountPoint, readonly, err := mountInfo(spec.Root.Path)\n+ if err != nil {\n+ return err\n+ }\n+ if readonly {\n+ return nil\n+ }\n+ if !isMountPoint {\n+ // Readonly root is not a mount point nor read-only. Can't do much other\n+ // than just logging a warning. The gofer will prevent files to be open\n+ // in write mode.\n+ log.Warningf(\"Mount where root is located is not read-only and cannot be changed: %q\", spec.Root.Path)\n+ return nil\n+ }\n+\n+ // If root is a mount point but not read-only, we can change mount options\n+ // to make it read-only for extra safety.\nlog.Infof(\"Remounting root as readonly: %q\", spec.Root.Path)\nflags := uintptr(syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY | syscall.MS_REC)\nsrc := spec.Root.Path\nif err := syscall.Mount(src, src, \"bind\", flags, \"\"); err != nil {\n- return fmt.Errorf(\"failed to remount root as readonly with source: %q, target: %q, flags: %#x, err: %v\", spec.Root.Path, spec.Root.Path, flags, err)\n+ return fmt.Errorf(\"failed to remount root as read-only with source: %q, target: %q, flags: %#x, err: %v\", spec.Root.Path, spec.Root.Path, flags, err)\n}\n}\nreturn nil\n}\n+// mountInfo returns whether the path is a mount point and whether the mount\n+// that path belongs to is read-only.\n+func mountInfo(path string) (bool, bool, error) {\n+ // Mounts are listed by their real paths.\n+ realPath, err := filepath.EvalSymlinks(path)\n+ if err != nil {\n+ return false, false, err\n+ }\n+ f, err := os.Open(\"/proc/mounts\")\n+ if err != nil {\n+ return false, false, err\n+ }\n+ scanner := bufio.NewScanner(f)\n+\n+ var mountPoint string\n+ var readonly bool\n+ for scanner.Scan() {\n+ line := scanner.Text()\n+ parts := strings.Split(line, \" \")\n+ if len(parts) < 4 {\n+ return false, false, fmt.Errorf(\"invalid /proc/mounts line format %q\", line)\n+ }\n+ mp := parts[1]\n+ opts := strings.Split(parts[3], \",\")\n+\n+ // Find the closest submount to the path.\n+ if strings.Contains(realPath, mp) && len(mp) > len(mountPoint) {\n+ mountPoint = mp\n+ readonly = specutils.ContainsStr(opts, \"ro\")\n+ }\n+ }\n+ if err := scanner.Err(); err != nil {\n+ return false, false, err\n+ }\n+ return mountPoint == realPath, readonly, nil\n+}\n+\n// destroyFS unmounts mounts done by runsc under `spec.Root.Path`. This\n// recovers the container rootfs into the original state.\nfunc destroyFS(spec *specs.Spec) error {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Don't fail if Root is readonly and is not a mount point
This makes runsc more friendly to run without docker or K8s.
PiperOrigin-RevId: 215165586
Change-Id: Id45a9fc24a3c09b1645f60dbaf70e64711a7a4cd |
259,992 | 01.10.2018 10:29:45 | 25,200 | a2ad8fef136b31989bfcd2f40003f6113aebaf1d | Make multi-container the default mode for runsc
And remove multicontainer option. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -193,10 +193,6 @@ type Config struct {\n// disabled. Pardon the double negation, but default to enabled is important.\nDisableSeccomp bool\n- // MultiContainer enables multiple containers support inside one sandbox.\n- // TODO: Remove this when multiple container is fully supported.\n- MultiContainer bool\n-\n// SpecFile is the file containing the OCI spec.\nSpecFile string\n@@ -224,7 +220,6 @@ func (c *Config) ToFlags() []string {\n\"--debug-log-dir=\" + c.DebugLogDir,\n\"--file-access=\" + c.FileAccess.String(),\n\"--overlay=\" + strconv.FormatBool(c.Overlay),\n- \"--multi-container=\" + strconv.FormatBool(c.MultiContainer),\n\"--network=\" + c.Network.String(),\n\"--log-packets=\" + strconv.FormatBool(c.LogPackets),\n\"--platform=\" + c.Platform.String(),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -85,14 +85,14 @@ func (f *fdDispenser) empty() bool {\n// and all mounts. 'rootCtx' is used to walk directories to find mount points.\nfunc createMountNamespace(userCtx context.Context, rootCtx context.Context, spec *specs.Spec, conf *Config, goferFDs []int) (*fs.MountNamespace, error) {\nmounts := compileMounts(spec)\n- if conf.MultiContainer {\n+\n// Create a tmpfs mount where we create and mount a root filesystem for\n// each child container.\nmounts = append(mounts, specs.Mount{\nType: tmpfs,\nDestination: ChildContainersDir,\n})\n- }\n+\nfds := &fdDispenser{fds: goferFDs}\nrootInode, err := createRootMount(rootCtx, spec, conf, fds, mounts)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -579,8 +579,6 @@ func (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {\nreturn tgid, nil\n}\n-// TODO: Per-container namespaces must be supported for -pid.\n-\n// waitContainer waits for the root process of a container to exit.\nfunc (l *Loader) waitContainer(cid string, waitStatus *uint32) error {\n// Don't defer unlock, as doing so would make it impossible for\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -267,7 +267,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n// started in an existing sandbox, we must do so. The metadata will\n// indicate the ID of the sandbox, which is the same as the ID of the\n// init container in the sandbox.\n- if specutils.ShouldCreateSandbox(spec) || !conf.MultiContainer {\n+ if specutils.ShouldCreateSandbox(spec) {\nlog.Debugf(\"Creating new sandbox for container %q\", id)\nioFiles, err := c.createGoferProcess(spec, conf, bundleDir)\nif err != nil {\n@@ -345,7 +345,7 @@ func (c *Container) Start(conf *boot.Config) error {\n}\n}\n- if specutils.ShouldCreateSandbox(c.Spec) || !conf.MultiContainer {\n+ if specutils.ShouldCreateSandbox(c.Spec) {\nif err := c.Sandbox.StartRoot(c.Spec, conf); err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -60,7 +60,6 @@ var (\nnetwork = flag.String(\"network\", \"sandbox\", \"specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.\")\n- multiContainer = flag.Bool(\"multi-container\", false, \"enable *experimental* multi-container support.\")\nwatchdogAction = flag.String(\"watchdog-action\", \"log\", \"sets what action the watchdog takes when triggered: log (default), panic.\")\npanicSignal = flag.Int(\"panic-signal\", -1, \"register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.\")\n)\n@@ -140,7 +139,6 @@ func main() {\nPlatform: platformType,\nStrace: *strace,\nStraceLogSize: *straceLogSize,\n- MultiContainer: *multiContainer,\nWatchdogAction: wa,\nPanicSignal: *panicSignal,\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -57,35 +57,6 @@ const (\nfunc setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *boot.Config) error {\nlog.Infof(\"Setting up network\")\n- if !conf.MultiContainer {\n- // HACK!\n- //\n- // When kubernetes starts a pod, it first creates a sandbox with an\n- // application that just pauses forever. Later, when a container is\n- // added to the pod, kubernetes will create another sandbox with a\n- // config that corresponds to the containerized application, and add it\n- // to the same namespaces as the pause sandbox.\n- //\n- // Running a second sandbox currently breaks because the two sandboxes\n- // have the same network namespace and configuration, and try to create\n- // a tap device on the same host device which fails.\n- //\n- // Runsc will eventually need to detect that this container is meant to\n- // be run in the same sandbox as the pausing application, and somehow\n- // make that happen.\n- //\n- // For now the following HACK disables networking for the \"pause\"\n- // sandbox, allowing the second sandbox to start up successfully.\n- //\n- // TODO: Remove this once multiple containers per sandbox\n- // is properly supported.\n- if spec.Annotations[crioContainerTypeAnnotation] == \"sandbox\" ||\n- spec.Annotations[containerdContainerTypeAnnotation] == \"sandbox\" {\n- log.Warningf(\"HACK: Disabling network\")\n- conf.Network = boot.NetworkNone\n- }\n- }\n-\nswitch conf.Network {\ncase boot.NetworkNone:\nlog.Infof(\"Network is disabled, create loopback interface only\")\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -109,7 +109,6 @@ func TestConfig() *boot.Config {\nLogPackets: true,\nNetwork: boot.NetworkNone,\nStrace: true,\n- MultiContainer: true,\nFileAccess: boot.FileAccessExclusive,\nTestOnlyAllowRunAsCurrentUserWithoutChroot: true,\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make multi-container the default mode for runsc
And remove multicontainer option.
PiperOrigin-RevId: 215236981
Change-Id: I9fd1d963d987e421e63d5817f91a25c819ced6cb |
259,881 | 01.10.2018 14:15:52 | 25,200 | 0400e5459288592768af12ab71609c6df6afe3d7 | Add itimer types to linux package, strace | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -44,6 +44,7 @@ go_library(\n\"signal.go\",\n\"socket.go\",\n\"time.go\",\n+ \"timer.go\",\n\"tty.go\",\n\"uio.go\",\n\"utsname.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/timer.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// itimer types for getitimer(2) and setitimer(2), from\n+// include/uapi/linux/time.h.\n+const (\n+ ITIMER_REAL = 0\n+ ITIMER_VIRTUAL = 1\n+ ITIMER_PROF = 2\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/linux64.go",
"new_path": "pkg/sentry/strace/linux64.go",
"diff": "@@ -53,9 +53,9 @@ var linuxAMD64 = SyscallMap{\n33: makeSyscallInfo(\"dup2\", Hex, Hex),\n34: makeSyscallInfo(\"pause\"),\n35: makeSyscallInfo(\"nanosleep\", Timespec, PostTimespec),\n- 36: makeSyscallInfo(\"getitimer\", Hex, PostItimerVal),\n+ 36: makeSyscallInfo(\"getitimer\", ItimerType, PostItimerVal),\n37: makeSyscallInfo(\"alarm\", Hex),\n- 38: makeSyscallInfo(\"setitimer\", Hex, ItimerVal, PostItimerVal),\n+ 38: makeSyscallInfo(\"setitimer\", ItimerType, ItimerVal, PostItimerVal),\n39: makeSyscallInfo(\"getpid\"),\n40: makeSyscallInfo(\"sendfile\", Hex, Hex, Hex, Hex),\n41: makeSyscallInfo(\"socket\", SockFamily, SockType, SockProtocol),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/strace.go",
"new_path": "pkg/sentry/strace/strace.go",
"diff": "@@ -24,6 +24,7 @@ import (\n\"syscall\"\n\"time\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/bits\"\n\"gvisor.googlesource.com/gvisor/pkg/eventchannel\"\n@@ -46,6 +47,22 @@ var LogMaximumSize uint = DefaultLogMaximumSize\n// do anything useful with binary text dump of byte array arguments.\nvar EventMaximumSize uint\n+// ItimerTypes are the possible itimer types.\n+var ItimerTypes = abi.ValueSet{\n+ {\n+ Value: linux.ITIMER_REAL,\n+ Name: \"ITIMER_REAL\",\n+ },\n+ {\n+ Value: linux.ITIMER_VIRTUAL,\n+ Name: \"ITIMER_VIRTUAL\",\n+ },\n+ {\n+ Value: linux.ITIMER_PROF,\n+ Name: \"ITIMER_PROF\",\n+ },\n+}\n+\nfunc iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, maxBytes uint64) string {\nif iovcnt < 0 || iovcnt > linux.UIO_MAXIOV {\nreturn fmt.Sprintf(\"%#x (error decoding iovecs: invalid iovcnt)\", addr)\n@@ -322,6 +339,8 @@ func (i *SyscallInfo) pre(t *kernel.Task, args arch.SyscallArguments, maximumBlo\noutput = append(output, futex(uint64(args[arg].Uint())))\ncase PtraceRequest:\noutput = append(output, PtraceRequestSet.Parse(args[arg].Uint64()))\n+ case ItimerType:\n+ output = append(output, ItimerTypes.Parse(uint64(args[arg].Int())))\ncase Oct:\noutput = append(output, \"0o\"+strconv.FormatUint(args[arg].Uint64(), 8))\ncase Hex:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/syscalls.go",
"new_path": "pkg/sentry/strace/syscalls.go",
"diff": "@@ -150,6 +150,9 @@ const (\n// Utimbuf is a pointer to a struct utimbuf.\nUtimbuf\n+ // Rusage is a struct rusage, formatted after syscall execution.\n+ Rusage\n+\n// CloneFlags are clone(2) flags.\nCloneFlags\n@@ -165,8 +168,8 @@ const (\n// PtraceRequest is the ptrace(2) request.\nPtraceRequest\n- // Rusage is a struct rusage, formatted after syscall execution.\n- Rusage\n+ // ItimerType is an itimer type (ITIMER_REAL, etc).\n+ ItimerType\n)\n// defaultFormat is the syscall argument format to use if the actual format is\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_timer.go",
"new_path": "pkg/sentry/syscalls/linux/sys_timer.go",
"diff": "@@ -25,19 +25,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n-// ItimerType denotes the type of interval timer.\n-type ItimerType int\n-\n-// Interval timer types from <sys/time.h>.\n-const (\n- // ItimerReal equals to ITIMER_REAL.\n- ItimerReal ItimerType = iota\n- // ItimerVirtual equals to ITIMER_VIRTUAL.\n- ItimerVirtual\n- // ItimerProf equals to ITIMER_PROF.\n- ItimerProf\n-)\n-\nconst nsecPerSec = int64(time.Second)\n// copyItimerValIn copies an ItimerVal from the untrusted app range to the\n@@ -83,13 +70,13 @@ func copyItimerValOut(t *kernel.Task, addr usermem.Addr, itv *linux.ItimerVal) e\n}\n}\n-func findTimer(t *kernel.Task, w ItimerType) (*ktime.Timer, error) {\n- switch w {\n- case ItimerReal:\n+func findTimer(t *kernel.Task, which int32) (*ktime.Timer, error) {\n+ switch which {\n+ case linux.ITIMER_REAL:\nreturn t.ThreadGroup().Timer().RealTimer, nil\n- case ItimerVirtual:\n+ case linux.ITIMER_VIRTUAL:\nreturn t.ThreadGroup().Timer().VirtualTimer, nil\n- case ItimerProf:\n+ case linux.ITIMER_PROF:\nreturn t.ThreadGroup().Timer().ProfTimer, nil\ndefault:\nreturn nil, syscall.EINVAL\n@@ -98,7 +85,7 @@ func findTimer(t *kernel.Task, w ItimerType) (*ktime.Timer, error) {\n// Getitimer implements linux syscall getitimer(2).\nfunc Getitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- timerID := ItimerType(args[0].Int())\n+ timerID := args[0].Int()\nval := args[1].Pointer()\ntimer, err := findTimer(t, timerID)\n@@ -116,7 +103,7 @@ func Getitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\n// Setitimer implements linux syscall setitimer(2).\nfunc Setitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n- timerID := ItimerType(args[0].Int())\n+ timerID := args[0].Int()\nnewVal := args[1].Pointer()\noldVal := args[2].Pointer()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add itimer types to linux package, strace
PiperOrigin-RevId: 215278262
Change-Id: Icd10384c99802be6097be938196044386441e282 |
259,992 | 03.10.2018 09:31:53 | 25,200 | 77e43adeab4abcd301d76222e0304f551fbcf0cc | Add TIOCINQ to allowed seccomp when hostinet is used | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -335,6 +335,10 @@ func hostInetFilters() seccomp.SyscallRules {\nseccomp.AllowAny{},\nseccomp.AllowValue(syscall.TIOCOUTQ),\n},\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(syscall.TIOCINQ),\n+ },\n},\nsyscall.SYS_LISTEN: {},\nsyscall.SYS_READV: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add TIOCINQ to allowed seccomp when hostinet is used
PiperOrigin-RevId: 215574070
Change-Id: Ib36e804adebaf756adb9cbc2752be9789691530b |
259,885 | 03.10.2018 13:50:58 | 25,200 | 8e729e0e1fd22147d2a609f9bae13aa4d96f02fd | Add //pkg/sync:generic_atomicptr. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sync/BUILD",
"new_path": "pkg/sync/BUILD",
"diff": "@@ -7,6 +7,14 @@ package(\nload(\"//tools/go_generics:defs.bzl\", \"go_template\")\n+go_template(\n+ name = \"generic_atomicptr\",\n+ srcs = [\"atomicptr_unsafe.go\"],\n+ types = [\n+ \"Value\",\n+ ],\n+)\n+\ngo_template(\nname = \"generic_seqatomic\",\nsrcs = [\"seqatomic_unsafe.go\"],\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sync/atomicptr_unsafe.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package template doesn't exist. This file must be instantiated using the\n+// go_template_instance rule in tools/go_generics/defs.bzl.\n+package template\n+\n+import (\n+ \"sync/atomic\"\n+ \"unsafe\"\n+)\n+\n+// Value is a required type parameter.\n+type Value struct{}\n+\n+// An AtomicPtr is a pointer to a value of type Value that can be atomically\n+// loaded and stored. The zero value of an AtomicPtr represents nil.\n+//\n+// Note that copying AtomicPtr by value performs a non-atomic read of the\n+// stored pointer, which is unsafe if Store() can be called concurrently; in\n+// this case, do `dst.Store(src.Load())` instead.\n+type AtomicPtr struct {\n+ ptr unsafe.Pointer\n+}\n+\n+// Load returns the value set by the most recent Store. It returns nil if there\n+// has been no previous call to Store.\n+func (p *AtomicPtr) Load() *Value {\n+ return (*Value)(atomic.LoadPointer(&p.ptr))\n+}\n+\n+// Store sets the value returned by Load to x.\n+func (p *AtomicPtr) Store(x *Value) {\n+ atomic.StorePointer(&p.ptr, (unsafe.Pointer)(x))\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sync/atomicptrtest/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\", \"go_test\")\n+load(\"//tools/go_generics:defs.bzl\", \"go_template_instance\")\n+\n+go_template_instance(\n+ name = \"atomicptr_int\",\n+ out = \"atomicptr_int.go\",\n+ package = \"atomicptr\",\n+ suffix = \"Int\",\n+ template = \"//pkg/sync:generic_atomicptr\",\n+ types = {\n+ \"Value\": \"int\",\n+ },\n+)\n+\n+go_library(\n+ name = \"atomicptr\",\n+ srcs = [\"atomicptr_int.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/pkg/sync/atomicptr\",\n+)\n+\n+go_test(\n+ name = \"atomicptr_test\",\n+ size = \"small\",\n+ srcs = [\"atomicptr_test.go\"],\n+ embed = [\":atomicptr\"],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sync/atomicptrtest/atomicptr_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package atomicptr\n+\n+import (\n+ \"testing\"\n+)\n+\n+func newInt(val int) *int {\n+ return &val\n+}\n+\n+func TestAtomicPtr(t *testing.T) {\n+ var p AtomicPtrInt\n+ if got := p.Load(); got != nil {\n+ t.Errorf(\"initial value is %p (%v), wanted nil\", got, got)\n+ }\n+ want := newInt(42)\n+ p.Store(want)\n+ if got := p.Load(); got != want {\n+ t.Errorf(\"wrong value: got %p (%v), wanted %p (%v)\", got, got, want, want)\n+ }\n+ want = newInt(100)\n+ p.Store(want)\n+ if got := p.Load(); got != want {\n+ t.Errorf(\"wrong value: got %p (%v), wanted %p (%v)\", got, got, want, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add //pkg/sync:generic_atomicptr.
PiperOrigin-RevId: 215620949
Change-Id: I519da4b44386d950443e5784fb8c48ff9a36c5d3 |
259,854 | 03.10.2018 17:02:05 | 25,200 | 4fef31f96c289d5e58c3c2997ee38fcb22c0378f | Add S/R support for FIOASYNC | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file.go",
"new_path": "pkg/sentry/fs/file.go",
"diff": "@@ -85,6 +85,9 @@ type File struct {\n// async handles O_ASYNC notifications.\nasync FileAsync\n+ // saving indicates that this file is in the process of being saved.\n+ saving bool `state:\"nosave\"`\n+\n// mu is dual-purpose: first, to make read(2) and write(2) thread-safe\n// in conformity with POSIX, and second, to cancel operations before they\n// begin in response to interruptions (i.e. signals).\n@@ -127,10 +130,15 @@ func (f *File) DecRef() {\n// Release a reference on the Dirent.\nf.Dirent.DecRef()\n+ // Only unregister if we are currently registered. There is nothing\n+ // to register if f.async is nil (this happens when async mode is\n+ // enabled without setting an owner). Also, we unregister during\n+ // save.\nf.flagsMu.Lock()\n- if f.flags.Async && f.async != nil {\n+ if !f.saving && f.flags.Async && f.async != nil {\nf.async.Unregister(f)\n}\n+ f.async = nil\nf.flagsMu.Unlock()\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file_state.go",
"new_path": "pkg/sentry/fs/file_state.go",
"diff": "package fs\n+// beforeSave is invoked by stateify.\n+func (f *File) beforeSave() {\n+ f.saving = true\n+ if f.flags.Async && f.async != nil {\n+ f.async.Unregister(f)\n+ }\n+}\n+\n// afterLoad is invoked by stateify.\nfunc (f *File) afterLoad() {\nf.mu.Init()\n+ if f.flags.Async && f.async != nil {\n+ f.async.Register(f)\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/fasync.go",
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "@@ -32,8 +32,10 @@ func New() fs.FileAsync {\n}\n// FileAsync sends signals when the registered file is ready for IO.\n+//\n+// +stateify savable\ntype FileAsync struct {\n- mu sync.Mutex\n+ mu sync.Mutex `state:\"nosave\"`\ne waiter.Entry\nrequester *auth.Credentials\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/waiter/waiter.go",
"new_path": "pkg/waiter/waiter.go",
"diff": "@@ -113,6 +113,8 @@ type EntryCallback interface {\n// Entry represents a waiter that can be add to the a wait queue. It can\n// only be in one queue at a time, and is added \"intrusively\" to the queue with\n// no extra memory allocations.\n+//\n+// +stateify savable\ntype Entry struct {\n// Context stores any state the waiter may wish to store in the entry\n// itself, which may be used at wake up time.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add S/R support for FIOASYNC
PiperOrigin-RevId: 215655197
Change-Id: I668b1bc7c29daaf2999f8f759138bcbb09c4de6f |
259,854 | 03.10.2018 20:21:25 | 25,200 | beac59b37a8b0ea834904870e5c236d2627947a2 | Fix panic if FIOASYNC callback is registered and triggered without target | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/fasync.go",
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "@@ -60,6 +60,11 @@ func (a *FileAsync) Callback(e *waiter.Entry) {\nif tg != nil {\nt = tg.Leader()\n}\n+ if t == nil {\n+ // No recipient has been registered.\n+ a.mu.Unlock()\n+ return\n+ }\nc := t.Credentials()\n// Logic from sigio_perm in fs/fcntl.c.\nif a.requester.EffectiveKUID == 0 ||\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix panic if FIOASYNC callback is registered and triggered without target
PiperOrigin-RevId: 215674589
Change-Id: I4f8871b64c570dc6da448d2fe351cec8a406efeb |
259,992 | 03.10.2018 20:43:18 | 25,200 | 3f46f2e5017106d1569f759b8d19aee6e9827c58 | Fix sandbox chroot
Sandbox was setting chroot, but was not changing the working
dir. Added test to ensure this doesn't happen in the future. | [
{
"change_type": "MODIFY",
"old_path": "kokoro/run_tests.sh",
"new_path": "kokoro/run_tests.sh",
"diff": "@@ -35,6 +35,11 @@ bazel build //...\nruntime=runsc_test_$((RANDOM))\nsudo -n ./runsc/test/install.sh --runtime ${runtime}\n+# Best effort to uninstall the runtime\n+uninstallRuntime() {\n+ sudo -n ./runsc/test/install.sh -u --runtime ${runtime}\n+}\n+\n# Run the tests and upload results.\n#\n# We turn off \"-e\" flag because we must move the log files even if the test\n@@ -43,6 +48,7 @@ set +e\nbazel test --test_output=errors //...\nexit_code=${?}\n+# Execute local tests that require docker.\nif [[ ${exit_code} -eq 0 ]]; then\n# These names are used to exclude tests not supported in certain\n# configuration, e.g. save/restore not supported with hostnet.\n@@ -59,8 +65,21 @@ if [[ ${exit_code} -eq 0 ]]; then\ndone\nfi\n-# Best effort to uninstall\n-sudo -n ./runsc/test/install.sh -u --runtime ${runtime}\n+# Execute local tests that require superuser.\n+if [[ ${exit_code} -eq 0 ]]; then\n+ bazel build //runsc/test/root:root_test\n+ root_test=$(find -L ./bazel-bin/ -executable -type f -name root_test | grep __main__)\n+ if [[ ! -f \"${root_test}\" ]]; then\n+ uninstallRuntime\n+ echo \"root_test executable not found\"\n+ exit 1\n+ fi\n+ sudo -n -E RUNSC_RUNTIME=${runtime} ${root_test}\n+ exit_code=${?}\n+fi\n+\n+uninstallRuntime\n+\nset -e\n# Find and rename all test xml and log files so that Sponge can pick them up.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/chroot.go",
"new_path": "runsc/sandbox/chroot.go",
"diff": "@@ -55,7 +55,7 @@ func setUpChroot() (string, error) {\nlog.Infof(\"Setting up sandbox chroot in %q\", chroot)\n// Mount /proc.\n- if err := mountInChroot(chroot, \"proc\", \"/proc\", \"proc\", 0); err != nil {\n+ if err := mountInChroot(chroot, \"proc\", \"/proc\", \"proc\", syscall.MS_NOSUID|syscall.MS_NODEV|syscall.MS_NOEXEC); err != nil {\nreturn \"\", fmt.Errorf(\"error mounting proc in chroot: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -475,6 +475,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n}\ns.Chroot = chroot // Remember path so it can cleaned up.\ncmd.SysProcAttr.Chroot = chroot\n+ cmd.Dir = \"/\"\ncmd.Args[0] = \"/runsc\"\ncmd.Path = \"/runsc\"\n} else {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/README.md",
"diff": "+# Tests\n+\n+The tests defined under this path are verifying functionality beyond what unit\n+tests can cover, e.g. integration and end to end tests. Due to their nature,\n+they may need extra setup in the test machine and extra configuration to run.\n+\n+- **integration:** defines integration tests that uses `docker run` to test\n+ functionality.\n+- **image:** basic end to end test for popular images.\n+- **root:** tests that require to be run as root.\n+- **testutil:** utilities library to support the tests.\n+\n+The following setup steps are required in order to run these tests:\n+\n+\n+ `./runsc/test/install.sh [--runtime <name>]`\n+\n+The tests expect the runtime name to be provided in the `RUNSC_RUNTIME`\n+environment variable (default: `runsc-test`). To run the tests execute:\n+\n+\n+```\n+bazel test --test_env=RUNSC_RUNTIME=runsc-test \\\n+ //runsc/test/image:image_test \\\n+ //runsc/test/integration:integration_test\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Package image provides end-to-end image tests for runsc. These tests require\n-// docker and runsc to be installed on the machine. To set it up, run:\n-//\n-// ./runsc/test/install.sh [--runtime <name>]\n-//\n-// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n-// environment variable (default: runsc-test).\n-//\n+// Package image provides end-to-end image tests for runsc.\n+\n// Each test calls docker commands to start up a container, and tests that it is\n// behaving properly, like connecting to a port or looking at the output. The\n// container is killed and deleted at the end.\n+//\n+// Setup instruction in runsc/test/README.md.\npackage image\nimport (\n@@ -307,7 +303,7 @@ func TestRuby(t *testing.T) {\n}\n}\n-func MainTest(m *testing.M) {\n+func TestMain(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-// Package image provides end-to-end integration tests for runsc. These tests require\n-// docker and runsc to be installed on the machine. To set it up, run:\n-//\n-// ./runsc/test/install.sh [--runtime <name>]\n-//\n-// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n-// environment variable (default: runsc-test).\n+// Package integration provides end-to-end integration tests for runsc.\n//\n// Each test calls docker commands to start up a container, and tests that it is\n-// behaving properly, with various runsc commands. The container is killed and deleted\n-// at the end.\n-\n+// behaving properly, with various runsc commands. The container is killed and\n+// deleted at the end.\n+//\n+// Setup instruction in runsc/test/README.md.\npackage integration\nimport (\n@@ -184,7 +179,7 @@ func TestConnectToSelf(t *testing.T) {\n}\n}\n-func MainTest(m *testing.M) {\n+func TestMain(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/root/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+\n+go_library(\n+ name = \"root\",\n+ srcs = [\"root.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/runsc/test/root\",\n+)\n+\n+go_test(\n+ name = \"root_test\",\n+ size = \"small\",\n+ srcs = [\"chroot_test.go\"],\n+ embed = [\":root\"],\n+ tags = [\n+ # Requires docker and runsc to be configured before the test runs.\n+ # Also test only runs as root.\n+ \"manual\",\n+ \"local\",\n+ ],\n+ deps = [\n+ \"//runsc/specutils\",\n+ \"//runsc/test/testutil\",\n+ \"@com_github_syndtr_gocapability//capability:go_default_library\",\n+ ],\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/root/chroot_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package root is used for tests that requires sysadmin privileges run. First,\n+// follow the setup instruction in runsc/test/README.md. To run these test:\n+//\n+// bazel build //runsc/test/root:root_test\n+// root_test=$(find -L ./bazel-bin/ -executable -type f -name root_test | grep __main__)\n+// sudo RUNSC_RUNTIME=runsc-test ${root_test}\n+package root\n+\n+import (\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+ \"reflect\"\n+ \"sort\"\n+ \"strconv\"\n+ \"strings\"\n+ \"testing\"\n+\n+ \"github.com/syndtr/gocapability/capability\"\n+ \"gvisor.googlesource.com/gvisor/runsc/specutils\"\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+// TestChroot verifies that the sandbox is chroot'd and that mounts are cleaned\n+// up after the sandbox is destroyed.\n+func TestChroot(t *testing.T) {\n+ d := testutil.MakeDocker(\"chroot-test\")\n+ if err := d.Run(\"alpine\", \"sleep\", \"10000\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ pid, err := d.SandboxPid()\n+ if err != nil {\n+ t.Fatalf(\"Docker.SandboxPid(): %v\", err)\n+ }\n+\n+ // Check that sandbox is chroot'ed.\n+ chroot, err := filepath.EvalSymlinks(filepath.Join(\"/proc\", strconv.Itoa(pid), \"root\"))\n+ if err != nil {\n+ t.Fatalf(\"error resolving /proc/<pid>/root symlink: %v\", err)\n+ }\n+ if want := \"/tmp/runsc-sandbox-chroot-\"; !strings.HasPrefix(chroot, want) {\n+ t.Errorf(\"sandbox is not chroot'd, it should be inside: %q, got: %q\", want, chroot)\n+ }\n+\n+ path, err := filepath.EvalSymlinks(filepath.Join(\"/proc\", strconv.Itoa(pid), \"cwd\"))\n+ if err != nil {\n+ t.Fatalf(\"error resolving /proc/<pid>/cwd symlink: %v\", err)\n+ }\n+ if chroot != path {\n+ t.Errorf(\"sandbox current dir is wrong, want: %q, got: %q\", chroot, path)\n+ }\n+\n+ fi, err := ioutil.ReadDir(chroot)\n+ if err != nil {\n+ t.Fatalf(\"error listing %q: %v\", chroot, err)\n+ }\n+ if want, got := 2, len(fi); want != got {\n+ t.Fatalf(\"chroot dir got %d entries, want %d\", want, got)\n+ }\n+\n+ // chroot dir is prepared by runsc and should contains only the executable\n+ // and /proc.\n+ files := []string{fi[0].Name(), fi[1].Name()}\n+ sort.Strings(files)\n+ if want := []string{\"proc\", \"runsc\"}; !reflect.DeepEqual(files, want) {\n+ t.Errorf(\"chroot got children %v, want %v\", files, want)\n+ }\n+\n+ d.CleanUp()\n+\n+ // Check that chroot directory was cleaned up.\n+ if _, err := os.Stat(chroot); err == nil || !os.IsNotExist(err) {\n+ t.Errorf(\"chroot directory %q was not deleted: %v\", chroot, err)\n+ }\n+}\n+\n+func TestMain(m *testing.M) {\n+ testutil.EnsureSupportedDockerVersion()\n+\n+ if !specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_DAC_OVERRIDE) {\n+ fmt.Println(\"Test requires sysadmin privileges to run. Try again with sudo.\")\n+ os.Exit(1)\n+ }\n+\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/root/root.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package root is empty. See chroot_test.go for description.\n+package root\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -267,6 +267,19 @@ func (d *Docker) FindPort(sandboxPort int) (int, error) {\nreturn port, nil\n}\n+// SandboxPid returns the PID to the sandbox process.\n+func (d *Docker) SandboxPid() (int, error) {\n+ out, err := do(\"inspect\", \"-f={{.State.Pid}}\", d.Name)\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error retrieving pid: %v\", err)\n+ }\n+ pid, err := strconv.Atoi(strings.TrimSuffix(string(out), \"\\n\"))\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error parsing pid %q: %v\", out, err)\n+ }\n+ return pid, nil\n+}\n+\n// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n// for the given pattern.\nfunc (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, error) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -238,7 +238,7 @@ func WaitForHTTP(port int, timeout time.Duration) error {\n}\n// RunAsRoot ensures the test runs with CAP_SYS_ADMIN and CAP_SYS_CHROOT. If\n-// need it will create a new user namespace and reexecute the test as root\n+// needed it will create a new user namespace and re-execute the test as root\n// inside of the namespace. This functionr returns when it's running as root. If\n// it needs to create another process, it will exit from there and not return.\nfunc RunAsRoot() {\n@@ -246,6 +246,8 @@ func RunAsRoot() {\nreturn\n}\n+ fmt.Println(\"*** Re-running test as root in new user namespace ***\")\n+\n// Current process doesn't have CAP_SYS_ADMIN, create user namespace and run\n// as root inside that namespace to get it.\nruntime.LockOSThread()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix sandbox chroot
Sandbox was setting chroot, but was not changing the working
dir. Added test to ensure this doesn't happen in the future.
PiperOrigin-RevId: 215676270
Change-Id: I14352d3de64a4dcb90e50948119dc8328c9c15e1 |
259,881 | 08.10.2018 11:38:02 | 25,200 | 569c2b06c47d269d961405fa652d45e51860d005 | Statfs Namelen should be NAME_MAX not PATH_MAX
We accidentally set the wrong maximum. I've also added PATH_MAX and
NAME_MAX to the linux abi package. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/fs.go",
"new_path": "pkg/abi/linux/fs.go",
"diff": "@@ -28,6 +28,12 @@ const (\nV9FS_MAGIC = 0x01021997\n)\n+// Filesystem path limits, from uapi/linux/limits.h.\n+const (\n+ NAME_MAX = 255\n+ PATH_MAX = 4096\n+)\n+\n// Statfs is struct statfs, from uapi/asm-generic/statfs.h.\ntype Statfs struct {\n// Type is one of the filesystem magic values, defined above.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"debug/elf\"\n\"fmt\"\n\"io\"\n- \"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/abi\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n@@ -409,7 +408,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, info el\nctx.Infof(\"PT_INTERP path too small: %v\", phdr.Filesz)\nreturn loadedELF{}, syserror.ENOEXEC\n}\n- if phdr.Filesz > syscall.PathMax {\n+ if phdr.Filesz > linux.PATH_MAX {\nctx.Infof(\"PT_INTERP path too big: %v\", phdr.Filesz)\nreturn loadedELF{}, syserror.ENOEXEC\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/strace.go",
"new_path": "pkg/sentry/strace/strace.go",
"diff": "@@ -133,7 +133,7 @@ func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) st\n}\nfunc path(t *kernel.Task, addr usermem.Addr) string {\n- path, err := t.CopyInString(addr, syscall.PathMax)\n+ path, err := t.CopyInString(addr, linux.PATH_MAX)\nif err != nil {\nreturn fmt.Sprintf(\"%#x (error decoding path: %s)\", addr, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -115,7 +115,7 @@ func fileOpOn(t *kernel.Task, dirFD kdefs.FD, path string, resolve bool, fn func\n// copyInPath copies a path in.\nfunc copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string, dirPath bool, err error) {\n- path, err = t.CopyInString(addr, syscall.PathMax)\n+ path, err = t.CopyInString(addr, linux.PATH_MAX)\nif err != nil {\nreturn \"\", false, err\n}\n@@ -1080,7 +1080,7 @@ func symlinkAt(t *kernel.Task, dirFD kdefs.FD, newAddr usermem.Addr, oldAddr use\n// The oldPath is copied in verbatim. This is because the symlink\n// will include all details, including trailing slashes.\n- oldPath, err := t.CopyInString(oldAddr, syscall.PathMax)\n+ oldPath, err := t.CopyInString(oldAddr, linux.PATH_MAX)\nif err != nil {\nreturn err\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_stat.go",
"new_path": "pkg/sentry/syscalls/linux/sys_stat.go",
"diff": "package linux\nimport (\n- \"syscall\"\n-\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n@@ -198,7 +196,7 @@ func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error {\nFiles: info.TotalFiles,\nFilesFree: info.FreeFiles,\n// Same as Linux for simple_statfs, see fs/libfs.c.\n- NameLength: syscall.PathMax,\n+ NameLength: linux.NAME_MAX,\nFragmentSize: d.Inode.StableAttr.BlockSize,\n// Leave other fields 0 like simple_statfs does.\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"new_path": "pkg/sentry/syscalls/linux/sys_thread.go",
"diff": "@@ -76,7 +76,7 @@ func Execve(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nenvvAddr := args[2].Pointer()\n// Extract our arguments.\n- filename, err := t.CopyInString(filenameAddr, syscall.PathMax)\n+ filename, err := t.CopyInString(filenameAddr, linux.PATH_MAX)\nif err != nil {\nreturn 0, nil, err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Statfs Namelen should be NAME_MAX not PATH_MAX
We accidentally set the wrong maximum. I've also added PATH_MAX and
NAME_MAX to the linux abi package.
PiperOrigin-RevId: 216221311
Change-Id: I44805fcf21508831809692184a0eba4cee469633 |
259,881 | 08.10.2018 17:43:31 | 25,200 | b8048f75daa2ec13059162cb421236f99e5e4a0e | Uncapitalize error | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -142,7 +142,7 @@ func init() {\n// New also handles setting up a kernel for restoring a container.\nfunc New(id string, spec *specs.Spec, conf *Config, controllerFD, deviceFD int, goferFDs []int, stdioFDs []int, console bool) (*Loader, error) {\nif err := usage.Init(); err != nil {\n- return nil, fmt.Errorf(\"Error setting up memory usage: %v\", err)\n+ return nil, fmt.Errorf(\"error setting up memory usage: %v\", err)\n}\n// Create kernel and platform.\np, err := createPlatform(conf, deviceFD)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Uncapitalize error
PiperOrigin-RevId: 216281263
Change-Id: Ie0c189e7f5934b77c6302336723bc1181fd2866c |
260,013 | 09.10.2018 09:51:01 | 25,200 | acf7a951894a1b445ff61e945e32c989892f476f | Add memunit to sysinfo(2).
Also properly add padding after Procs in the linux.Sysinfo
structure. This will be implicitly padded to 64bits so we
need to do the same. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/linux.go",
"new_path": "pkg/abi/linux/linux.go",
"diff": "@@ -31,6 +31,7 @@ type Sysinfo struct {\nTotalSwap uint64\nFreeSwap uint64\nProcs uint16\n+ _ [6]byte // Pad Procs to 64bits.\nTotalHigh uint64\nFreeHigh uint64\nUnit uint32\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_sysinfo.go",
"new_path": "pkg/sentry/syscalls/linux/sys_sysinfo.go",
"diff": "@@ -36,6 +36,7 @@ func Sysinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca\nUptime: t.Kernel().MonotonicClock().Now().Seconds(),\nTotalRAM: totalSize,\nFreeRAM: totalSize - totalUsage,\n+ Unit: 1,\n}\n_, err := t.CopyOut(addr, si)\nreturn 0, nil, err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add memunit to sysinfo(2).
Also properly add padding after Procs in the linux.Sysinfo
structure. This will be implicitly padded to 64bits so we
need to do the same.
PiperOrigin-RevId: 216372907
Change-Id: I6eb6a27800da61d8f7b7b6e87bf0391a48fdb475 |
259,854 | 09.10.2018 15:11:46 | 25,200 | c36d2ef3733a0619b992f8ddc23b072474b04044 | Add new netstack metrics to the sentry | [
{
"change_type": "MODIFY",
"old_path": "pkg/metric/metric.go",
"new_path": "pkg/metric/metric.go",
"diff": "@@ -48,9 +48,6 @@ var (\n// TODO: Support metric fields.\n//\ntype Uint64Metric struct {\n- // metadata describes the metric. It is immutable.\n- metadata *pb.MetricMetadata\n-\n// value is the actual value of the metric. It must be accessed\n// atomically.\nvalue uint64\n@@ -101,24 +98,35 @@ func Disable() {\n}\n}\n-// NewUint64Metric creates a new metric with the given name.\n+type customUint64Metric struct {\n+ // metadata describes the metric. It is immutable.\n+ metadata *pb.MetricMetadata\n+\n+ // value returns the current value of the metric.\n+ value func() uint64\n+}\n+\n+// RegisterCustomUint64Metric registers a metric with the given name.\n+//\n+// Register must only be called at init and will return and error if called\n+// after Initialized.\n//\n-// Metrics must be statically defined (i.e., at startup). NewUint64Metric will\n-// return an error if called after Initialized.\n+// All metrics must be cumulative, meaning that the return values of value must\n+// only increase over time.\n//\n// Preconditions:\n// * name must be globally unique.\n// * Initialize/Disable have not been called.\n-func NewUint64Metric(name string, sync bool, description string) (*Uint64Metric, error) {\n+func RegisterCustomUint64Metric(name string, sync bool, description string, value func() uint64) error {\nif initialized {\n- return nil, ErrInitializationDone\n+ return ErrInitializationDone\n}\nif _, ok := allMetrics.m[name]; ok {\n- return nil, ErrNameInUse\n+ return ErrNameInUse\n}\n- m := &Uint64Metric{\n+ allMetrics.m[name] = customUint64Metric{\nmetadata: &pb.MetricMetadata{\nName: name,\nDescription: description,\n@@ -126,9 +134,25 @@ func NewUint64Metric(name string, sync bool, description string) (*Uint64Metric,\nSync: sync,\nType: pb.MetricMetadata_UINT64,\n},\n+ value: value,\n+ }\n+ return nil\n+}\n+\n+// MustRegisterCustomUint64Metric calls RegisterCustomUint64Metric and panics\n+// if it returns an error.\n+func MustRegisterCustomUint64Metric(name string, sync bool, description string, value func() uint64) {\n+ if err := RegisterCustomUint64Metric(name, sync, description, value); err != nil {\n+ panic(fmt.Sprintf(\"Unable to register metric %q: %v\", name, err))\n}\n- allMetrics.m[name] = m\n- return m, nil\n+}\n+\n+// NewUint64Metric creates and registers a new metric with the given name.\n+//\n+// Metrics must be statically defined (i.e., at init).\n+func NewUint64Metric(name string, sync bool, description string) (*Uint64Metric, error) {\n+ var m Uint64Metric\n+ return &m, RegisterCustomUint64Metric(name, sync, description, m.Value)\n}\n// MustCreateNewUint64Metric calls NewUint64Metric and panics if it returns an\n@@ -158,13 +182,13 @@ func (m *Uint64Metric) IncrementBy(v uint64) {\n// metricSet holds named metrics.\ntype metricSet struct {\n- m map[string]*Uint64Metric\n+ m map[string]customUint64Metric\n}\n// makeMetricSet returns a new metricSet.\nfunc makeMetricSet() metricSet {\nreturn metricSet{\n- m: make(map[string]*Uint64Metric),\n+ m: make(map[string]customUint64Metric),\n}\n}\n@@ -172,7 +196,7 @@ func makeMetricSet() metricSet {\nfunc (m *metricSet) Values() metricValues {\nvals := make(metricValues)\nfor k, v := range m.m {\n- vals[k] = v.Value()\n+ vals[k] = v.value()\n}\nreturn vals\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/BUILD",
"new_path": "pkg/sentry/socket/epsocket/BUILD",
"diff": "@@ -19,6 +19,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/binary\",\n\"//pkg/log\",\n+ \"//pkg/metric\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/device\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -33,6 +33,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/binary\"\n+ \"gvisor.googlesource.com/gvisor/pkg/metric\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n@@ -53,6 +54,43 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n+func mustCreateMetric(name, description string) *tcpip.StatCounter {\n+ var cm tcpip.StatCounter\n+ metric.MustRegisterCustomUint64Metric(name, false /* sync */, description, cm.Value)\n+ return &cm\n+}\n+\n+// Metrics contains metrics exported by netstack.\n+var Metrics = tcpip.Stats{\n+ UnknownProtocolRcvdPackets: mustCreateMetric(\"/netstack/unknown_protocol_received_packets\", \"Number of packets received by netstack that were for an unknown or unsupported protocol.\"),\n+ MalformedRcvdPackets: mustCreateMetric(\"/netstack/malformed_received_packets\", \"Number of packets received by netstack that were deemed malformed.\"),\n+ DroppedPackets: mustCreateMetric(\"/netstack/dropped_packets\", \"Number of packets dropped by netstack due to full queues.\"),\n+ IP: tcpip.IPStats{\n+ PacketsReceived: mustCreateMetric(\"/netstack/ip/packets_received\", \"Total number of IP packets received from the link layer in nic.DeliverNetworkPacket.\"),\n+ InvalidAddressesReceived: mustCreateMetric(\"/netstack/ip/invalid_addresses_received\", \"Total number of IP packets received with an unknown or invalid destination address.\"),\n+ PacketsDelivered: mustCreateMetric(\"/netstack/ip/packets_delivered\", \"Total number of incoming IP packets that are successfully delivered to the transport layer via HandlePacket.\"),\n+ PacketsSent: mustCreateMetric(\"/netstack/ip/packets_sent\", \"Total number of IP packets sent via WritePacket.\"),\n+ OutgoingPacketErrors: mustCreateMetric(\"/netstack/ip/outgoing_packet_errors\", \"Total number of IP packets which failed to write to a link-layer endpoint.\"),\n+ },\n+ TCP: tcpip.TCPStats{\n+ ActiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/active_connection_openings\", \"Number of connections opened successfully via Connect.\"),\n+ PassiveConnectionOpenings: mustCreateMetric(\"/netstack/tcp/passive_connection_openings\", \"Number of connections opened successfully via Listen.\"),\n+ FailedConnectionAttempts: mustCreateMetric(\"/netstack/tcp/failed_connection_attempts\", \"Number of calls to Connect or Listen (active and passive openings, respectively) that end in an error.\"),\n+ ValidSegmentsReceived: mustCreateMetric(\"/netstack/tcp/valid_segments_received\", \"Number of TCP segments received that the transport layer successfully parsed.\"),\n+ InvalidSegmentsReceived: mustCreateMetric(\"/netstack/tcp/invalid_segments_received\", \"Number of TCP segments received that the transport layer could not parse.\"),\n+ SegmentsSent: mustCreateMetric(\"/netstack/tcp/segments_sent\", \"Number of TCP segments sent.\"),\n+ ResetsSent: mustCreateMetric(\"/netstack/tcp/resets_sent\", \"Number of TCP resets sent.\"),\n+ ResetsReceived: mustCreateMetric(\"/netstack/tcp/resets_received\", \"Number of TCP resets received.\"),\n+ },\n+ UDP: tcpip.UDPStats{\n+ PacketsReceived: mustCreateMetric(\"/netstack/udp/packets_received\", \"Number of UDP datagrams received via HandlePacket.\"),\n+ UnknownPortErrors: mustCreateMetric(\"/netstack/udp/unknown_port_errors\", \"Number of incoming UDP datagrams dropped because they did not have a known destination port.\"),\n+ 
ReceiveBufferErrors: mustCreateMetric(\"/netstack/udp/receive_buffer_errors\", \"Number of incoming UDP datagrams dropped due to the receiving buffer being in an invalid state.\"),\n+ MalformedPacketsReceived: mustCreateMetric(\"/netstack/udp/malformed_packets_received\", \"Number of incoming UDP datagrams dropped due to the UDP header being in a malformed state.\"),\n+ PacketsSent: mustCreateMetric(\"/netstack/udp/packets_sent\", \"Number of UDP datagrams sent via sendUDP.\"),\n+ },\n+}\n+\nconst sizeOfInt32 int = 4\nvar errStackType = syserr.New(\"expected but did not receive an epsocket.Stack\", linux.EINVAL)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -683,11 +683,14 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\n// NetworkNone sets up loopback using netstack.\nnetProtos := []string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}\nprotoNames := []string{tcp.ProtocolName, udp.ProtocolName, ping.ProtocolName4}\n- s := &epsocket.Stack{stack.New(netProtos, protoNames, stack.Options{Clock: clock})}\n+ s := epsocket.Stack{stack.New(netProtos, protoNames, stack.Options{\n+ Clock: clock,\n+ Stats: epsocket.Metrics,\n+ })}\nif err := s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(true)); err != nil {\nreturn nil, fmt.Errorf(\"failed to enable SACK: %v\", err)\n}\n- return s, nil\n+ return &s, nil\ndefault:\npanic(fmt.Sprintf(\"invalid network configuration: %v\", conf.Network))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add new netstack metrics to the sentry
PiperOrigin-RevId: 216431260
Change-Id: Ia6e5c8d506940148d10ff2884cf4440f470e5820 |
259,992 | 09.10.2018 21:06:18 | 25,200 | 20508bafb88d2037ea3b2c8483b191ce72e7ad7e | Add tests to verify gofer is chroot'ed | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/debug.go",
"new_path": "runsc/cmd/debug.go",
"diff": "@@ -85,7 +85,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nif err != nil {\nFatalf(\"error loading container %q: %v\", id, err)\n}\n- if candidate.Pid() == d.pid {\n+ if candidate.SandboxPid() == d.pid {\nc = candidate\nbreak\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/list.go",
"new_path": "runsc/cmd/list.go",
"diff": "@@ -94,7 +94,7 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nfor _, c := range containers {\nfmt.Fprintf(w, \"%s\\t%d\\t%s\\t%s\\t%s\\t%s\\n\",\nc.ID,\n- c.Pid(),\n+ c.SandboxPid(),\nc.Status,\nc.BundleDir,\nc.CreatedAt.Format(time.RFC3339Nano),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -316,7 +316,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\n// Write the PID file. Containerd considers the create complete after\n// this file is created, so it must be the last thing we do.\nif pidFile != \"\" {\n- if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.Pid())), 0644); err != nil {\n+ if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {\nc.Destroy()\nreturn nil, fmt.Errorf(\"error writing PID file: %v\", err)\n}\n@@ -426,9 +426,9 @@ func (c *Container) Event() (*boot.Event, error) {\nreturn c.Sandbox.Event(c.ID)\n}\n-// Pid returns the Pid of the sandbox the container is running in, or -1 if the\n+// SandboxPid returns the Pid of the sandbox the container is running in, or -1 if the\n// container is not running.\n-func (c *Container) Pid() int {\n+func (c *Container) SandboxPid() int {\nif err := c.requireStatus(\"get PID\", Created, Running, Paused); err != nil {\nreturn -1\n}\n@@ -566,7 +566,7 @@ func (c *Container) State() specs.State {\nVersion: specs.Version,\nID: c.ID,\nStatus: c.Status.String(),\n- Pid: c.Pid(),\n+ Pid: c.SandboxPid(),\nBundle: c.BundleDir,\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/root/chroot_test.go",
"new_path": "runsc/test/root/chroot_test.go",
"diff": "@@ -24,6 +24,7 @@ import (\n\"fmt\"\n\"io/ioutil\"\n\"os\"\n+ \"os/exec\"\n\"path/filepath\"\n\"reflect\"\n\"sort\"\n@@ -91,6 +92,75 @@ func TestChroot(t *testing.T) {\n}\n}\n+func TestChrootGofer(t *testing.T) {\n+ d := testutil.MakeDocker(\"chroot-test\")\n+ if err := d.Run(\"alpine\", \"sleep\", \"10000\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ // It's tricky to find gofers. Get sandbox PID first, then find parent. From\n+ // parent get all immediate children, remove the sandbox, and everything else\n+ // are gofers.\n+ sandPID, err := d.SandboxPid()\n+ if err != nil {\n+ t.Fatalf(\"Docker.SandboxPid(): %v\", err)\n+ }\n+\n+ // Find sandbox's parent PID.\n+ cmd := fmt.Sprintf(\"grep PPid /proc/%d/status | awk '{print $2}'\", sandPID)\n+ parent, err := exec.Command(\"sh\", \"-c\", cmd).CombinedOutput()\n+ if err != nil {\n+ t.Fatalf(\"failed to fetch runsc (%d) parent PID: %v, out:\\n%s\", sandPID, err, string(parent))\n+ }\n+ parentPID, err := strconv.Atoi(strings.TrimSpace(string(parent)))\n+ if err != nil {\n+ t.Fatalf(\"failed to parse PPID %q: %v\", string(parent), err)\n+ }\n+\n+ // Get all children from parent.\n+ childrenOut, err := exec.Command(\"/usr/bin/pgrep\", \"-P\", strconv.Itoa(parentPID)).CombinedOutput()\n+ if err != nil {\n+ t.Fatalf(\"failed to fetch containerd-shim children: %v\", err)\n+ }\n+ children := strings.Split(strings.TrimSpace(string(childrenOut)), \"\\n\")\n+\n+ // This where the root directory is mapped on the host and that's where the\n+ // gofer must have chroot'd to.\n+ root, err := d.RootDirInHost()\n+ if err != nil {\n+ t.Fatalf(\"Docker.RootDirInHost(): %v\", err)\n+ }\n+\n+ for _, child := range children {\n+ childPID, err := strconv.Atoi(child)\n+ if err != nil {\n+ t.Fatalf(\"failed to parse child PID %q: %v\", child, err)\n+ }\n+ if childPID == sandPID {\n+ // Skip the sandbox, all other immediate children are gofers.\n+ continue\n+ }\n+\n+ // Check that gofer is chroot'ed.\n+ chroot, err := filepath.EvalSymlinks(filepath.Join(\"/proc\", child, \"root\"))\n+ if err != nil {\n+ t.Fatalf(\"error resolving /proc/<pid>/root symlink: %v\", err)\n+ }\n+ if root != chroot {\n+ t.Errorf(\"gofer chroot is wrong, want: %q, got: %q\", root, chroot)\n+ }\n+\n+ path, err := filepath.EvalSymlinks(filepath.Join(\"/proc\", child, \"cwd\"))\n+ if err != nil {\n+ t.Fatalf(\"error resolving /proc/<pid>/cwd symlink: %v\", err)\n+ }\n+ if root != path {\n+ t.Errorf(\"gofer current dir is wrong, want: %q, got: %q\", root, path)\n+ }\n+ }\n+}\n+\nfunc TestMain(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -280,6 +280,15 @@ func (d *Docker) SandboxPid() (int, error) {\nreturn pid, nil\n}\n+// RootDirInHost returns where the root directory is mapped on the host.\n+func (d *Docker) RootDirInHost() (string, error) {\n+ out, err := do(\"inspect\", \"-f={{.GraphDriver.Data.MergedDir}}\", d.Name)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error retrieving pid: %v\", err)\n+ }\n+ return strings.TrimSuffix(string(out), \"\\n\"), nil\n+}\n+\n// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n// for the given pattern.\nfunc (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, error) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add tests to verify gofer is chroot'ed
PiperOrigin-RevId: 216472439
Change-Id: Ic4cb86c8e0a9cb022d3ceed9dc5615266c307cf9 |
260,026 | 10.10.2018 14:17:27 | 25,200 | 8388a505e735045f31c6f7180711ef57148dc517 | Support for older Linux kernels without getrandom | [
{
"change_type": "MODIFY",
"old_path": "pkg/rand/rand_linux.go",
"new_path": "pkg/rand/rand_linux.go",
"diff": "package rand\nimport (\n+ \"crypto/rand\"\n\"io\"\n+ \"sync\"\n\"golang.org/x/sys/unix\"\n)\n// reader implements an io.Reader that returns pseudorandom bytes.\n-type reader struct{}\n+type reader struct {\n+ once sync.Once\n+ useGetrandom bool\n+}\n// Read implements io.Reader.Read.\n-func (reader) Read(p []byte) (int, error) {\n+func (r *reader) Read(p []byte) (int, error) {\n+ r.once.Do(func() {\n+ _, err := unix.Getrandom(p, 0)\n+ if err != unix.ENOSYS {\n+ r.useGetrandom = true\n+ }\n+ })\n+\n+ if r.useGetrandom {\nreturn unix.Getrandom(p, 0)\n}\n+ return rand.Read(p)\n+}\n// Reader is the default reader.\n-var Reader io.Reader = reader{}\n+var Reader io.Reader = &reader{}\n// Read reads from the default reader.\nfunc Read(b []byte) (int, error) {\nreturn io.ReadFull(Reader, b)\n}\n+\n+// Init can be called to make sure /dev/urandom is pre-opened on kernels that\n+// do not support getrandom(2).\n+func Init() error {\n+ p := make([]byte, 1)\n+ _, err := Read(p)\n+ return err\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/BUILD",
"new_path": "runsc/boot/BUILD",
"diff": "@@ -25,6 +25,7 @@ go_library(\n\"//pkg/control/server\",\n\"//pkg/cpuid\",\n\"//pkg/log\",\n+ \"//pkg/rand\",\n\"//pkg/sentry/arch\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/control\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -17,7 +17,7 @@ package boot\nimport (\n\"fmt\"\n- \"math/rand\"\n+ mrand \"math/rand\"\n\"os\"\n\"os/signal\"\n\"runtime\"\n@@ -30,6 +30,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/cpuid\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n+ \"gvisor.googlesource.com/gvisor/pkg/rand\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/host\"\n@@ -133,7 +134,7 @@ type execProcess struct {\nfunc init() {\n// Initialize the random number generator.\n- rand.Seed(gtime.Now().UnixNano())\n+ mrand.Seed(gtime.Now().UnixNano())\n// Register the global syscall table.\nkernel.RegisterSyscallTable(slinux.AMD64)\n@@ -167,9 +168,16 @@ type Args struct {\n// New initializes a new kernel loader configured by spec.\n// New also handles setting up a kernel for restoring a container.\nfunc New(args Args) (*Loader, error) {\n+ // We initialize the rand package now to make sure /dev/urandom is pre-opened\n+ // on kernels that do not support getrandom(2).\n+ if err := rand.Init(); err != nil {\n+ return nil, fmt.Errorf(\"error setting up rand: %v\", err)\n+ }\n+\nif err := usage.Init(); err != nil {\nreturn nil, fmt.Errorf(\"error setting up memory usage: %v\", err)\n}\n+\n// Create kernel and platform.\np, err := createPlatform(args.Conf, args.DeviceFD)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Support for older Linux kernels without getrandom
Change-Id: I1fb9f5b47a264a7617912f6f56f995f3c4c5e578
PiperOrigin-RevId: 216591484 |
259,891 | 10.10.2018 16:49:40 | 25,200 | e21ba16d9cf7ba4f2d5f65651e06ab592032ef86 | Removes irrelevant TODO. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -501,9 +501,6 @@ func (l *Loader) startContainer(k *kernel.Kernel, spec *specs.Spec, conf *Config\ncaps,\nl.k.RootUserNamespace())\n- // TODO New containers should be started in new PID namespaces\n- // when indicated by the spec.\n-\nprocArgs, err := newProcess(cid, spec, creds, l.k)\nif err != nil {\nreturn fmt.Errorf(\"failed to create new process: %v\", err)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Removes irrelevant TODO.
PiperOrigin-RevId: 216616873
Change-Id: I4d974ab968058eadd01542081e18a987ef08f50a |
259,948 | 11.10.2018 11:40:34 | 25,200 | 0bfa03d61c7791aad03da5ac021bc60e4578858e | sentry: allow saving of unlinked files with open fds on virtual fs. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent_state.go",
"new_path": "pkg/sentry/fs/dirent_state.go",
"diff": "@@ -23,16 +23,20 @@ import (\n// beforeSave is invoked by stateify.\nfunc (d *Dirent) beforeSave() {\n- // Refuse to save if the file has already been deleted (but still has\n- // open fds, which is why the Dirent is still accessible). We know the\n- // the restore opening of the file will always fail. This condition will\n- // last until all the open fds and this Dirent are closed and released.\n+ // Refuse to save if the file is on a non-virtual file system and has\n+ // already been deleted (but still has open fds, which is why the Dirent\n+ // is still accessible). We know the the restore re-opening of the file\n+ // will always fail. This condition will last until all the open fds and\n+ // this Dirent are closed and released.\n+ //\n+ // Such \"dangling\" open files on virtual file systems (e.g., tmpfs) is\n+ // OK to save as their restore does not require re-opening the files.\n//\n// Note that this is rejection rather than failure---it would be\n// perfectly OK to save---we are simply disallowing it here to prevent\n// generating non-restorable state dumps. As the program continues its\n// execution, it may become allowed to save again.\n- if atomic.LoadInt32(&d.deleted) != 0 {\n+ if !d.Inode.IsVirtual() && atomic.LoadInt32(&d.deleted) != 0 {\nn, _ := d.FullName(nil /* root */)\npanic(ErrSaveRejection{fmt.Errorf(\"deleted file %q still has open fds\", n)})\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: allow saving of unlinked files with open fds on virtual fs.
PiperOrigin-RevId: 216733414
Change-Id: I33cd3eb818f0c39717d6656fcdfff6050b37ebb0 |
259,992 | 11.10.2018 11:56:42 | 25,200 | d40d80106988e9302aaa354d4f58caa6c31429b4 | Sandbox cgroup tests
Verify that cgroup is being properly set. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cgroup/cgroup.go",
"new_path": "runsc/cgroup/cgroup.go",
"diff": "@@ -123,6 +123,8 @@ func fillFromAncestor(path string) (string, error) {\nreturn val, nil\n}\n+// countCpuset returns the number of CPU in a string formatted like:\n+// \"0-2,7,12-14 # bits 0, 1, 2, 7, 12, 13, and 14 set\" - man 7 cpuset\nfunc countCpuset(cpuset string) (int, error) {\nvar count int\nfor _, p := range strings.Split(cpuset, \",\") {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "@@ -231,38 +231,6 @@ func TestNumCPU(t *testing.T) {\n}\n}\n-// TestCgroup sets cgroup options and checks that container can start.\n-// TODO: Verify that these were set to cgroup on the host.\n-func TestCgroup(t *testing.T) {\n- if err := testutil.Pull(\"alpine\"); err != nil {\n- t.Fatal(\"docker pull failed:\", err)\n- }\n- d := testutil.MakeDocker(\"cgroup-test\")\n-\n- var args []string\n- args = append(args, \"--cpu-shares=1000\")\n- args = append(args, \"--cpu-period=2000\")\n- args = append(args, \"--cpu-quota=3000\")\n- args = append(args, \"--cpuset-cpus=0\")\n- args = append(args, \"--cpuset-mems=0\")\n- args = append(args, \"--kernel-memory=100MB\")\n- args = append(args, \"--memory=1GB\")\n- args = append(args, \"--memory-reservation=500MB\")\n- args = append(args, \"--memory-swap=2GB\")\n- args = append(args, \"--memory-swappiness=5\")\n- args = append(args, \"--blkio-weight=750\")\n-\n- args = append(args, \"hello-world\")\n- if err := d.Run(args...); err != nil {\n- t.Fatal(\"docker create failed:\", err)\n- }\n- defer d.CleanUp()\n-\n- if _, err := d.WaitForOutput(\"Hello from Docker!\", 5*time.Second); err != nil {\n- t.Fatalf(\"docker didn't say hello: %v\", err)\n- }\n-}\n-\nfunc TestMain(m *testing.M) {\ntestutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/root/BUILD",
"new_path": "runsc/test/root/BUILD",
"diff": "@@ -11,7 +11,10 @@ go_library(\ngo_test(\nname = \"root_test\",\nsize = \"small\",\n- srcs = [\"chroot_test.go\"],\n+ srcs = [\n+ \"cgroup_test.go\",\n+ \"chroot_test.go\",\n+ ],\nembed = [\":root\"],\ntags = [\n# Requires docker and runsc to be configured before the test runs.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/root/cgroup_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package root\n+\n+import (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+ \"strings\"\n+ \"testing\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+// TestCgroup sets cgroup options and checks that cgroup was properly configured.\n+func TestCgroup(t *testing.T) {\n+ if err := testutil.Pull(\"alpine\"); err != nil {\n+ t.Fatal(\"docker pull failed:\", err)\n+ }\n+ d := testutil.MakeDocker(\"cgroup-test\")\n+\n+ attrs := []struct {\n+ arg string\n+ ctrl string\n+ file string\n+ want string\n+ skipIfNotFound bool\n+ }{\n+ {\n+ arg: \"--cpu-shares=1000\",\n+ ctrl: \"cpu\",\n+ file: \"cpu.shares\",\n+ want: \"1000\",\n+ },\n+ {\n+ arg: \"--cpu-period=2000\",\n+ ctrl: \"cpu\",\n+ file: \"cpu.cfs_period_us\",\n+ want: \"2000\",\n+ },\n+ {\n+ arg: \"--cpu-quota=3000\",\n+ ctrl: \"cpu\",\n+ file: \"cpu.cfs_quota_us\",\n+ want: \"3000\",\n+ },\n+ {\n+ arg: \"--cpuset-cpus=0\",\n+ ctrl: \"cpuset\",\n+ file: \"cpuset.cpus\",\n+ want: \"0\",\n+ },\n+ {\n+ arg: \"--cpuset-mems=0\",\n+ ctrl: \"cpuset\",\n+ file: \"cpuset.mems\",\n+ want: \"0\",\n+ },\n+ {\n+ arg: \"--kernel-memory=100MB\",\n+ ctrl: \"memory\",\n+ file: \"memory.kmem.limit_in_bytes\",\n+ want: \"104857600\",\n+ },\n+ {\n+ arg: \"--memory=1GB\",\n+ ctrl: \"memory\",\n+ file: \"memory.limit_in_bytes\",\n+ want: \"1073741824\",\n+ },\n+ {\n+ arg: \"--memory-reservation=500MB\",\n+ ctrl: \"memory\",\n+ file: \"memory.soft_limit_in_bytes\",\n+ want: \"524288000\",\n+ },\n+ {\n+ arg: \"--memory-swap=2GB\",\n+ ctrl: \"memory\",\n+ file: \"memory.memsw.limit_in_bytes\",\n+ want: \"2147483648\",\n+ skipIfNotFound: true, // swap may be disabled on the machine.\n+ },\n+ {\n+ arg: \"--memory-swappiness=5\",\n+ ctrl: \"memory\",\n+ file: \"memory.swappiness\",\n+ want: \"5\",\n+ },\n+ {\n+ arg: \"--blkio-weight=750\",\n+ ctrl: \"blkio\",\n+ file: \"blkio.weight\",\n+ want: \"750\",\n+ },\n+ }\n+\n+ args := make([]string, 0, len(attrs))\n+ for _, attr := range attrs {\n+ args = append(args, attr.arg)\n+ }\n+\n+ args = append(args, \"alpine\", \"sleep\", \"10000\")\n+ if err := d.Run(args...); err != nil {\n+ t.Fatal(\"docker create failed:\", err)\n+ }\n+ defer d.CleanUp()\n+\n+ gid, err := d.ID()\n+ if err != nil {\n+ t.Fatalf(\"Docker.ID() failed: %v\", err)\n+ }\n+ t.Logf(\"cgroup ID: %s\", gid)\n+ for _, attr := range attrs {\n+ path := filepath.Join(\"/sys/fs/cgroup\", attr.ctrl, \"docker\", gid, attr.file)\n+ out, err := ioutil.ReadFile(path)\n+ if err != nil {\n+ if os.IsNotExist(err) && attr.skipIfNotFound {\n+ t.Logf(\"skipped %s/%s\", attr.ctrl, attr.file)\n+ continue\n+ }\n+ t.Fatalf(\"failed to read %q: %v\", path, err)\n+ }\n+ if got := strings.TrimSpace(string(out)); got != attr.want {\n+ t.Errorf(\"arg: %q, cgroup attribute %s/%s, got: %q, want: %q\", attr.arg, attr.ctrl, attr.file, got, attr.want)\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -298,6 +298,15 @@ func (d *Docker) RootDirInHost() (string, error) {\nreturn strings.TrimSuffix(string(out), \"\\n\"), nil\n}\n+// ID returns the container ID.\n+func (d *Docker) ID() (string, error) {\n+ out, err := do(\"inspect\", \"-f={{.Id}}\", d.Name)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error retrieving ID: %v\", err)\n+ }\n+ return strings.TrimSpace(string(out)), nil\n+}\n+\n// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n// for the given pattern.\nfunc (d *Docker) WaitForOutput(pattern string, timeout time.Duration) (string, error) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Sandbox cgroup tests
Verify that cgroup is being properly set.
PiperOrigin-RevId: 216736137
Change-Id: I0e27fd604eca67e7dd2e3548dc372ca9cc416309 |
259,992 | 11.10.2018 14:28:15 | 25,200 | e68d86e1bd47f7905e4452f7ce0e04e683561f85 | Make debug log file name configurable
This is a breaking change if you're using --debug-log-dir.
The fix is to replace it with --debug-log and add a '/' at
the end:
--debug-log-dir=/tmp/runsc ==> --debug-log=/tmp/runsc/ | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -297,7 +297,7 @@ Docker configuration (`/etc/docker/daemon.json`):\n\"runsc\": {\n\"path\": \"/usr/local/bin/runsc\",\n\"runtimeArgs\": [\n- \"--debug-log-dir=/tmp/runsc\",\n+ \"--debug-log=/tmp/runsc/\",\n\"--debug\",\n\"--strace\"\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/config.go",
"new_path": "runsc/boot/config.go",
"diff": "@@ -160,9 +160,8 @@ type Config struct {\n// LogFormat is the log format, \"text\" or \"json\".\nLogFormat string\n- // DebugLogDir is the directory to log debug information to, if not\n- // empty.\n- DebugLogDir string\n+ // DebugLog is the path to log debug information to, if not empty.\n+ DebugLog string\n// FileAccess indicates how the filesystem is accessed.\nFileAccess FileAccessType\n@@ -217,7 +216,7 @@ func (c *Config) ToFlags() []string {\n\"--debug=\" + strconv.FormatBool(c.Debug),\n\"--log=\" + c.LogFilename,\n\"--log-format=\" + c.LogFormat,\n- \"--debug-log-dir=\" + c.DebugLogDir,\n+ \"--debug-log=\" + c.DebugLog,\n\"--file-access=\" + c.FileAccess.String(),\n\"--overlay=\" + strconv.FormatBool(c.Overlay),\n\"--network=\" + c.Network.String(),\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -45,7 +45,7 @@ var (\n// system that are not covered by the runtime spec.\n// Debugging flags.\n- debugLogDir = flag.String(\"debug-log-dir\", \"\", \"additional location for logs. It creates individual log files per command\")\n+ debugLog = flag.String(\"debug-log\", \"\", \"additional location for logs. If it ends with '/', log files are created inside the directory with default names. The following variables are available: %TIMESTAMP%, %COMMAND%.\")\nlogPackets = flag.Bool(\"log-packets\", false, \"enable network packet logging\")\nlogFD = flag.Int(\"log-fd\", -1, \"file descriptor to log to. If set, the 'log' flag is ignored.\")\ndebugLogFD = flag.Int(\"debug-log-fd\", -1, \"file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.\")\n@@ -131,7 +131,7 @@ func main() {\nDebug: *debug,\nLogFilename: *logFilename,\nLogFormat: *logFormat,\n- DebugLogDir: *debugLogDir,\n+ DebugLog: *debugLog,\nFileAccess: fsAccess,\nOverlay: *overlay,\nNetwork: netType,\n@@ -195,13 +195,10 @@ func main() {\n}\ne = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}\n- } else if *debugLogDir != \"\" {\n- if err := os.MkdirAll(*debugLogDir, 0775); err != nil {\n- cmd.Fatalf(\"error creating dir %q: %v\", *debugLogDir, err)\n- }\n- f, err := specutils.DebugLogFile(*debugLogDir, subcommand)\n+ } else if *debugLog != \"\" {\n+ f, err := specutils.DebugLogFile(*debugLog, subcommand)\nif err != nil {\n- cmd.Fatalf(\"error opening debug log file in %q: %v\", *debugLogDir, err)\n+ cmd.Fatalf(\"error opening debug log file in %q: %v\", *debugLog, err)\n}\ne = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -291,10 +291,10 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.Args = append(cmd.Args, \"--log-fd=\"+strconv.Itoa(nextFD))\nnextFD++\n}\n- if conf.DebugLogDir != \"\" {\n- debugLogFile, err := specutils.DebugLogFile(conf.DebugLogDir, \"boot\")\n+ if conf.DebugLog != \"\" {\n+ debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, \"boot\")\nif err != nil {\n- return fmt.Errorf(\"error opening debug log file in %q: %v\", conf.DebugLogDir, err)\n+ return fmt.Errorf(\"error opening debug log file in %q: %v\", conf.DebugLog, err)\n}\ndefer debugLogFile.Close()\ncmd.ExtraFiles = append(cmd.ExtraFiles, debugLogFile)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -351,12 +351,25 @@ func WaitForReady(pid int, timeout time.Duration, ready func() (bool, error)) er\nreturn backoff.Retry(op, b)\n}\n-// DebugLogFile opens a file in logDir based on the timestamp and subcommand\n-// for writing.\n-func DebugLogFile(logDir, subcommand string) (*os.File, error) {\n- // Format: <debug-log-dir>/runsc.log.<yyyymmdd-hhmmss.uuuuuu>.<command>\n- filename := fmt.Sprintf(\"runsc.log.%s.%s\", time.Now().Format(\"20060102-150405.000000\"), subcommand)\n- return os.OpenFile(filepath.Join(logDir, filename), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)\n+// DebugLogFile opens a log file using 'logPattern' as location. If 'logPattern'\n+// ends with '/', it's used as a directory with default file name.\n+// 'logPattern' can contain variables that are substitued:\n+// - %TIMESTAMP%: is replaced with a timestamp using the following format:\n+// <yyyymmdd-hhmmss.uuuuuu>\n+// - %COMMAND%: is replaced with 'command'\n+func DebugLogFile(logPattern, command string) (*os.File, error) {\n+ if strings.HasSuffix(logPattern, \"/\") {\n+ // Default format: <debug-log>/runsc.log.<yyyymmdd-hhmmss.uuuuuu>.<command>\n+ logPattern += \"runsc.log.%TIMESTAMP%.%COMMAND%\"\n+ }\n+ logPattern = strings.Replace(logPattern, \"%TIMESTAMP%\", time.Now().Format(\"20060102-150405.000000\"), -1)\n+ logPattern = strings.Replace(logPattern, \"%COMMAND%\", command, -1)\n+\n+ dir := filepath.Dir(logPattern)\n+ if err := os.MkdirAll(dir, 0775); err != nil {\n+ return nil, fmt.Errorf(\"error creating dir %q: %v\", dir, err)\n+ }\n+ return os.OpenFile(logPattern, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)\n}\n// Mount creates the mount point and calls Mount with the given flags.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/install.sh",
"new_path": "runsc/test/install.sh",
"diff": "@@ -75,7 +75,7 @@ if [[ ${uninstall} == 0 ]]; then\nmkdir -p \"${logdir}\"\nsudo -n chmod a+wx \"${logdir}\"\n- declare -r args=\"--debug-log-dir \"${logdir}\" --debug --strace --log-packets\"\n+ declare -r args=\"--debug-log '${logdir}/' --debug --strace --log-packets\"\nsudo -n \"${dockercfg}\" runtime-add \"${runtime}\" \"${runsc}\" ${args}\nsudo -n \"${dockercfg}\" runtime-add \"${runtime}\"-kvm \"${runsc}\" --platform=kvm ${args}\nsudo -n \"${dockercfg}\" runtime-add \"${runtime}\"-hostnet \"${runsc}\" --network=host ${args}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make debug log file name configurable
This is a breaking change if you're using --debug-log-dir.
The fix is to replace it with --debug-log and add a '/' at
the end:
--debug-log-dir=/tmp/runsc ==> --debug-log=/tmp/runsc/
PiperOrigin-RevId: 216761212
Change-Id: I244270a0a522298c48115719fa08dad55e34ade1 |
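The record above changes how runsc picks its debug log destination: a trailing '/' in --debug-log selects the default runsc.log.%TIMESTAMP%.%COMMAND% name, and both placeholders are substituted before the file is opened. A minimal sketch of calling the DebugLogFile helper from the specutils diff follows; the import path and the /tmp/runsc/ location are illustrative assumptions rather than part of the commit.

    package main

    import (
        "fmt"

        // Assumed import path for the specutils package modified above.
        "gvisor.googlesource.com/gvisor/runsc/specutils"
    )

    func main() {
        // With the trailing '/', DebugLogFile appends the default pattern, so
        // this is expected to open something like
        // /tmp/runsc/runsc.log.20181011-142815.000000.boot for the boot command.
        f, err := specutils.DebugLogFile("/tmp/runsc/", "boot")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        fmt.Println("debug logging to", f.Name())
    }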
259,992 | 11.10.2018 15:20:52 | 25,200 | 86680fa00240e3e439d1275f4f8bf89678cf3355 | Add String() method to AddressMask | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -139,6 +139,11 @@ type Address string\n// AddressMask is a bitmask for an address.\ntype AddressMask string\n+// String implements Stringer.\n+func (a AddressMask) String() string {\n+ return Address(a).String()\n+}\n+\n// Subnet is a subnet defined by its address and mask.\ntype Subnet struct {\naddress Address\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add String() method to AddressMask
PiperOrigin-RevId: 216770391
Change-Id: Idcdc28b2fe9e1b0b63b8119d445f05a8bcbce81e |
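Because the new AddressMask.String in the diff above simply delegates to Address.String, a 4-byte mask should render in the same dotted form as an IPv4 address. A small usage sketch; the import path and the expected output are assumptions based on the existing Address formatting, not output taken from the commit.

    package main

    import (
        "fmt"

        // Assumed import path for the tcpip package modified above.
        "gvisor.googlesource.com/gvisor/pkg/tcpip"
    )

    func main() {
        // A /24 mask stored as raw bytes; with the new method this is expected
        // to print the dotted form, e.g. 255.255.255.0.
        mask := tcpip.AddressMask("\xff\xff\xff\x00")
        fmt.Println(mask.String())
    }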
259,992 | 11.10.2018 17:44:50 | 25,200 | f074f0c2c77c4aec24700a49ebcbca1a7f2285e0 | Make the gofer process enter namespaces
This is done to further isolate the gofer from the host. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "@@ -726,11 +726,21 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd := exec.Command(binPath, args...)\ncmd.ExtraFiles = goferEnds\n+ // Enter new namespaces to isolate from the rest of the system. Don't unshare\n+ // cgroup because gofer is added to a cgroup in the caller's namespace.\n+ nss := []specs.LinuxNamespace{\n+ {Type: specs.IPCNamespace},\n+ {Type: specs.MountNamespace},\n+ {Type: specs.NetworkNamespace},\n+ {Type: specs.PIDNamespace},\n+ {Type: specs.UTSNamespace},\n+ }\n+\n// Setup any uid/gid mappings, and create or join the configured user\n// namespace so the gofer's view of the filesystem aligns with the\n// users in the sandbox.\n+ nss = append(nss, specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)...)\nspecutils.SetUIDGIDMappings(cmd, spec)\n- nss := specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)\n// Start the gofer in the given namespace.\nlog.Debugf(\"Starting gofer: %s %v\", binPath, args)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -408,12 +408,14 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\ncmd.SysProcAttr.Setsid = true\n// nss is the set of namespaces to join or create before starting the sandbox\n- // process. IPC and UTS namespaces from the host are not used as they\n+ // process. Mount, IPC and UTS namespaces from the host are not used as they\n// are virtualized inside the sandbox. Be paranoid and run inside an empty\n- // namespace for these.\n- log.Infof(\"Sandbox will be started in new IPC and UTS namespaces\")\n+ // namespace for these. Don't unshare cgroup because sandbox is added to a\n+ // cgroup in the caller's namespace.\n+ log.Infof(\"Sandbox will be started in new mount, IPC and UTS namespaces\")\nnss := []specs.LinuxNamespace{\n{Type: specs.IPCNamespace},\n+ {Type: specs.MountNamespace},\n{Type: specs.UTSNamespace},\n}\n@@ -426,9 +428,6 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nnss = append(nss, specs.LinuxNamespace{Type: specs.PIDNamespace})\n}\n- log.Infof(\"Sandbox will be started in new mount namespace\")\n- nss = append(nss, specs.LinuxNamespace{Type: specs.MountNamespace})\n-\n// Joins the network namespace if network is enabled. the sandbox talks\n// directly to the host network, which may have been configured in the\n// namespace.\n@@ -440,9 +439,9 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\nnss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})\n}\n- // User namespace depends on the following options:\n- // - Host network/filesystem: requires to run inside the user namespace\n- // specified in the spec or the current namespace if none is configured.\n+ // User namespace depends on the network type. Host network requires to run\n+ // inside the user namespace specified in the spec or the current namespace\n+ // if none is configured.\nif conf.Network == boot.NetworkHost {\nif userns, ok := specutils.GetNS(specs.UserNamespace, spec); ok {\nlog.Infof(\"Sandbox will be started in container's user namespace: %+v\", userns)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make the gofer process enter namespaces
This is done to further isolate the gofer from the host.
PiperOrigin-RevId: 216790991
Change-Id: Ia265b77e4e50f815d08f743a05669f9d75ad7a6f |
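The container.go hunk above only lists the specs.LinuxNamespace entries the gofer should receive; the helper that actually launches it inside them is not shown in this record. As a rough illustration of what joining fresh namespaces amounts to on Linux, here is a sketch using clone flags — the flag set mirrors the IPC/mount/network/PID/UTS list above, but the placeholder binary and the direct use of SysProcAttr are assumptions, not the runsc implementation.

    package main

    import (
        "os/exec"
        "syscall"
    )

    func main() {
        // Start a child in new IPC, mount, network, PID and UTS namespaces,
        // matching the namespace list given to the gofer above. Requires
        // privileges (or an accompanying user namespace) to succeed.
        cmd := exec.Command("/bin/true") // placeholder binary
        cmd.SysProcAttr = &syscall.SysProcAttr{
            Cloneflags: syscall.CLONE_NEWIPC | syscall.CLONE_NEWNS |
                syscall.CLONE_NEWNET | syscall.CLONE_NEWPID | syscall.CLONE_NEWUTS,
        }
        if err := cmd.Run(); err != nil {
            panic(err)
        }
    }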
259,884 | 12.10.2018 12:58:42 | 25,200 | a771775f3a4680b3a121deb6f583ed62f4da8bef | Added spec command to create OCI spec config.json
The spec command is analogous to the 'runc spec' command and allows for
the convenient creation of a config.json file for users that don't have
runc handy. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/BUILD",
"new_path": "runsc/cmd/BUILD",
"diff": "@@ -23,6 +23,7 @@ go_library(\n\"restore.go\",\n\"resume.go\",\n\"run.go\",\n+ \"spec.go\",\n\"start.go\",\n\"state.go\",\n\"wait.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/cmd/spec.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cmd\n+\n+import (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+\n+ \"context\"\n+ \"flag\"\n+ \"github.com/google/subcommands\"\n+)\n+\n+var specTemplate = []byte(`{\n+ \"ociVersion\": \"1.0.0\",\n+ \"process\": {\n+ \"terminal\": true,\n+ \"user\": {\n+ \"uid\": 0,\n+ \"gid\": 0\n+ },\n+ \"args\": [\n+ \"sh\"\n+ ],\n+ \"env\": [\n+ \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n+ \"TERM=xterm\"\n+ ],\n+ \"cwd\": \"/\",\n+ \"capabilities\": {\n+ \"bounding\": [\n+ \"CAP_AUDIT_WRITE\",\n+ \"CAP_KILL\",\n+ \"CAP_NET_BIND_SERVICE\"\n+ ],\n+ \"effective\": [\n+ \"CAP_AUDIT_WRITE\",\n+ \"CAP_KILL\",\n+ \"CAP_NET_BIND_SERVICE\"\n+ ],\n+ \"inheritable\": [\n+ \"CAP_AUDIT_WRITE\",\n+ \"CAP_KILL\",\n+ \"CAP_NET_BIND_SERVICE\"\n+ ],\n+ \"permitted\": [\n+ \"CAP_AUDIT_WRITE\",\n+ \"CAP_KILL\",\n+ \"CAP_NET_BIND_SERVICE\"\n+ ],\n+ \"ambient\": [\n+ \"CAP_AUDIT_WRITE\",\n+ \"CAP_KILL\",\n+ \"CAP_NET_BIND_SERVICE\"\n+ ]\n+ },\n+ \"rlimits\": [\n+ {\n+ \"type\": \"RLIMIT_NOFILE\",\n+ \"hard\": 1024,\n+ \"soft\": 1024\n+ }\n+ ]\n+ },\n+ \"root\": {\n+ \"path\": \"rootfs\",\n+ \"readonly\": true\n+ },\n+ \"hostname\": \"runsc\",\n+ \"mounts\": [\n+ {\n+ \"destination\": \"/proc\",\n+ \"type\": \"proc\",\n+ \"source\": \"proc\"\n+ },\n+ {\n+ \"destination\": \"/dev\",\n+ \"type\": \"tmpfs\",\n+ \"source\": \"tmpfs\",\n+ \"options\": []\n+ },\n+ {\n+ \"destination\": \"/sys\",\n+ \"type\": \"sysfs\",\n+ \"source\": \"sysfs\",\n+ \"options\": [\n+ \"nosuid\",\n+ \"noexec\",\n+ \"nodev\",\n+ \"ro\"\n+ ]\n+ }\n+ ],\n+ \"linux\": {\n+ \"namespaces\": [\n+ {\n+ \"type\": \"pid\"\n+ },\n+ {\n+ \"type\": \"network\"\n+ },\n+ {\n+ \"type\": \"ipc\"\n+ },\n+ {\n+ \"type\": \"uts\"\n+ },\n+ {\n+ \"type\": \"mount\"\n+ }\n+ ]\n+ }\n+}`)\n+\n+// Spec implements subcommands.Command for the \"spec\" command.\n+type Spec struct {\n+ bundle string\n+}\n+\n+// Name implements subcommands.Command.Name.\n+func (*Spec) Name() string {\n+ return \"spec\"\n+}\n+\n+// Synopsis implements subcommands.Command.Synopsis.\n+func (*Spec) Synopsis() string {\n+ return \"create a new OCI bundle specification file\"\n+}\n+\n+// Usage implements subcommands.Command.Usage.\n+func (*Spec) Usage() string {\n+ return `spec [options] - create a new OCI bundle specification file.\n+\n+The spec command creates a new specification file (config.json) for a new OCI bundle.\n+\n+The specification file is a starter file that runs the \"sh\" command in the container. You\n+should edit the file to suit your needs. 
You can find out more about the format of the\n+specification file by visiting the OCI runtime spec repository:\n+https://github.com/opencontainers/runtime-spec/\n+\n+EXAMPLE:\n+ $ mkdir -p bundle/rootfs\n+ $ cd bundle\n+ $ runsc spec\n+ $ docker export $(docker create hello-world) | tar -xf - -C rootfs\n+ $ sed -i 's;\"sh\";\"/hello\";' config.json\n+ $ sudo runsc run hello\n+\n+`\n+}\n+\n+// SetFlags implements subcommands.Command.SetFlags.\n+func (s *Spec) SetFlags(f *flag.FlagSet) {\n+ f.StringVar(&s.bundle, \"bundle\", \".\", \"path to the root of the OCI bundle\")\n+}\n+\n+// Execute implements subcommands.Command.Execute.\n+func (s *Spec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {\n+ confPath := filepath.Join(s.bundle, \"config.json\")\n+ if _, err := os.Stat(confPath); !os.IsNotExist(err) {\n+ Fatalf(\"file %q already exists\", confPath)\n+ }\n+\n+ if err := ioutil.WriteFile(confPath, specTemplate, 0664); err != nil {\n+ Fatalf(\"error writing to %q: %v\", confPath, err)\n+ }\n+\n+ return subcommands.ExitSuccess\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/main.go",
"new_path": "runsc/main.go",
"diff": "@@ -86,6 +86,7 @@ func main() {\nsubcommands.Register(new(cmd.Restore), \"\")\nsubcommands.Register(new(cmd.Resume), \"\")\nsubcommands.Register(new(cmd.Run), \"\")\n+ subcommands.Register(new(cmd.Spec), \"\")\nsubcommands.Register(new(cmd.Start), \"\")\nsubcommands.Register(new(cmd.State), \"\")\nsubcommands.Register(new(cmd.Wait), \"\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added spec command to create OCI spec config.json
The spec command is analogous to the 'runc spec' command and allows for
the convenient creation of a config.json file for users that don't have
runc handy.
Change-Id: Ifdfec37e023048ea461c32da1a9042a45b37d856
PiperOrigin-RevId: 216907826 |
259,891 | 12.10.2018 13:57:10 | 25,200 | 47d3862c33b7b74b451ea71139abdea34d5b46bd | runsc: Support retrieving MTU via netdevice ioctl.
This enables ifconfig to display MTU. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -1266,9 +1266,10 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe\n// Gets the metric of the device. As per netdevice(7), this\n// always just sets ifr_metric to 0.\nusermem.ByteOrder.PutUint32(ifr.Data[:4], 0)\n+\ncase syscall.SIOCGIFMTU:\n// Gets the MTU of the device.\n- // TODO: Implement.\n+ usermem.ByteOrder.PutUint32(ifr.Data[:4], iface.MTU)\ncase syscall.SIOCGIFMAP:\n// Gets the hardware parameters of the device.\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Support retrieving MTU via netdevice ioctl.
This enables ifconfig to display MTU.
PiperOrigin-RevId: 216917021
Change-Id: Id513b23d9d76899bcb71b0b6a25036f41629a923 |
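The epsocket change above copies the interface MTU into the ifreq buffer, which is what lets ifconfig inside the sandbox display it. For context, a sketch of how a guest program might issue the same SIOCGIFMTU ioctl is shown here; the hand-written ifreq layout targets linux/amd64 and the interface name eth0 is a placeholder, neither taken from the commit.

    package main

    import (
        "fmt"
        "syscall"
        "unsafe"
    )

    // ifreqMTU mirrors struct ifreq for SIOCGIFMTU on linux/amd64: a 16-byte
    // interface name followed by a 24-byte union, of which only the int32 MTU
    // field is read here.
    type ifreqMTU struct {
        Name [16]byte
        MTU  int32
        _    [20]byte
    }

    func main() {
        fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
        if err != nil {
            panic(err)
        }
        defer syscall.Close(fd)

        var req ifreqMTU
        copy(req.Name[:], "eth0") // placeholder interface name
        if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd),
            uintptr(syscall.SIOCGIFMTU), uintptr(unsafe.Pointer(&req))); errno != 0 {
            panic(errno)
        }
        fmt.Printf("MTU of eth0: %d\n", req.MTU)
    }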
259,948 | 15.10.2018 09:30:49 | 25,200 | 4ea69fce8def9e030cbbc4d803b95e632175750c | sentry: save fs.Dirent deleted info. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -119,7 +119,7 @@ type Dirent struct {\nparent *Dirent\n// deleted may be set atomically when removed.\n- deleted int32 `state:\"nosave\"`\n+ deleted int32\n// frozen indicates this entry can't walk to unknown nodes.\nfrozen bool\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: save fs.Dirent deleted info.
PiperOrigin-RevId: 217155458
Change-Id: Id3265b1ec784787039e2131c80254ac4937330c7 |
259,854 | 15.10.2018 20:21:06 | 25,200 | 324ad3564ba42a5106be77a06d0cd52290e1cd22 | Refactor host.ConnectedEndpoint
* Integrate recvMsg and sendMsg functions into Recv and Send respectively as
they are no longer shared.
* Clean up partial read/write error handling code.
* Re-order code to make sense given that there is no longer a host.endpoint
type. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/socket.go",
"new_path": "pkg/sentry/fs/host/socket.go",
"diff": "@@ -41,137 +41,6 @@ import (\n// N.B. 8MB is the default maximum on Linux (2 * sysctl_wmem_max).\nconst maxSendBufferSize = 8 << 20\n-// newSocket allocates a new unix socket with host endpoint.\n-func newSocket(ctx context.Context, orgfd int, saveable bool) (*fs.File, error) {\n- ownedfd := orgfd\n- srfd := -1\n- if saveable {\n- var err error\n- ownedfd, err = syscall.Dup(orgfd)\n- if err != nil {\n- return nil, err\n- }\n- srfd = orgfd\n- }\n- f := fd.New(ownedfd)\n- var q waiter.Queue\n- e, err := NewConnectedEndpoint(f, &q, \"\" /* path */)\n- if err != nil {\n- if saveable {\n- f.Close()\n- } else {\n- f.Release()\n- }\n- return nil, syserr.TranslateNetstackError(err).ToError()\n- }\n-\n- e.srfd = srfd\n- e.Init()\n-\n- ep := unix.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n-\n- return unixsocket.New(ctx, ep), nil\n-}\n-\n-// NewSocketWithDirent allocates a new unix socket with host endpoint.\n-//\n-// This is currently only used by unsaveable Gofer nodes.\n-//\n-// NewSocketWithDirent takes ownership of f on success.\n-func NewSocketWithDirent(ctx context.Context, d *fs.Dirent, f *fd.FD, flags fs.FileFlags) (*fs.File, error) {\n- f2 := fd.New(f.FD())\n- var q waiter.Queue\n- e, err := NewConnectedEndpoint(f2, &q, \"\" /* path */)\n- if err != nil {\n- f2.Release()\n- return nil, syserr.TranslateNetstackError(err).ToError()\n- }\n-\n- // Take ownship of the FD.\n- f.Release()\n-\n- e.Init()\n-\n- ep := unix.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n-\n- return unixsocket.NewWithDirent(ctx, d, ep, flags), nil\n-}\n-\n-func sendMsg(fd int, data [][]byte, controlMessages unix.ControlMessages, maxlen int, truncate bool) (uintptr, *tcpip.Error) {\n- if !controlMessages.Empty() {\n- return 0, tcpip.ErrInvalidEndpointState\n- }\n- n, totalLen, err := fdWriteVec(fd, data, maxlen, truncate)\n- if n < totalLen && err == nil {\n- // The host only returns a short write if it would otherwise\n- // block (and only for stream sockets).\n- err = syserror.EAGAIN\n- }\n- return n, translateError(err)\n-}\n-\n-func recvMsg(fd int, data [][]byte, numRights uintptr, peek bool, addr *tcpip.FullAddress, maxlen int) (uintptr, uintptr, unix.ControlMessages, *tcpip.Error) {\n- var cm unet.ControlMessage\n- if numRights > 0 {\n- cm.EnableFDs(int(numRights))\n- }\n- rl, ml, cl, rerr := fdReadVec(fd, data, []byte(cm), peek, maxlen)\n- if rl == 0 && rerr != nil {\n- return 0, 0, unix.ControlMessages{}, translateError(rerr)\n- }\n-\n- // Trim the control data if we received less than the full amount.\n- if cl < uint64(len(cm)) {\n- cm = cm[:cl]\n- }\n-\n- // Avoid extra allocations in the case where there isn't any control data.\n- if len(cm) == 0 {\n- return rl, ml, unix.ControlMessages{}, translateError(rerr)\n- }\n-\n- fds, err := cm.ExtractFDs()\n- if err != nil {\n- return 0, 0, unix.ControlMessages{}, translateError(err)\n- }\n-\n- if len(fds) == 0 {\n- return rl, ml, unix.ControlMessages{}, translateError(rerr)\n- }\n- return rl, ml, control.New(nil, nil, newSCMRights(fds)), translateError(rerr)\n-}\n-\n-// NewConnectedEndpoint creates a new ConnectedEndpoint backed by a host FD\n-// that will pretend to be bound at a given sentry path.\n-//\n-// The caller is responsible for calling Init(). 
Additionaly, Release needs to\n-// be called twice because host.ConnectedEndpoint is both a unix.Receiver and\n-// unix.ConnectedEndpoint.\n-func NewConnectedEndpoint(file *fd.FD, queue *waiter.Queue, path string) (*ConnectedEndpoint, *tcpip.Error) {\n- e := ConnectedEndpoint{\n- path: path,\n- queue: queue,\n- file: file,\n- srfd: -1,\n- }\n-\n- if err := e.init(); err != nil {\n- return nil, err\n- }\n-\n- // AtomicRefCounters start off with a single reference. We need two.\n- e.ref.IncRef()\n-\n- return &e, nil\n-}\n-\n-// Init will do initialization required without holding other locks.\n-func (c *ConnectedEndpoint) Init() {\n- if err := fdnotifier.AddFD(int32(c.file.FD()), c.queue); err != nil {\n- panic(err)\n- }\n-}\n-\n// ConnectedEndpoint is a host FD backed implementation of\n// unix.ConnectedEndpoint and unix.Receiver.\n//\n@@ -249,6 +118,93 @@ func (c *ConnectedEndpoint) init() *tcpip.Error {\nreturn nil\n}\n+// NewConnectedEndpoint creates a new ConnectedEndpoint backed by a host FD\n+// that will pretend to be bound at a given sentry path.\n+//\n+// The caller is responsible for calling Init(). Additionaly, Release needs to\n+// be called twice because ConnectedEndpoint is both a unix.Receiver and\n+// unix.ConnectedEndpoint.\n+func NewConnectedEndpoint(file *fd.FD, queue *waiter.Queue, path string) (*ConnectedEndpoint, *tcpip.Error) {\n+ e := ConnectedEndpoint{\n+ path: path,\n+ queue: queue,\n+ file: file,\n+ srfd: -1,\n+ }\n+\n+ if err := e.init(); err != nil {\n+ return nil, err\n+ }\n+\n+ // AtomicRefCounters start off with a single reference. We need two.\n+ e.ref.IncRef()\n+\n+ return &e, nil\n+}\n+\n+// Init will do initialization required without holding other locks.\n+func (c *ConnectedEndpoint) Init() {\n+ if err := fdnotifier.AddFD(int32(c.file.FD()), c.queue); err != nil {\n+ panic(err)\n+ }\n+}\n+\n+// NewSocketWithDirent allocates a new unix socket with host endpoint.\n+//\n+// This is currently only used by unsaveable Gofer nodes.\n+//\n+// NewSocketWithDirent takes ownership of f on success.\n+func NewSocketWithDirent(ctx context.Context, d *fs.Dirent, f *fd.FD, flags fs.FileFlags) (*fs.File, error) {\n+ f2 := fd.New(f.FD())\n+ var q waiter.Queue\n+ e, err := NewConnectedEndpoint(f2, &q, \"\" /* path */)\n+ if err != nil {\n+ f2.Release()\n+ return nil, syserr.TranslateNetstackError(err).ToError()\n+ }\n+\n+ // Take ownship of the FD.\n+ f.Release()\n+\n+ e.Init()\n+\n+ ep := unix.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n+\n+ return unixsocket.NewWithDirent(ctx, d, ep, flags), nil\n+}\n+\n+// newSocket allocates a new unix socket with host endpoint.\n+func newSocket(ctx context.Context, orgfd int, saveable bool) (*fs.File, error) {\n+ ownedfd := orgfd\n+ srfd := -1\n+ if saveable {\n+ var err error\n+ ownedfd, err = syscall.Dup(orgfd)\n+ if err != nil {\n+ return nil, err\n+ }\n+ srfd = orgfd\n+ }\n+ f := fd.New(ownedfd)\n+ var q waiter.Queue\n+ e, err := NewConnectedEndpoint(f, &q, \"\" /* path */)\n+ if err != nil {\n+ if saveable {\n+ f.Close()\n+ } else {\n+ f.Release()\n+ }\n+ return nil, syserr.TranslateNetstackError(err).ToError()\n+ }\n+\n+ e.srfd = srfd\n+ e.Init()\n+\n+ ep := unix.NewExternal(e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)\n+\n+ return unixsocket.New(ctx, ep), nil\n+}\n+\n// Send implements unix.ConnectedEndpoint.Send.\nfunc (c *ConnectedEndpoint) Send(data [][]byte, controlMessages unix.ControlMessages, from tcpip.FullAddress) (uintptr, bool, *tcpip.Error) {\nc.mu.RLock()\n@@ -257,14 
+213,30 @@ func (c *ConnectedEndpoint) Send(data [][]byte, controlMessages unix.ControlMess\nreturn 0, false, tcpip.ErrClosedForSend\n}\n+ if !controlMessages.Empty() {\n+ return 0, false, tcpip.ErrInvalidEndpointState\n+ }\n+\n// Since stream sockets don't preserve message boundaries, we can write\n// only as much of the message as fits in the send buffer.\ntruncate := c.stype == unix.SockStream\n- n, err := sendMsg(c.file.FD(), data, controlMessages, c.sndbuf, truncate)\n- // There is no need for the callee to call SendNotify because sendMsg uses\n- // the host's sendmsg(2) and the host kernel's queue.\n- return n, false, err\n+ n, totalLen, err := fdWriteVec(c.file.FD(), data, c.sndbuf, truncate)\n+ if n < totalLen && err == nil {\n+ // The host only returns a short write if it would otherwise\n+ // block (and only for stream sockets).\n+ err = syserror.EAGAIN\n+ }\n+ if n > 0 && err != syserror.EAGAIN {\n+ // The caller may need to block to send more data, but\n+ // otherwise there isn't anything that can be done about an\n+ // error with a partial write.\n+ err = nil\n+ }\n+\n+ // There is no need for the callee to call SendNotify because fdWriteVec\n+ // uses the host's sendmsg(2) and the host kernel's queue.\n+ return n, false, translateError(err)\n}\n// SendNotify implements unix.ConnectedEndpoint.SendNotify.\n@@ -318,17 +290,46 @@ func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, p\nreturn 0, 0, unix.ControlMessages{}, tcpip.FullAddress{}, false, tcpip.ErrClosedForReceive\n}\n+ var cm unet.ControlMessage\n+ if numRights > 0 {\n+ cm.EnableFDs(int(numRights))\n+ }\n+\n// N.B. Unix sockets don't have a receive buffer, the send buffer\n// serves both purposes.\n- rl, ml, cm, err := recvMsg(c.file.FD(), data, numRights, peek, nil, c.sndbuf)\n- if rl > 0 && err == tcpip.ErrWouldBlock {\n- // Message did not fill buffer; that's fine, no need to block.\n+ rl, ml, cl, err := fdReadVec(c.file.FD(), data, []byte(cm), peek, c.sndbuf)\n+ if rl > 0 && err != nil {\n+ // We got some data, so all we need to do on error is return\n+ // the data that we got. Short reads are fine, no need to\n+ // block.\nerr = nil\n}\n+ if err != nil {\n+ return 0, 0, unix.ControlMessages{}, tcpip.FullAddress{}, false, translateError(err)\n+ }\n- // There is no need for the callee to call RecvNotify because recvMsg uses\n+ // There is no need for the callee to call RecvNotify because fdReadVec uses\n// the host's recvmsg(2) and the host kernel's queue.\n- return rl, ml, cm, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, err\n+\n+ // Trim the control data if we received less than the full amount.\n+ if cl < uint64(len(cm)) {\n+ cm = cm[:cl]\n+ }\n+\n+ // Avoid extra allocations in the case where there isn't any control data.\n+ if len(cm) == 0 {\n+ return rl, ml, unix.ControlMessages{}, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n+ }\n+\n+ fds, err := cm.ExtractFDs()\n+ if err != nil {\n+ return 0, 0, unix.ControlMessages{}, tcpip.FullAddress{}, false, translateError(err)\n+ }\n+\n+ if len(fds) == 0 {\n+ return rl, ml, unix.ControlMessages{}, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n+ }\n+ return rl, ml, control.New(nil, nil, newSCMRights(fds)), tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil\n}\n// close releases all resources related to the endpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/unix/unix.go",
"new_path": "pkg/tcpip/transport/unix/unix.go",
"diff": "@@ -562,6 +562,9 @@ type ConnectedEndpoint interface {\n// Send sends a single message. This method does not block.\n//\n// notify indicates if SendNotify should be called.\n+ //\n+ // tcpip.ErrWouldBlock can be returned along with a partial write if\n+ // the caller should block to send the rest of the data.\nSend(data [][]byte, controlMessages ControlMessages, from tcpip.FullAddress) (n uintptr, notify bool, err *tcpip.Error)\n// SendNotify notifies the ConnectedEndpoint of a successful Send. This\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor host.ConnectedEndpoint
* Integrate recvMsg and sendMsg functions into Recv and Send respectively as
they are no longer shared.
* Clean up partial read/write error handling code.
* Re-order code to make sense given that there is no longer a host.endpoint
type.
PiperOrigin-RevId: 217255072
Change-Id: Ib43fe9286452f813b8309d969be11f5fa40694cd |
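The unix.go hunk above documents the contract this refactor leans on: Send may return a partial count together with tcpip.ErrWouldBlock, and the caller is expected to retry the remainder once the endpoint becomes writable again. A caller-side sketch of that contract follows; the sendAll helper, the single-buffer slicing, the omitted wait on the endpoint's waiter queue, and the import paths are illustrative assumptions rather than code from this commit.

    package example

    import (
        "gvisor.googlesource.com/gvisor/pkg/tcpip"
        "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
    )

    // sendAll keeps calling Send until all of data has been accepted, following
    // the partial-write convention described above.
    func sendAll(ep unix.ConnectedEndpoint, data []byte) *tcpip.Error {
        for len(data) > 0 {
            n, notify, err := ep.Send([][]byte{data}, unix.ControlMessages{}, tcpip.FullAddress{})
            if notify {
                ep.SendNotify()
            }
            data = data[n:]
            if err == tcpip.ErrWouldBlock {
                // A real caller would block here until the endpoint is writable
                // again (e.g. by waiting on its waiter.Queue) before retrying.
                continue
            }
            if err != nil {
                return err
            }
        }
        return nil
    }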
259,992 | 17.10.2018 09:30:11 | 25,200 | ba33a70e47492f9cc8e3550ed795c892553ac1d4 | Attempt to deflake TestPythonHello
It has timed out running with kokoro a few times. It passes
consistently on my machine (200+ runsc). Increase the timeout
to see if it helps.
Failure: image_test.go:212: WaitForHTTP() timeout: Get dial tcp [::1]:32785: connect: connection refused | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -208,7 +208,7 @@ func TestPythonHello(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := testutil.WaitForHTTP(port, 20*time.Second); err != nil {\n+ if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {\nt.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Attempt to deflake TestPythonHello
It has timed out running with kokoro a few times. It passes
consistently on my machine (200+ runsc). Increase the timeout
to see if it helps.
Failure: image_test.go:212: WaitForHTTP() timeout: Get http://localhost:32785/: dial tcp [::1]:32785: connect: connection refused
PiperOrigin-RevId: 217532428
Change-Id: Ibf860aecf537830bef832e436f2e804b3fc12f2d |
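The only functional change in this record is the 20s to 30s budget passed to testutil.WaitForHTTP. To make clear what that budget covers, here is a sketch of a WaitForHTTP-style poll loop; the body, the 100ms poll interval, and the localhost URL are assumptions about what such a helper typically does, not the actual testutil implementation.

    package example

    import (
        "fmt"
        "net/http"
        "time"
    )

    // waitForHTTP polls the container's published port until an HTTP GET
    // succeeds or the timeout expires; the 30s budget above has to cover
    // container start, interpreter start, and the server beginning to listen.
    func waitForHTTP(port int, timeout time.Duration) error {
        url := fmt.Sprintf("http://localhost:%d/", port)
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if resp, err := http.Get(url); err == nil {
                resp.Body.Close()
                return nil
            }
            time.Sleep(100 * time.Millisecond)
        }
        return fmt.Errorf("timeout waiting for %s", url)
    }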