author
int64 658
755k
| date
stringlengths 19
19
| timezone
int64 -46,800
43.2k
| hash
stringlengths 40
40
| message
stringlengths 5
490
| mods
list | language
stringclasses 20
values | license
stringclasses 3
values | repo
stringlengths 5
68
| original_message
stringlengths 12
491
|
---|---|---|---|---|---|---|---|---|---|
259,992 | 02.07.2018 12:50:37 | 25,200 | fa64c2a1517d20c08447bb2230f2903ec3baade9 | Make default limits the same as with runc
Closes | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/limits.go",
"new_path": "runsc/boot/limits.go",
"diff": "@@ -23,29 +23,50 @@ import (\n// Mapping from linux resource names to limits.LimitType.\nvar fromLinuxResource = map[string]limits.LimitType{\n+ \"RLIMIT_AS\": limits.AS,\n+ \"RLIMIT_CORE\": limits.Core,\n\"RLIMIT_CPU\": limits.CPU,\n- \"RLIMIT_FSIZE\": limits.FileSize,\n\"RLIMIT_DATA\": limits.Data,\n- \"RLIMIT_STACK\": limits.Stack,\n- \"RLIMIT_CORE\": limits.Core,\n- \"RLIMIT_RSS\": limits.Rss,\n- \"RLIMIT_NPROC\": limits.ProcessCount,\n- \"RLIMIT_NOFILE\": limits.NumberOfFiles,\n- \"RLIMIT_MEMLOCK\": limits.MemoryPagesLocked,\n- \"RLIMIT_AS\": limits.AS,\n+ \"RLIMIT_FSIZE\": limits.FileSize,\n\"RLIMIT_LOCKS\": limits.Locks,\n- \"RLIMIT_SIGPENDING\": limits.SignalsPending,\n+ \"RLIMIT_MEMLOCK\": limits.MemoryPagesLocked,\n\"RLIMIT_MSGQUEUE\": limits.MessageQueueBytes,\n\"RLIMIT_NICE\": limits.Nice,\n+ \"RLIMIT_NOFILE\": limits.NumberOfFiles,\n+ \"RLIMIT_NPROC\": limits.ProcessCount,\n+ \"RLIMIT_RSS\": limits.Rss,\n\"RLIMIT_RTPRIO\": limits.RealTimePriority,\n\"RLIMIT_RTTIME\": limits.Rttime,\n+ \"RLIMIT_SIGPENDING\": limits.SignalsPending,\n+ \"RLIMIT_STACK\": limits.Stack,\n}\nfunc createLimitSet(spec *specs.Spec) (*limits.LimitSet, error) {\n- ls, err := limits.NewLinuxDistroLimitSet()\n+ ls, err := limits.NewLinuxLimitSet()\nif err != nil {\nreturn nil, err\n}\n+\n+ // Set default limits based on what containers get by default, ex:\n+ // $ docker run --rm debian prlimit\n+ ls.SetUnchecked(limits.AS, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.Core, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.CPU, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.Data, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.FileSize, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.Locks, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.MemoryPagesLocked, limits.Limit{Cur: 65536, Max: 65536})\n+ ls.SetUnchecked(limits.MessageQueueBytes, limits.Limit{Cur: 819200, Max: 819200})\n+ ls.SetUnchecked(limits.Nice, limits.Limit{Cur: 0, Max: 0})\n+ ls.SetUnchecked(limits.NumberOfFiles, limits.Limit{Cur: 1048576, Max: 1048576})\n+ ls.SetUnchecked(limits.ProcessCount, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.Rss, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.RealTimePriority, limits.Limit{Cur: 0, Max: 0})\n+ ls.SetUnchecked(limits.Rttime, limits.Limit{Cur: limits.Infinity, Max: limits.Infinity})\n+ ls.SetUnchecked(limits.SignalsPending, limits.Limit{Cur: 0, Max: 0})\n+ ls.SetUnchecked(limits.Stack, limits.Limit{Cur: 8388608, Max: limits.Infinity})\n+\n+ // Then apply overwrites on top of defaults.\nfor _, rl := range spec.Process.Rlimits {\nlt, ok := fromLinuxResource[rl.Type]\nif !ok {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Make default limits the same as with runc
Closes #2
PiperOrigin-RevId: 202997196
Change-Id: I0c9f6f5a8a1abe1ae427bca5f590bdf9f82a6675 |
259,881 | 02.07.2018 17:38:01 | 25,200 | 2821dfe6ce95ad32bb0084cb3b2335bf7b31de7a | Hold d.parent.mu when reading d.name | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -1342,7 +1342,15 @@ func (d *Dirent) InotifyEvent(events, cookie uint32) {\n// The ordering below is important, Linux always notifies the parent first.\nif d.parent != nil {\n- d.parent.Inode.Watches.Notify(d.name, events, cookie)\n+ // name is immediately stale w.r.t. renames (renameMu doesn't\n+ // protect against renames in the same directory). Holding\n+ // d.parent.mu around Notify() wouldn't matter since Notify\n+ // doesn't provide a synchronous mechanism for reading the name\n+ // anyway.\n+ d.parent.mu.Lock()\n+ name := d.name\n+ d.parent.mu.Unlock()\n+ d.parent.Inode.Watches.Notify(name, events, cookie)\n}\nd.Inode.Watches.Notify(\"\", events, cookie)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Hold d.parent.mu when reading d.name
PiperOrigin-RevId: 203041657
Change-Id: I120783d91712818e600505454c9276f8d9877f37 |
259,881 | 03.07.2018 11:27:29 | 25,200 | 062a6f6ec5f4bf2ce46790a22d8e7278d51e6836 | Handle NUL-only paths in exec
The path in execve(2), interpreter script, and ELF interpreter may all
be no more than a NUL-byte. Handle each of those cases. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -405,6 +405,10 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, info el\n}\ncase elf.PT_INTERP:\n+ if phdr.Filesz < 2 {\n+ ctx.Infof(\"PT_INTERP path too small: %v\", phdr.Filesz)\n+ return loadedELF{}, syserror.ENOEXEC\n+ }\nif phdr.Filesz > syscall.PathMax {\nctx.Infof(\"PT_INTERP path too big: %v\", phdr.Filesz)\nreturn loadedELF{}, syserror.ENOEXEC\n@@ -423,8 +427,26 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, info el\nreturn loadedELF{}, syserror.ENOEXEC\n}\n- // Strip NUL-terminator from string.\n- interpreter = string(path[:len(path)-1])\n+ // Strip NUL-terminator and everything beyond from\n+ // string. Note that there may be a NUL-terminator\n+ // before len(path)-1.\n+ interpreter = string(path[:bytes.IndexByte(path, '\\x00')])\n+ if interpreter == \"\" {\n+ // Linux actually attempts to open_exec(\"\\0\").\n+ // open_exec -> do_open_execat fails to check\n+ // that name != '\\0' before calling\n+ // do_filp_open, which thus opens the working\n+ // directory. do_open_execat returns EACCES\n+ // because the directory is not a regular file.\n+ //\n+ // We bypass that nonsense and simply\n+ // short-circuit with EACCES. Those this does\n+ // mean that there may be some edge cases where\n+ // the open path would return a different\n+ // error.\n+ ctx.Infof(\"PT_INTERP path is empty: %v\", path)\n+ return loadedELF{}, syserror.EACCES\n+ }\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/interpreter.go",
"new_path": "pkg/sentry/loader/interpreter.go",
"diff": "@@ -82,6 +82,11 @@ func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, ar\n}\n}\n+ if string(interp) == \"\" {\n+ ctx.Infof(\"Interpreter script contains no interpreter: %v\", line)\n+ return \"\", []string{}, syserror.ENOEXEC\n+ }\n+\n// Build the new argument list:\n//\n// 1. The interpreter.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -55,6 +55,11 @@ func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset in\n//\n// name must be a readable, executable, regular file.\nfunc openPath(ctx context.Context, mm *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, name string) (*fs.Dirent, *fs.File, error) {\n+ if name == \"\" {\n+ ctx.Infof(\"cannot open empty name\")\n+ return nil, nil, syserror.ENOENT\n+ }\n+\nd, err := mm.FindInode(ctx, root, wd, name, maxTraversals)\nif err != nil {\nreturn nil, nil, err\n"
}
] | Go | Apache License 2.0 | google/gvisor | Handle NUL-only paths in exec
The path in execve(2), interpreter script, and ELF interpreter may all
be no more than a NUL-byte. Handle each of those cases.
PiperOrigin-RevId: 203155745
Change-Id: I1c8b1b387924b23b2cf942341dfc76c9003da959 |
259,992 | 03.07.2018 11:33:20 | 25,200 | c1b4c1ffee850aea2880f5f91a1e48e840933c71 | Fix flaky image_test
Some failures were being ignored in run_tests.sh
Give more time for mysql to setup
Fix typo with network=host tests
Change httpd test to wait on http server being available, not only output | [
{
"change_type": "MODIFY",
"old_path": "kokoro/gcp_ubuntu/run_tests.sh",
"new_path": "kokoro/gcp_ubuntu/run_tests.sh",
"diff": "@@ -44,12 +44,15 @@ bazel test --test_output=errors //...\nexit_code=${?}\nif [[ ${exit_code} -eq 0 ]]; then\n+ declare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\n+ for v in \"${variations[@]}\"; do\n# image_test is tagged manual\n- bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime} //runsc/test/image:image_test\n- bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-kvm //runsc/test/image:image_test\n- bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-nethost //runsc/test/image:image_test\n- bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}-overlay //runsc/test/image:image_test\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}${v} //runsc/test/image:image_test\nexit_code=${?}\n+ if [[ ${exit_code} -ne 0 ]]; then\n+ break\n+ fi\n+ done\nfi\n# Best effort to uninstall\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "@@ -4,7 +4,7 @@ load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\ngo_test(\nname = \"image_test\",\n- size = \"small\",\n+ size = \"large\",\nsrcs = [\"image_test.go\"],\ndata = [\n\"latin10k.txt\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -156,7 +156,7 @@ func (d *docker) waitForOutput(pattern string, timeout time.Duration) error {\n// Success!\nreturn nil\n}\n- time.Sleep(10 * time.Millisecond)\n+ time.Sleep(100 * time.Millisecond)\n}\nreturn fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n}\n@@ -168,7 +168,7 @@ func (d *docker) waitForHTTP(port int, timeout time.Duration) error {\n// Success!\nreturn nil\n}\n- time.Sleep(10 * time.Millisecond)\n+ time.Sleep(100 * time.Millisecond)\n}\nreturn fmt.Errorf(\"timeout waiting for HTTP server on port %d\", port)\n}\n@@ -238,8 +238,8 @@ func TestHttpd(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.waitForOutput(\"'httpd -D FOREGROUND'\", 5*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n+ if err := d.waitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n}\nif err := testHTTPServer(port); err != nil {\n@@ -287,7 +287,7 @@ func TestMysql(t *testing.T) {\ndefer d.cleanUp()\n// Wait until it's up and running.\n- if err := d.waitForOutput(\"port: 3306 MySQL Community Server\", 30*time.Second); err != nil {\n+ if err := d.waitForOutput(\"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n@@ -311,10 +311,10 @@ func TestMysql(t *testing.T) {\ndefer client.cleanUp()\n// Ensure file executed to the end and shutdown mysql.\n- if err := client.waitForOutput(\"--------------\\nshutdown\\n--------------\", 5*time.Second); err != nil {\n+ if err := client.waitForOutput(\"--------------\\nshutdown\\n--------------\", 15*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n- if err := d.waitForOutput(\"mysqld: Shutdown complete\", 15*time.Second); err != nil {\n+ if err := d.waitForOutput(\"mysqld: Shutdown complete\", 30*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix flaky image_test
- Some failures were being ignored in run_tests.sh
- Give more time for mysql to setup
- Fix typo with network=host tests
- Change httpd test to wait on http server being available, not only output
PiperOrigin-RevId: 203156896
Change-Id: Ie1801dcd76e9b5fe4722c4d8695c76e40988dd74 |
259,992 | 03.07.2018 12:00:09 | 25,200 | 52ddb8571c466577843d8eb1c5e270dd54f1ade6 | Skip overlay on root when its readonly | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/fs.go",
"new_path": "runsc/boot/fs.go",
"diff": "@@ -207,7 +207,7 @@ func createRootMount(ctx context.Context, spec *specs.Spec, conf *Config, fds *f\nreturn nil, fmt.Errorf(\"error adding submount overlay: %v\", err)\n}\n- if conf.Overlay {\n+ if conf.Overlay && !spec.Root.Readonly {\nlog.Debugf(\"Adding overlay on top of root mount\")\n// Overlay a tmpfs filesystem on top of the root.\nrootInode, err = addOverlay(ctx, conf, rootInode, \"root-overlay-upper\", mf)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -998,6 +998,77 @@ func TestMountNewDir(t *testing.T) {\n}\n}\n+func TestReadonlyRoot(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"/bin/touch\", \"/foo\")\n+ spec.Root.Readonly = true\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ conf.Overlay = true\n+\n+ // Create, start and wait for the container.\n+ s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer s.Destroy()\n+ if err := s.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ ws, err := s.Wait()\n+ if err != nil {\n+ t.Fatalf(\"error waiting on container: %v\", err)\n+ }\n+ if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {\n+ t.Fatalf(\"container failed, waitStatus: %v\", ws)\n+ }\n+}\n+\n+func TestReadonlyMount(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"/bin/touch\", \"/foo/file\")\n+ dir, err := ioutil.TempDir(\"\", \"ro-mount\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir() failed: %v\", err)\n+ }\n+ spec.Mounts = append(spec.Mounts, specs.Mount{\n+ Destination: \"/foo\",\n+ Source: dir,\n+ Type: \"bind\",\n+ Options: []string{\"ro\"},\n+ })\n+ spec.Root.Readonly = false\n+\n+ rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ conf.Overlay = true\n+\n+ // Create, start and wait for the container.\n+ s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer s.Destroy()\n+ if err := s.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+ ws, err := s.Wait()\n+ if err != nil {\n+ t.Fatalf(\"error waiting on container: %v\", err)\n+ }\n+ if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {\n+ t.Fatalf(\"container failed, waitStatus: %v\", ws)\n+ }\n+}\n+\n// TestAbbreviatedIDs checks that runsc supports using abbreviated container\n// IDs in place of full IDs.\nfunc TestAbbreviatedIDs(t *testing.T) {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Skip overlay on root when its readonly
PiperOrigin-RevId: 203161098
Change-Id: Ia1904420cb3ee830899d24a4fe418bba6533be64 |
259,881 | 03.07.2018 12:52:39 | 25,200 | 660f1203ff1949a7b7869b801f4aa2133d30b91f | Fix runsc VDSO mapping
accidentally moved vdso into an
inner scope, never assigning the vdso variable passed to the Kernel and
thus skipping VDSO mappings.
Fix this and remove the ability for loadVDSO to skip VDSO mappings,
since tests that do so are gone. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/vdso.go",
"new_path": "pkg/sentry/loader/vdso.go",
"diff": "@@ -278,12 +278,6 @@ func PrepareVDSO(p platform.Platform) (*VDSO, error) {\n//\n// loadVDSO takes a reference on the VDSO and parameter page FrameRegions.\nfunc loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (usermem.Addr, error) {\n- if v == nil {\n- // Should be used only by tests.\n- ctx.Warningf(\"No VDSO provided, skipping VDSO mapping\")\n- return 0, nil\n- }\n-\nif v.os != bin.os {\nctx.Warningf(\"Binary ELF OS %v and VDSO ELF OS %v differ\", bin.os, v.os)\nreturn 0, syserror.ENOEXEC\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -134,7 +134,7 @@ func New(spec *specs.Spec, conf *Config, controllerFD, restoreFD int, ioFDs []in\n// Create VDSO.\n//\n// Pass k as the platform since it is savable, unlike the actual platform.\n- vdso, err := loader.PrepareVDSO(k)\n+ vdso, err = loader.PrepareVDSO(k)\nif err != nil {\nreturn nil, fmt.Errorf(\"error creating vdso: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix runsc VDSO mapping
80bdf8a4068de3ac4a73b6b61a0cdcfe3e3571af accidentally moved vdso into an
inner scope, never assigning the vdso variable passed to the Kernel and
thus skipping VDSO mappings.
Fix this and remove the ability for loadVDSO to skip VDSO mappings,
since tests that do so are gone.
PiperOrigin-RevId: 203169135
Change-Id: Ifd8cadcbaf82f959223c501edcc4d83d05327eba |
259,985 | 03.07.2018 14:07:43 | 25,200 | 34af9a61741f26be403231ec302b4e0795147906 | Fix data race on inotify.Watch.mask. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/inotify.go",
"new_path": "pkg/sentry/fs/inotify.go",
"diff": "@@ -16,6 +16,7 @@ package fs\nimport (\n\"sync\"\n+ \"sync/atomic\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/ilist\"\n@@ -279,13 +280,13 @@ func (i *Inotify) AddWatch(target *Dirent, mask uint32) int32 {\n// same inode. Obtain an extra reference if necessary.\nexisting.Pin(target)\n+ newmask := mask\nif mergeMask := mask&linux.IN_MASK_ADD != 0; mergeMask {\n// \"Add (OR) events to watch mask for this pathname if it already\n// exists (instead of replacing mask).\" -- inotify(7)\n- existing.mask |= mask\n- } else {\n- existing.mask = mask\n+ newmask |= atomic.LoadUint32(&existing.mask)\n}\n+ atomic.StoreUint32(&existing.mask, newmask)\nreturn existing.wd\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/inotify_watch.go",
"new_path": "pkg/sentry/fs/inotify_watch.go",
"diff": "@@ -16,6 +16,7 @@ package fs\nimport (\n\"sync\"\n+ \"sync/atomic\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n)\n@@ -33,9 +34,6 @@ type Watch struct {\n// Descriptor for this watch. This is unique across an inotify instance.\nwd int32\n- // Events being monitored via this watch.\n- mask uint32\n-\n// The inode being watched. Note that we don't directly hold a reference on\n// this inode. Instead we hold a reference on the dirent(s) containing the\n// inode, which we record in pins.\n@@ -48,6 +46,10 @@ type Watch struct {\n// mu protects the fields below.\nmu sync.Mutex `state:\"nosave\"`\n+ // Events being monitored via this watch. Must be accessed atomically,\n+ // writes are protected by mu.\n+ mask uint32\n+\n// pins is the set of dirents this watch is currently pinning in memory by\n// holding a reference to them. See Pin()/Unpin().\npins map[*Dirent]bool\n@@ -62,7 +64,7 @@ func (w *Watch) ID() uint64 {\n// should continue to be be notified of events after the target has been\n// unlinked.\nfunc (w *Watch) NotifyParentAfterUnlink() bool {\n- return w.mask&linux.IN_EXCL_UNLINK == 0\n+ return atomic.LoadUint32(&w.mask)&linux.IN_EXCL_UNLINK == 0\n}\n// isRenameEvent returns true if eventMask describes a rename event.\n@@ -73,7 +75,7 @@ func isRenameEvent(eventMask uint32) bool {\n// Notify queues a new event on this watch.\nfunc (w *Watch) Notify(name string, events uint32, cookie uint32) {\nunmaskableBits := ^uint32(0) &^ linux.IN_ALL_EVENTS\n- effectiveMask := unmaskableBits | w.mask\n+ effectiveMask := unmaskableBits | atomic.LoadUint32(&w.mask)\nmatchedEvents := effectiveMask & events\nif matchedEvents == 0 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix data race on inotify.Watch.mask.
PiperOrigin-RevId: 203180463
Change-Id: Ief50988c1c028f81ec07a26e704d893e86985bf0 |
259,991 | 06.07.2018 09:37:32 | 25,200 | f107a5b1a0e264d518617c57f0cf310b63e8b59c | Tests pause and resume functionality on a Python container. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "@@ -5,7 +5,10 @@ load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\ngo_test(\nname = \"image_test\",\nsize = \"large\",\n- srcs = [\"image_test.go\"],\n+ srcs = [\n+ \"image_test.go\",\n+ \"python_test.go\",\n+ ],\ndata = [\n\"latin10k.txt\",\n\"mysql.sql\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/python_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package image\n+\n+import (\n+ \"fmt\"\n+ \"net/http\"\n+ \"testing\"\n+ \"time\"\n+)\n+\n+func TestPythonHello(t *testing.T) {\n+ d := makeDocker(\"python-hello-test\")\n+ if out, err := d.run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ }\n+ defer d.cleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.findPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.findPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := d.waitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Ensure that content is being served.\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Errorf(\"Error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Tests pause and resume functionality on a Python container.
PiperOrigin-RevId: 203488336
Change-Id: I55e1b646f1fae73c27a49e064875d55f5605b200 |
259,854 | 06.07.2018 10:57:37 | 25,200 | 5c88e6a15d46bba6237a44d98c4e172237c9aea3 | Add non-AMD64 support to rawfile | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/BUILD",
"new_path": "pkg/tcpip/link/rawfile/BUILD",
"diff": "@@ -6,6 +6,8 @@ go_library(\nname = \"rawfile\",\nsrcs = [\n\"blockingpoll_amd64.s\",\n+ \"blockingpoll_unsafe.go\",\n+ \"blockingpoll_unsafe_amd64.go\",\n\"errors.go\",\n\"rawfile_unsafe.go\",\n],\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/link/rawfile/blockingpoll_unsafe.go",
"diff": "+// Copyright 2018 The Netstack Authors. All rights reserved.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+// +build linux,!amd64\n+\n+package rawfile\n+\n+import (\n+ \"syscall\"\n+ \"unsafe\"\n+)\n+\n+func blockingPoll(fds *pollEvent, nfds int, timeout int64) (int, syscall.Errno) {\n+ n, _, e := syscall.Syscall(syscall.SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))\n+ return int(n), e\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/link/rawfile/blockingpoll_unsafe_amd64.go",
"diff": "+// Copyright 2018 The Netstack Authors. All rights reserved.\n+// Use of this source code is governed by a BSD-style\n+// license that can be found in the LICENSE file.\n+\n+// +build linux,amd64\n+\n+package rawfile\n+\n+import (\n+ \"syscall\"\n+)\n+\n+//go:noescape\n+func blockingPoll(fds *pollEvent, nfds int, timeout int64) (int, syscall.Errno)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/errors.go",
"new_path": "pkg/tcpip/link/rawfile/errors.go",
"diff": "// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n+// +build linux\n+\npackage rawfile\nimport (\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/rawfile/rawfile_unsafe.go",
"new_path": "pkg/tcpip/link/rawfile/rawfile_unsafe.go",
"diff": "// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n+// +build linux\n+\n// Package rawfile contains utilities for using the netstack with raw host\n// files on Linux hosts.\npackage rawfile\n@@ -13,9 +15,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n)\n-//go:noescape\n-func blockingPoll(fds unsafe.Pointer, nfds int, timeout int64) (n int, err syscall.Errno)\n-\n// GetMTU determines the MTU of a network interface device.\nfunc GetMTU(name string) (uint32, error) {\nfd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0)\n@@ -108,6 +107,12 @@ func NonBlockingWriteN(fd int, bs ...[]byte) *tcpip.Error {\nreturn nil\n}\n+type pollEvent struct {\n+ fd int32\n+ events int16\n+ revents int16\n+}\n+\n// BlockingRead reads from a file descriptor that is set up as non-blocking. If\n// no data is available, it will block in a poll() syscall until the file\n// descirptor becomes readable.\n@@ -118,16 +123,12 @@ func BlockingRead(fd int, b []byte) (int, *tcpip.Error) {\nreturn int(n), nil\n}\n- event := struct {\n- fd int32\n- events int16\n- revents int16\n- }{\n+ event := pollEvent{\nfd: int32(fd),\nevents: 1, // POLLIN\n}\n- _, e = blockingPoll(unsafe.Pointer(&event), 1, -1)\n+ _, e = blockingPoll(&event, 1, -1)\nif e != 0 && e != syscall.EINTR {\nreturn 0, TranslateErrno(e)\n}\n@@ -144,16 +145,12 @@ func BlockingReadv(fd int, iovecs []syscall.Iovec) (int, *tcpip.Error) {\nreturn int(n), nil\n}\n- event := struct {\n- fd int32\n- events int16\n- revents int16\n- }{\n+ event := pollEvent{\nfd: int32(fd),\nevents: 1, // POLLIN\n}\n- _, e = blockingPoll(unsafe.Pointer(&event), 1, -1)\n+ _, e = blockingPoll(&event, 1, -1)\nif e != 0 && e != syscall.EINTR {\nreturn 0, TranslateErrno(e)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add non-AMD64 support to rawfile
PiperOrigin-RevId: 203499064
Change-Id: I2cd5189638e94ce926f1e82c1264a8d3ece9dfa5 |
259,881 | 09.07.2018 11:43:06 | 25,200 | 0dedac637ff9f6f7a0556d42d90787584a4051da | Trim all whitespace between interpreter and arg
Multiple whitespace characters are allowed. This fixes Ubuntu's
/usr/sbin/invoke-rc.d, which has trailing whitespace after the
interpreter which we were treating as an arg. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/interpreter.go",
"new_path": "pkg/sentry/loader/interpreter.go",
"diff": "@@ -66,7 +66,7 @@ func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, ar\n// Skip any whitespace before the interpeter.\nline = bytes.TrimLeft(line, \" \\t\")\n- // Linux only looks for a space or tab delimiting the interpreter and\n+ // Linux only looks for spaces or tabs delimiting the interpreter and\n// arg.\n//\n// execve(2): \"On Linux, the entire string following the interpreter\n@@ -77,9 +77,7 @@ func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, ar\ni = bytes.IndexAny(line, \" \\t\")\nif i >= 0 {\ninterp = line[:i]\n- if i+1 < len(line) {\n- arg = line[i+1:]\n- }\n+ arg = bytes.TrimLeft(line[i:], \" \\t\")\n}\nif string(interp) == \"\" {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Trim all whitespace between interpreter and arg
Multiple whitespace characters are allowed. This fixes Ubuntu's
/usr/sbin/invoke-rc.d, which has trailing whitespace after the
interpreter which we were treating as an arg.
PiperOrigin-RevId: 203802278
Change-Id: I0a6cdb0af4b139cf8abb22fa70351fe3697a5c6b |
259,885 | 09.07.2018 16:15:14 | 25,200 | 41aeb680b1882c9416e25e100b5ff5eebead36de | Inherit parent in clone(CLONE_THREAD) under TaskSet.mu. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -220,18 +220,15 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\npidns = pidns.NewChild(userns)\n}\ntg := t.tg\n- parent := t.parent\nif opts.NewThreadGroup {\nsh := t.tg.signalHandlers\nif opts.NewSignalHandlers {\nsh = sh.Fork()\n}\ntg = NewThreadGroup(pidns, sh, opts.TerminationSignal, tg.limits.GetCopy(), t.k.monotonicClock)\n- parent = t\n}\ncfg := &TaskConfig{\nKernel: t.k,\n- Parent: parent,\nThreadGroup: tg,\nTaskContext: tc,\nTaskResources: t.tr.Fork(!opts.NewFiles, !opts.NewFSContext),\n@@ -242,6 +239,11 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\nUTSNamespace: utsns,\nIPCNamespace: ipcns,\n}\n+ if opts.NewThreadGroup {\n+ cfg.Parent = t\n+ } else {\n+ cfg.InheritParent = t\n+ }\nif opts.NewNetworkNamespace {\ncfg.NetworkNamespaced = true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_start.go",
"new_path": "pkg/sentry/kernel/task_start.go",
"diff": "@@ -31,6 +31,10 @@ type TaskConfig struct {\n// Parent is the new task's parent. Parent may be nil.\nParent *Task\n+ // If InheritParent is not nil, use InheritParent's parent as the new\n+ // task's parent.\n+ InheritParent *Task\n+\n// ThreadGroup is the ThreadGroup the new task belongs to.\n*ThreadGroup\n@@ -133,6 +137,9 @@ func (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) {\n// IDs).\nt.updateLogPrefixLocked()\n+ if cfg.InheritParent != nil {\n+ t.parent = cfg.InheritParent.parent\n+ }\nif t.parent != nil {\nt.parent.children[t] = struct{}{}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Inherit parent in clone(CLONE_THREAD) under TaskSet.mu.
PiperOrigin-RevId: 203849534
Change-Id: I4d81513bfd32e0b7fc40c8a4c194eba7abc35a83 |
260,013 | 09.07.2018 20:47:32 | 25,200 | da9b5153f2fafab1597b34336f8a95c1b861f0ac | Fix two race conditions in tcp stack. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -949,9 +949,10 @@ func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error {\n}\nif n¬ifyReset != 0 {\n+ e.mu.Lock()\ne.resetConnectionLocked(tcpip.ErrConnectionAborted)\n+ e.mu.Unlock()\n}\n-\nif n¬ifyClose != 0 && closeTimer == nil {\n// Reset the connection 3 seconds after the\n// endpoint has been closed.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -93,6 +93,8 @@ type endpoint struct {\n//\n// Once the peer has closed its send side, rcvClosed is set to true\n// to indicate to users that no more data is coming.\n+ //\n+ // rcvListMu can be taken after the endpoint mu below.\nrcvListMu sync.Mutex `state:\"nosave\"`\nrcvList segmentList\nrcvClosed bool\n@@ -394,7 +396,10 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages,\n// but has some pending unread data. Also note that a RST being received\n// would cause the state to become stateError so we should allow the\n// reads to proceed before returning a ECONNRESET.\n- if s := e.state; s != stateConnected && s != stateClosed && e.rcvBufUsed == 0 {\n+ e.rcvListMu.Lock()\n+ bufUsed := e.rcvBufUsed\n+ if s := e.state; s != stateConnected && s != stateClosed && bufUsed == 0 {\n+ e.rcvListMu.Unlock()\ne.mu.RUnlock()\nif s == stateError {\nreturn buffer.View{}, tcpip.ControlMessages{}, e.hardError\n@@ -402,7 +407,6 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages,\nreturn buffer.View{}, tcpip.ControlMessages{}, tcpip.ErrInvalidEndpointState\n}\n- e.rcvListMu.Lock()\nv, err := e.readLocked()\ne.rcvListMu.Unlock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix two race conditions in tcp stack.
PiperOrigin-RevId: 203880278
Change-Id: I66b790a616de59142859cc12db4781b57ea626d3 |
259,854 | 09.07.2018 21:19:58 | 25,200 | afd655a5d8b9d9bc747ee99b1ec2475cc526c996 | Notify UDP and Ping endpoints on close | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/ping/endpoint.go",
"new_path": "pkg/tcpip/transport/ping/endpoint.go",
"diff": "@@ -92,7 +92,6 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\n// associated with it.\nfunc (e *endpoint) Close() {\ne.mu.Lock()\n- defer e.mu.Unlock()\nswitch e.state {\ncase stateBound, stateConnected:\n@@ -113,6 +112,10 @@ func (e *endpoint) Close() {\n// Update the state.\ne.state = stateClosed\n+\n+ e.mu.Unlock()\n+\n+ e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut)\n}\n// Read reads data from the endpoint. This method does not block if\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -121,7 +121,6 @@ func NewConnectedEndpoint(stack *stack.Stack, r *stack.Route, id stack.Transport\n// associated with it.\nfunc (e *endpoint) Close() {\ne.mu.Lock()\n- defer e.mu.Unlock()\nswitch e.state {\ncase stateBound, stateConnected:\n@@ -142,6 +141,10 @@ func (e *endpoint) Close() {\n// Update the state.\ne.state = stateClosed\n+\n+ e.mu.Unlock()\n+\n+ e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut)\n}\n// Read reads data from the endpoint. This method does not block if\n"
}
] | Go | Apache License 2.0 | google/gvisor | Notify UDP and Ping endpoints on close
PiperOrigin-RevId: 203883138
Change-Id: I7500c0a70f5d71c3fb37e2477f7fc466fa92fd3e |
259,948 | 10.07.2018 13:53:39 | 25,200 | bf580cf64dbea1c70a3269914fad6490f7a4968d | netstack: only do connected TCP S/R for loopback connections. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/loopback/loopback.go",
"new_path": "pkg/tcpip/link/loopback/loopback.go",
"diff": "@@ -56,7 +56,7 @@ func (*endpoint) MTU() uint32 {\n// Capabilities implements stack.LinkEndpoint.Capabilities. Loopback advertises\n// itself as supporting checksum offload, but in reality it's just omitted.\nfunc (*endpoint) Capabilities() stack.LinkEndpointCapabilities {\n- return stack.CapabilityChecksumOffload\n+ return stack.CapabilityChecksumOffload | stack.CapabilitySaveRestore\n}\n// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. Given that the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -201,6 +201,7 @@ type LinkEndpointCapabilities uint\nconst (\nCapabilityChecksumOffload LinkEndpointCapabilities = 1 << iota\nCapabilityResolutionRequired\n+ CapabilitySaveRestore\n)\n// LinkEndpoint is the interface implemented by data link layer protocols (e.g.,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -50,11 +50,16 @@ func (e *endpoint) beforeSave() {\nswitch e.state {\ncase stateInitial, stateBound:\n- case stateListen, stateConnecting, stateConnected:\n- if e.state == stateConnected && !e.workerRunning {\n+ case stateConnected:\n+ if e.route.Capabilities()&stack.CapabilitySaveRestore == 0 {\n+ panic(tcpip.ErrSaveRejection{fmt.Errorf(\"endpoint cannot be saved in connected state: local %v:%d, remote %v:%d\", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)})\n+ }\n+ if !e.workerRunning {\n// The endpoint must be in acceptedChan.\nbreak\n}\n+ fallthrough\n+ case stateListen, stateConnecting:\ne.drainSegmentLocked()\nif e.state != stateClosed && e.state != stateError {\nif !e.workerRunning {\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: only do connected TCP S/R for loopback connections.
PiperOrigin-RevId: 204006237
Change-Id: Ica8402ab54d9dd7d11cc41c6d74aacef51d140b7 |
259,885 | 10.07.2018 13:58:00 | 25,200 | 06920b3d1bb6346a20aa0e154b14e68116919dbc | Exit tmpfs.fileInodeOperations.Translate early if required.Start >= EOF.
Otherwise required and optional can be empty or have negative length. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tmpfs/inode_file.go",
"new_path": "pkg/sentry/fs/tmpfs/inode_file.go",
"diff": "@@ -451,9 +451,12 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional\n// Constrain translations to f.attr.Unstable.Size (rounded up) to prevent\n// translation to pages that may be concurrently truncated.\npgend := fs.OffsetPageEnd(f.attr.Unstable.Size)\n- var buserr error\n+ var beyondEOF bool\nif required.End > pgend {\n- buserr = &memmap.BusError{io.EOF}\n+ if required.Start >= pgend {\n+ return nil, &memmap.BusError{io.EOF}\n+ }\n+ beyondEOF = true\nrequired.End = pgend\n}\nif optional.End > pgend {\n@@ -481,9 +484,12 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional\n// Don't return the error returned by f.data.Fill if it occurred outside of\n// required.\nif translatedEnd < required.End && cerr != nil {\n- return ts, cerr\n+ return ts, &memmap.BusError{cerr}\n+ }\n+ if beyondEOF {\n+ return ts, &memmap.BusError{io.EOF}\n}\n- return ts, buserr\n+ return ts, nil\n}\n// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Exit tmpfs.fileInodeOperations.Translate early if required.Start >= EOF.
Otherwise required and optional can be empty or have negative length.
PiperOrigin-RevId: 204007079
Change-Id: I59e472a87a8caac11ffb9a914b8d79bf0cd70995 |
259,991 | 11.07.2018 09:36:20 | 25,200 | 81ae5f3df533d5e5990baaa105392f59e28d5730 | Created runsc and docker integration tests.
Moved some of the docker image functions to testutil.go.
Test runsc commands create, start, stop, pause, and resume. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "// Package image provides end-to-end image tests for runsc. These tests require\n// docker and runsc to be installed on the machine. To set it up, run:\n//\n-// ./runsc/test/image/install.sh [--runtime <name>]\n+// ./runsc/test/install.sh [--runtime <name>]\n//\n// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n// environment variable (default: runsc-test).\n@@ -28,14 +28,8 @@ package image\nimport (\n\"fmt\"\n\"io/ioutil\"\n- \"log\"\n- \"math/rand\"\n\"net/http\"\n\"os\"\n- \"os/exec\"\n- \"path\"\n- \"regexp\"\n- \"strconv\"\n\"strings\"\n\"testing\"\n\"time\"\n@@ -43,144 +37,14 @@ import (\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n-func init() {\n- rand.Seed(time.Now().UnixNano())\n-}\n-\n-func runtime() string {\n- r := os.Getenv(\"RUNSC_RUNTIME\")\n- if r == \"\" {\n- return \"runsc-test\"\n- }\n- return r\n-}\n-\n-func mountArg(source, target string) string {\n- return fmt.Sprintf(\"%s:%s\", source, target)\n-}\n-\n-func linkArg(source *docker, target string) string {\n- return fmt.Sprintf(\"%s:%s\", source.name, target)\n-}\n-\n-// prepareFiles creates temp directory to copy files there. The sandbox doesn't\n-// have access to files in the test dir.\n-func prepareFiles(names ...string) (string, error) {\n- dir, err := ioutil.TempDir(\"\", \"image-test\")\n- if err != nil {\n- return \"\", fmt.Errorf(\"ioutil.TempDir failed: %v\", err)\n- }\n- if err := os.Chmod(dir, 0777); err != nil {\n- return \"\", fmt.Errorf(\"os.Chmod(%q, 0777) failed: %v\", dir, err)\n- }\n- for _, name := range names {\n- src := getLocalPath(name)\n- dst := path.Join(dir, name)\n- if err := testutil.Copy(src, dst); err != nil {\n- return \"\", fmt.Errorf(\"testutil.Copy(%q, %q) failed: %v\", src, dst, err)\n- }\n- }\n- return dir, nil\n-}\n-\n-func getLocalPath(file string) string {\n- return path.Join(\".\", file)\n-}\n-\n-type docker struct {\n- runtime string\n- name string\n-}\n-\n-func makeDocker(namePrefix string) docker {\n- suffix := fmt.Sprintf(\"-%06d\", rand.Int())[:7]\n- return docker{name: namePrefix + suffix, runtime: runtime()}\n-}\n-\n-// do executes docker command.\n-func (d *docker) do(args ...string) (string, error) {\n- fmt.Printf(\"Running: docker %s\\n\", args)\n- cmd := exec.Command(\"docker\", args...)\n- out, err := cmd.CombinedOutput()\n- if err != nil {\n- return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n- }\n- return string(out), nil\n-}\n-\n-// run calls 'docker run' with the arguments provided.\n-func (d *docker) run(args ...string) (string, error) {\n- a := []string{\"run\", \"--runtime\", d.runtime, \"--name\", d.name, \"-d\"}\n- a = append(a, args...)\n- return d.do(a...)\n-}\n-\n-// cleanUp kills and deletes the container.\n-func (d *docker) cleanUp() error {\n- if _, err := d.do(\"kill\", d.name); err != nil {\n- return fmt.Errorf(\"error killing container %q: %v\", d.name, err)\n- }\n- if _, err := d.do(\"rm\", d.name); err != nil {\n- return fmt.Errorf(\"error deleting container %q: %v\", d.name, err)\n- }\n- return nil\n-}\n-\n-// findPort returns the host port that is mapped to 'sandboxPort'. This calls\n-// docker to allocate a free port in the host and prevent conflicts.\n-func (d *docker) findPort(sandboxPort int) (int, error) {\n- format := fmt.Sprintf(`{{ (index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort }}`, sandboxPort)\n- out, err := d.do(\"inspect\", \"-f\", format, d.name)\n- if err != nil {\n- return -1, fmt.Errorf(\"error retrieving port: %v\", err)\n- }\n- port, err := strconv.Atoi(strings.TrimSuffix(string(out), \"\\n\"))\n- if err != nil {\n- return -1, fmt.Errorf(\"error parsing port %q: %v\", out, err)\n- }\n- return port, nil\n-}\n-\n-// waitForOutput calls 'docker logs' to retrieve containers output and searches\n-// for the given pattern.\n-func (d *docker) waitForOutput(pattern string, timeout time.Duration) error {\n- re := regexp.MustCompile(pattern)\n- var out string\n- for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n- var err error\n- out, err = d.do(\"logs\", d.name)\n- if err != nil {\n- return err\n- }\n- if re.MatchString(out) {\n- // Success!\n- return nil\n- }\n- time.Sleep(100 * time.Millisecond)\n- }\n- return fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n-}\n-\n-func (d *docker) waitForHTTP(port int, timeout time.Duration) error {\n- for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n- url := fmt.Sprintf(\"http://localhost:%d/\", port)\n- if _, err := http.Get(url); err == nil {\n- // Success!\n- return nil\n- }\n- time.Sleep(100 * time.Millisecond)\n- }\n- return fmt.Errorf(\"timeout waiting for HTTP server on port %d\", port)\n-}\n-\nfunc TestHelloWorld(t *testing.T) {\n- d := makeDocker(\"hello-test\")\n- if out, err := d.run(\"hello-world\"); err != nil {\n+ d := testutil.MakeDocker(\"hello-test\")\n+ if out, err := d.Run(\"hello-world\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer d.cleanUp()\n+ defer d.CleanUp()\n- if err := d.waitForOutput(\"Hello from Docker!\", 5*time.Second); err != nil {\n+ if err := d.WaitForOutput(\"Hello from Docker!\", 5*time.Second); err != nil {\nt.Fatalf(\"docker didn't say hello: %v\", err)\n}\n}\n@@ -218,27 +82,27 @@ func testHTTPServer(port int) error {\n}\nfunc TestHttpd(t *testing.T) {\n- d := makeDocker(\"http-test\")\n+ d := testutil.MakeDocker(\"http-test\")\n- dir, err := prepareFiles(\"latin10k.txt\")\n+ dir, err := testutil.PrepareFiles(\"latin10k.txt\")\nif err != nil {\n- t.Fatalf(\"prepareFiles() failed: %v\", err)\n+ t.Fatalf(\"PrepareFiles() failed: %v\", err)\n}\n// Start the container.\n- if out, err := d.run(\"-p\", \"80\", \"-v\", mountArg(dir, \"/usr/local/apache2/htdocs:ro\"), \"httpd\"); err != nil {\n+ if out, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/local/apache2/htdocs:ro\"), \"httpd\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer d.cleanUp()\n+ defer d.CleanUp()\n// Find where port 80 is mapped to.\n- port, err := d.findPort(80)\n+ port, err := d.FindPort(80)\nif err != nil {\n- t.Fatalf(\"docker.findPort(80) failed: %v\", err)\n+ t.Fatalf(\"docker.FindPort(80) failed: %v\", err)\n}\n// Wait until it's up and running.\n- if err := d.waitForHTTP(port, 5*time.Second); err != nil {\n+ if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n}\n@@ -248,27 +112,27 @@ func TestHttpd(t *testing.T) {\n}\nfunc TestNginx(t *testing.T) {\n- d := makeDocker(\"net-test\")\n+ d := testutil.MakeDocker(\"net-test\")\n- dir, err := prepareFiles(\"latin10k.txt\")\n+ dir, err := testutil.PrepareFiles(\"latin10k.txt\")\nif err != nil {\n- t.Fatalf(\"prepareFiles() failed: %v\", err)\n+ t.Fatalf(\"PrepareFiles() failed: %v\", err)\n}\n// Start the container.\n- if out, err := d.run(\"-p\", \"80\", \"-v\", mountArg(dir, \"/usr/share/nginx/html:ro\"), \"nginx\"); err != nil {\n+ if out, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/share/nginx/html:ro\"), \"nginx\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer d.cleanUp()\n+ defer d.CleanUp()\n// Find where port 80 is mapped to.\n- port, err := d.findPort(80)\n+ port, err := d.FindPort(80)\nif err != nil {\n- t.Fatalf(\"docker.findPort(80) failed: %v\", err)\n+ t.Fatalf(\"docker.FindPort(80) failed: %v\", err)\n}\n// Wait until it's up and running.\n- if err := d.waitForHTTP(port, 5*time.Second); err != nil {\n+ if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n}\n@@ -278,64 +142,48 @@ func TestNginx(t *testing.T) {\n}\nfunc TestMysql(t *testing.T) {\n- d := makeDocker(\"mysql-test\")\n+ d := testutil.MakeDocker(\"mysql-test\")\n// Start the container.\n- if out, err := d.run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\n+ if out, err := d.Run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer d.cleanUp()\n+ defer d.CleanUp()\n// Wait until it's up and running.\n- if err := d.waitForOutput(\"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\n+ if err := d.WaitForOutput(\"port: 3306 MySQL Community Server\", 3*time.Minute); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n- client := makeDocker(\"mysql-client-test\")\n- dir, err := prepareFiles(\"mysql.sql\")\n+ client := testutil.MakeDocker(\"mysql-client-test\")\n+ dir, err := testutil.PrepareFiles(\"mysql.sql\")\nif err != nil {\n- t.Fatalf(\"prepareFiles() failed: %v\", err)\n+ t.Fatalf(\"PrepareFiles() failed: %v\", err)\n}\n// Tell mysql client to connect to the server and execute the file in verbose\n// mode to verify the output.\nargs := []string{\n- \"--link\", linkArg(&d, \"mysql\"),\n- \"-v\", mountArg(dir, \"/sql\"),\n+ \"--link\", testutil.LinkArg(&d, \"mysql\"),\n+ \"-v\", testutil.MountArg(dir, \"/sql\"),\n\"mysql\",\n\"mysql\", \"-hmysql\", \"-uroot\", \"-pfoobar123\", \"-v\", \"-e\", \"source /sql/mysql.sql\",\n}\n- if out, err := client.run(args...); err != nil {\n+ if out, err := client.Run(args...); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer client.cleanUp()\n+ defer client.CleanUp()\n// Ensure file executed to the end and shutdown mysql.\n- if err := client.waitForOutput(\"--------------\\nshutdown\\n--------------\", 15*time.Second); err != nil {\n+ if err := client.WaitForOutput(\"--------------\\nshutdown\\n--------------\", 15*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n- if err := d.waitForOutput(\"mysqld: Shutdown complete\", 30*time.Second); err != nil {\n+ if err := d.WaitForOutput(\"mysqld: Shutdown complete\", 30*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForOutput() timeout: %v\", err)\n}\n}\nfunc MainTest(m *testing.M) {\n- // Check correct docker is installed.\n- cmd := exec.Command(\"docker\", \"version\")\n- out, err := cmd.CombinedOutput()\n- if err != nil {\n- log.Fatalf(\"Error running %q: %v\", \"docker version\", err)\n- }\n- re := regexp.MustCompile(`Version:\\s+(\\d+)\\.(\\d+)\\.\\d.*`)\n- matches := re.FindStringSubmatch(string(out))\n- if len(matches) != 3 {\n- log.Fatalf(\"Invalid docker output: %s\", out)\n- }\n- major, _ := strconv.Atoi(matches[1])\n- minor, _ := strconv.Atoi(matches[2])\n- if major < 17 || (major == 17 && minor < 9) {\n- log.Fatalf(\"Docker version 17.09.0 or greater is required, found: %02d.%02d\", major, minor)\n- }\n-\n+ testutil.EnsureSupportedDockerVersion()\nos.Exit(m.Run())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/python_test.go",
"new_path": "runsc/test/image/python_test.go",
"diff": "@@ -19,23 +19,25 @@ import (\n\"net/http\"\n\"testing\"\n\"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\nfunc TestPythonHello(t *testing.T) {\n- d := makeDocker(\"python-hello-test\")\n- if out, err := d.run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ d := testutil.MakeDocker(\"python-hello-test\")\n+ if out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n- defer d.cleanUp()\n+ defer d.CleanUp()\n// Find where port 8080 is mapped to.\n- port, err := d.findPort(8080)\n+ port, err := d.FindPort(8080)\nif err != nil {\n- t.Fatalf(\"docker.findPort(8080) failed: %v\", err)\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n}\n// Wait until it's up and running.\n- if err := d.waitForHTTP(port, 5*time.Second); err != nil {\n+ if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\nt.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n}\n"
},
{
"change_type": "RENAME",
"old_path": "runsc/test/image/install.sh",
"new_path": "runsc/test/install.sh",
"diff": ""
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/integration/BUILD",
"diff": "+package(licenses = [\"notice\"]) # Apache 2.0\n+\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+\n+go_test(\n+ name = \"integration_test\",\n+ size = \"large\",\n+ srcs = [\n+ \"integration_test.go\",\n+ ],\n+ embed = [\":integration\"],\n+ tags = [\n+ # Requires docker and runsc to be configured before the test runs.\n+ \"manual\",\n+ \"local\",\n+ ],\n+ deps = [\n+ \"//runsc/test/testutil\",\n+ ],\n+)\n+\n+go_library(\n+ name = \"integration\",\n+ srcs = [\"integration.go\"],\n+ importpath = \"gvisor.googlesource.com/gvisor/runsc/test/integration\",\n+)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/integration/integration.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package integration is empty. See integration_test.go for description.\n+package integration\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/integration/integration_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package image provides end-to-end integration tests for runsc. These tests require\n+// docker and runsc to be installed on the machine. To set it up, run:\n+//\n+// ./runsc/test/install.sh [--runtime <name>]\n+//\n+// The tests expect the runtime name to be provided in the RUNSC_RUNTIME\n+// environment variable (default: runsc-test).\n+//\n+// Each test calls docker commands to start up a container, and tests that it is\n+// behaving properly, with various runsc commands. The container is killed and deleted\n+// at the end.\n+\n+package integration\n+\n+import (\n+ \"fmt\"\n+ \"net\"\n+ \"net/http\"\n+ \"os\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+// This container is a docker image for the Flask microframework hello world application.\n+const container = \"python-hello-test\"\n+\n+// httpRequestSucceeds sends a request to a given url and checks that the status is OK.\n+func httpRequestSucceeds(client http.Client, url string) error {\n+ // Ensure that content is being served.\n+ resp, err := client.Get(url)\n+ if err != nil {\n+ return fmt.Errorf(\"error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ return fmt.Errorf(\"wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+ return nil\n+}\n+\n+// TestLifeCycle tests a basic Create/Start/Stop docker container life cycle.\n+func TestLifeCycle(t *testing.T) {\n+ d := testutil.MakeDocker(container)\n+\n+ // Test docker create.\n+ if out, err := d.Do(\"create\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker create failed: %v\\nout: %s\", err, out)\n+ }\n+\n+ // Test docker start.\n+ if out, err := d.Do(\"start\", d.Name); err != nil {\n+ d.CleanUp()\n+ t.Fatalf(\"docker start failed: %v\\nout: %s\", err, out)\n+ }\n+\n+ // Test docker stop.\n+ if out, err := d.Do(\"stop\", d.Name); err != nil {\n+ d.CleanUp()\n+ t.Fatalf(\"docker stop failed: %v\\nout: %s\", err, out)\n+ }\n+\n+ // Test removing the container.\n+ if out, err := d.Do(\"rm\", d.Name); err != nil {\n+ t.Fatalf(\"docker rm failed: %v\\nout: %s\", err, out)\n+ }\n+}\n+\n+func TestPauseResume(t *testing.T) {\n+ d := testutil.MakeDocker(container)\n+ if out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ }\n+ defer d.CleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.FindPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ timeout := time.Duration(2 * time.Second)\n+ client := http.Client{\n+ Timeout: timeout,\n+ }\n+\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ // Check that container is working.\n+ if err := httpRequestSucceeds(client, url); err != nil {\n+ t.Errorf(\"http request failed: %v\", err)\n+ }\n+\n+ // Pause container.\n+ if out, err := d.Do(\"pause\", d.Name); err != nil {\n+ t.Fatalf(\"docker pause failed: %v\\nout: %s\", err, out)\n+ }\n+\n+ // Check if container is paused.\n+ switch _, err := client.Get(url); v := err.(type) {\n+ case nil:\n+ t.Errorf(\"http req expected to fail but it succeeded\")\n+ case net.Error:\n+ if !v.Timeout() {\n+ t.Errorf(\"http req got error %v, wanted timeout\", v)\n+ }\n+ default:\n+ t.Errorf(\"http req got unexpected error %v\", v)\n+ }\n+\n+ // Resume container.\n+ if out, err := d.Do(\"unpause\", d.Name); err != nil {\n+ t.Fatalf(\"docker unpause failed: %v\\nout: %s\", err, out)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Check if container is working again.\n+ if err := httpRequestSucceeds(client, url); err != nil {\n+ t.Errorf(\"http request failed: %v\", err)\n+ }\n+}\n+\n+func MainTest(m *testing.M) {\n+ testutil.EnsureSupportedDockerVersion()\n+ os.Exit(m.Run())\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/BUILD",
"new_path": "runsc/test/testutil/BUILD",
"diff": "@@ -4,7 +4,10 @@ load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\ngo_library(\nname = \"testutil\",\n- srcs = [\"testutil.go\"],\n+ srcs = [\n+ \"docker.go\",\n+ \"testutil.go\",\n+ ],\nimportpath = \"gvisor.googlesource.com/gvisor/runsc/test/testutil\",\nvisibility = [\n\"//runsc:__subpackages__\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/testutil/docker.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package testutil\n+\n+import (\n+ \"fmt\"\n+ \"io/ioutil\"\n+ \"log\"\n+ \"math/rand\"\n+ \"net/http\"\n+ \"os\"\n+ \"os/exec\"\n+ \"path\"\n+ \"regexp\"\n+ \"strconv\"\n+ \"strings\"\n+ \"time\"\n+)\n+\n+func init() {\n+ rand.Seed(time.Now().UnixNano())\n+}\n+\n+func runtime() string {\n+ r := os.Getenv(\"RUNSC_RUNTIME\")\n+ if r == \"\" {\n+ return \"runsc-test\"\n+ }\n+ return r\n+}\n+\n+// EnsureSupportedDockerVersion checks if correct docker is installed.\n+func EnsureSupportedDockerVersion() {\n+ cmd := exec.Command(\"docker\", \"version\")\n+ out, err := cmd.CombinedOutput()\n+ if err != nil {\n+ log.Fatalf(\"Error running %q: %v\", \"docker version\", err)\n+ }\n+ re := regexp.MustCompile(`Version:\\s+(\\d+)\\.(\\d+)\\.\\d.*`)\n+ matches := re.FindStringSubmatch(string(out))\n+ if len(matches) != 3 {\n+ log.Fatalf(\"Invalid docker output: %s\", out)\n+ }\n+ major, _ := strconv.Atoi(matches[1])\n+ minor, _ := strconv.Atoi(matches[2])\n+ if major < 17 || (major == 17 && minor < 9) {\n+ log.Fatalf(\"Docker version 17.09.0 or greater is required, found: %02d.%02d\", major, minor)\n+ }\n+}\n+\n+// MountArg formats the volume argument to mount in the container.\n+func MountArg(source, target string) string {\n+ return fmt.Sprintf(\"%s:%s\", source, target)\n+}\n+\n+// LinkArg formats the link argument.\n+func LinkArg(source *Docker, target string) string {\n+ return fmt.Sprintf(\"%s:%s\", source.Name, target)\n+}\n+\n+// PrepareFiles creates temp directory to copy files there. The sandbox doesn't\n+// have access to files in the test dir.\n+func PrepareFiles(names ...string) (string, error) {\n+ dir, err := ioutil.TempDir(\"\", \"image-test\")\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"ioutil.TempDir failed: %v\", err)\n+ }\n+ if err := os.Chmod(dir, 0777); err != nil {\n+ return \"\", fmt.Errorf(\"os.Chmod(%q, 0777) failed: %v\", dir, err)\n+ }\n+ for _, name := range names {\n+ src := getLocalPath(name)\n+ dst := path.Join(dir, name)\n+ if err := Copy(src, dst); err != nil {\n+ return \"\", fmt.Errorf(\"testutil.Copy(%q, %q) failed: %v\", src, dst, err)\n+ }\n+ }\n+ return dir, nil\n+}\n+\n+func getLocalPath(file string) string {\n+ return path.Join(\".\", file)\n+}\n+\n+// Docker contains the name and the runtime of a docker container.\n+type Docker struct {\n+ Runtime string\n+ Name string\n+}\n+\n+// MakeDocker sets up the struct for a Docker container.\n+// Names of containers will be unique.\n+func MakeDocker(namePrefix string) Docker {\n+ suffix := fmt.Sprintf(\"-%06d\", rand.Int())[:7]\n+ return Docker{Name: namePrefix + suffix, Runtime: runtime()}\n+}\n+\n+// Do executes docker command.\n+func (d *Docker) Do(args ...string) (string, error) {\n+ fmt.Printf(\"Running: docker %s\\n\", args)\n+ cmd := exec.Command(\"docker\", args...)\n+ out, err := cmd.CombinedOutput()\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n+ }\n+ return string(out), nil\n+}\n+\n+// Run calls 'docker run' with the arguments provided.\n+func (d *Docker) Run(args ...string) (string, error) {\n+ a := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-d\"}\n+ a = append(a, args...)\n+ return d.Do(a...)\n+}\n+\n+// CleanUp kills and deletes the container.\n+func (d *Docker) CleanUp() error {\n+ if _, err := d.Do(\"kill\", d.Name); err != nil {\n+ return fmt.Errorf(\"error killing container %q: %v\", d.Name, err)\n+ }\n+ if _, err := d.Do(\"rm\", d.Name); err != nil {\n+ return fmt.Errorf(\"error deleting container %q: %v\", d.Name, err)\n+ }\n+ return nil\n+}\n+\n+// FindPort returns the host port that is mapped to 'sandboxPort'. This calls\n+// docker to allocate a free port in the host and prevent conflicts.\n+func (d *Docker) FindPort(sandboxPort int) (int, error) {\n+ format := fmt.Sprintf(`{{ (index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort }}`, sandboxPort)\n+ out, err := d.Do(\"inspect\", \"-f\", format, d.Name)\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error retrieving port: %v\", err)\n+ }\n+ port, err := strconv.Atoi(strings.TrimSuffix(string(out), \"\\n\"))\n+ if err != nil {\n+ return -1, fmt.Errorf(\"error parsing port %q: %v\", out, err)\n+ }\n+ return port, nil\n+}\n+\n+// WaitForOutput calls 'docker logs' to retrieve containers output and searches\n+// for the given pattern.\n+func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) error {\n+ re := regexp.MustCompile(pattern)\n+ var out string\n+ for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n+ var err error\n+ out, err = d.Do(\"logs\", d.Name)\n+ if err != nil {\n+ return err\n+ }\n+ if re.MatchString(out) {\n+ // Success!\n+ return nil\n+ }\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+ return fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n+}\n+\n+// WaitForHTTP tries GET requests on a port until the call succeeds or a timeout.\n+func (d *Docker) WaitForHTTP(port int, timeout time.Duration) error {\n+ for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n+ url := fmt.Sprintf(\"http://localhost:%d/\", port)\n+ if _, err := http.Get(url); err == nil {\n+ // Success!\n+ return nil\n+ }\n+ time.Sleep(100 * time.Millisecond)\n+ }\n+ return fmt.Errorf(\"timeout waiting for HTTP server on port %d\", port)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Created runsc and docker integration tests.
Moved some of the docker image functions to testutil.go.
Test runsc commands create, start, stop, pause, and resume.
PiperOrigin-RevId: 204138452
Change-Id: Id00bc58d2ad230db5e9e905eed942187e68e7c7b |
259,885 | 11.07.2018 11:51:05 | 25,200 | ee0ef506d4060eaf0736997a56fd8490e2434495 | Add MemoryManager.Pin. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/mm/pma.go",
"new_path": "pkg/sentry/mm/pma.go",
"diff": "@@ -578,6 +578,96 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat\n}\n}\n+// Pin returns the platform.File ranges currently mapped by addresses in ar in\n+// mm, acquiring a reference on the returned ranges which the caller must\n+// release by calling Unpin. If not all addresses are mapped, Pin returns a\n+// non-nil error. Note that Pin may return both a non-empty slice of\n+// PinnedRanges and a non-nil error.\n+//\n+// Pin does not prevent mapped ranges from changing, making it unsuitable for\n+// most I/O. It should only be used in contexts that would use get_user_pages()\n+// in the Linux kernel.\n+//\n+// Preconditions: ar.Length() != 0. ar must be page-aligned.\n+func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {\n+ if checkInvariants {\n+ if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {\n+ panic(fmt.Sprintf(\"invalid ar: %v\", ar))\n+ }\n+ }\n+\n+ // Ensure that we have usable vmas.\n+ mm.mappingMu.RLock()\n+ vseg, vend, verr := mm.getVMAsLocked(ctx, ar, at, ignorePermissions)\n+ if vendaddr := vend.Start(); vendaddr < ar.End {\n+ if vendaddr <= ar.Start {\n+ mm.mappingMu.RUnlock()\n+ return nil, verr\n+ }\n+ ar.End = vendaddr\n+ }\n+\n+ // Ensure that we have usable pmas.\n+ mm.activeMu.Lock()\n+ pseg, pend, perr := mm.getPMAsLocked(ctx, vseg, ar, pmaOpts{\n+ breakCOW: at.Write,\n+ })\n+ mm.mappingMu.RUnlock()\n+ if pendaddr := pend.Start(); pendaddr < ar.End {\n+ if pendaddr <= ar.Start {\n+ mm.activeMu.Unlock()\n+ return nil, perr\n+ }\n+ ar.End = pendaddr\n+ }\n+\n+ // Gather pmas.\n+ var prs []PinnedRange\n+ for pseg.Ok() && pseg.Start() < ar.End {\n+ psar := pseg.Range().Intersect(ar)\n+ f := pseg.ValuePtr().file\n+ fr := pseg.fileRangeOf(psar)\n+ f.IncRef(fr)\n+ prs = append(prs, PinnedRange{\n+ Source: psar,\n+ File: f,\n+ Offset: fr.Start,\n+ })\n+ pseg = pseg.NextSegment()\n+ }\n+ mm.activeMu.Unlock()\n+\n+ // Return the first error in order of progress through ar.\n+ if perr != nil {\n+ return prs, perr\n+ }\n+ return prs, verr\n+}\n+\n+// PinnedRanges are returned by MemoryManager.Pin.\n+type PinnedRange struct {\n+ // Source is the corresponding range of addresses.\n+ Source usermem.AddrRange\n+\n+ // File is the mapped file.\n+ File platform.File\n+\n+ // Offset is the offset into File at which this PinnedRange begins.\n+ Offset uint64\n+}\n+\n+// FileRange returns the platform.File offsets mapped by pr.\n+func (pr PinnedRange) FileRange() platform.FileRange {\n+ return platform.FileRange{pr.Offset, pr.Offset + uint64(pr.Source.Length())}\n+}\n+\n+// Unpin releases the reference held by prs.\n+func Unpin(prs []PinnedRange) {\n+ for i := range prs {\n+ prs[i].File.DecRef(prs[i].FileRange())\n+ }\n+}\n+\n// movePMAsLocked moves all pmas in oldAR to newAR.\n//\n// Preconditions: mm.activeMu must be locked for writing. oldAR.Length() != 0.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/platform.go",
"new_path": "pkg/sentry/platform/platform.go",
"diff": "@@ -305,8 +305,8 @@ type File interface {\nMapInto(as AddressSpace, addr usermem.Addr, fr FileRange, at usermem.AccessType, precommit bool) error\n// MapInternal returns a mapping of the given file offsets in the invoking\n- // process' address space for reading and writing. The lifetime of the\n- // returned mapping is implementation-defined.\n+ // process' address space for reading and writing. The returned mapping is\n+ // valid as long as a reference is held on the mapped range.\n//\n// Note that fr.Start and fr.End need not be page-aligned.\n//\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add MemoryManager.Pin.
PiperOrigin-RevId: 204162313
Change-Id: Ib0593dde88ac33e222c12d0dca6733ef1f1035dc |
259,885 | 11.07.2018 14:23:17 | 25,200 | b9c469f37282129031a6036cfe43028faaeb1a96 | Move ptrace constants to abi/linux. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -48,6 +48,7 @@ go_library(\n\"netlink_route.go\",\n\"poll.go\",\n\"prctl.go\",\n+ \"ptrace.go\",\n\"rusage.go\",\n\"sched.go\",\n\"seccomp.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/ptrace.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// ptrace commands from include/uapi/linux/ptrace.h.\n+const (\n+ PTRACE_TRACEME = 0\n+ PTRACE_PEEKTEXT = 1\n+ PTRACE_PEEKDATA = 2\n+ PTRACE_PEEKUSR = 3\n+ PTRACE_POKETEXT = 4\n+ PTRACE_POKEDATA = 5\n+ PTRACE_POKEUSR = 6\n+ PTRACE_CONT = 7\n+ PTRACE_KILL = 8\n+ PTRACE_SINGLESTEP = 9\n+ PTRACE_ATTACH = 16\n+ PTRACE_DETACH = 17\n+ PTRACE_SYSCALL = 24\n+ PTRACE_SETOPTIONS = 0x4200\n+ PTRACE_GETEVENTMSG = 0x4201\n+ PTRACE_GETSIGINFO = 0x4202\n+ PTRACE_SETSIGINFO = 0x4203\n+ PTRACE_GETREGSET = 0x4204\n+ PTRACE_SETREGSET = 0x4205\n+ PTRACE_SEIZE = 0x4206\n+ PTRACE_INTERRUPT = 0x4207\n+ PTRACE_LISTEN = 0x4208\n+ PTRACE_PEEKSIGINFO = 0x4209\n+ PTRACE_GETSIGMASK = 0x420a\n+ PTRACE_SETSIGMASK = 0x420b\n+ PTRACE_SECCOMP_GET_FILTER = 0x420c\n+ PTRACE_SECCOMP_GET_METADATA = 0x420d\n+)\n+\n+// ptrace commands from arch/x86/include/uapi/asm/ptrace-abi.h.\n+const (\n+ PTRACE_GETREGS = 12\n+ PTRACE_SETREGS = 13\n+ PTRACE_GETFPREGS = 14\n+ PTRACE_SETFPREGS = 15\n+ PTRACE_GETFPXREGS = 18\n+ PTRACE_SETFPXREGS = 19\n+ PTRACE_OLDSETOPTIONS = 21\n+ PTRACE_GET_THREAD_AREA = 25\n+ PTRACE_SET_THREAD_AREA = 26\n+ PTRACE_ARCH_PRCTL = 30\n+ PTRACE_SYSEMU = 31\n+ PTRACE_SYSEMU_SINGLESTEP = 32\n+ PTRACE_SINGLEBLOCK = 33\n+)\n+\n+// ptrace event codes from include/uapi/linux/ptrace.h.\n+const (\n+ PTRACE_EVENT_FORK = 1\n+ PTRACE_EVENT_VFORK = 2\n+ PTRACE_EVENT_CLONE = 3\n+ PTRACE_EVENT_EXEC = 4\n+ PTRACE_EVENT_VFORK_DONE = 5\n+ PTRACE_EVENT_EXIT = 6\n+ PTRACE_EVENT_SECCOMP = 7\n+ PTRACE_EVENT_STOP = 128\n+)\n+\n+// PTRACE_SETOPTIONS options from include/uapi/linux/ptrace.h.\n+const (\n+ PTRACE_O_TRACESYSGOOD = 1\n+ PTRACE_O_TRACEFORK = 1 << PTRACE_EVENT_FORK\n+ PTRACE_O_TRACEVFORK = 1 << PTRACE_EVENT_VFORK\n+ PTRACE_O_TRACECLONE = 1 << PTRACE_EVENT_CLONE\n+ PTRACE_O_TRACEEXEC = 1 << PTRACE_EVENT_EXEC\n+ PTRACE_O_TRACEVFORKDONE = 1 << PTRACE_EVENT_VFORK_DONE\n+ PTRACE_O_TRACEEXIT = 1 << PTRACE_EVENT_EXIT\n+ PTRACE_O_TRACESECCOMP = 1 << PTRACE_EVENT_SECCOMP\n+ PTRACE_O_EXITKILL = 1 << 20\n+ PTRACE_O_SUSPEND_SECCOMP = 1 << 21\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/ptrace.go",
"new_path": "pkg/sentry/kernel/ptrace.go",
"diff": "@@ -16,7 +16,6 @@ package kernel\nimport (\n\"fmt\"\n- \"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n@@ -24,19 +23,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n-// ptrace constants from Linux's include/uapi/linux/ptrace.h.\n-const (\n- _PTRACE_EVENT_SECCOMP = 7\n- PTRACE_SEIZE = 0x4206\n- PTRACE_INTERRUPT = 0x4207\n- PTRACE_LISTEN = 0x4208\n- PTRACE_PEEKSIGINFO = 0x4209\n- PTRACE_GETSIGMASK = 0x420a\n- PTRACE_SETSIGMASK = 0x420b\n- _PTRACE_O_EXITKILL = 1 << 20\n- _PTRACE_O_TRACESECCOMP = 1 << _PTRACE_EVENT_SECCOMP\n-)\n-\n// ptraceOptions are the subset of options controlling a task's ptrace behavior\n// that are set by ptrace(PTRACE_SETOPTIONS).\ntype ptraceOptions struct {\n@@ -505,7 +491,7 @@ func (t *Task) ptraceSeccomp(data uint16) bool {\nreturn false\n}\nt.Debugf(\"Entering PTRACE_EVENT_SECCOMP stop\")\n- t.ptraceEventLocked(_PTRACE_EVENT_SECCOMP, uint64(data))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_SECCOMP, uint64(data))\nreturn true\n}\n@@ -587,19 +573,19 @@ func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions\ncase ptraceCloneKindClone:\nif t.ptraceOpts.TraceClone {\nt.Debugf(\"Entering PTRACE_EVENT_CLONE stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_CLONE, uint64(t.tg.pidns.tids[child]))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_CLONE, uint64(t.tg.pidns.tids[child]))\nevent = true\n}\ncase ptraceCloneKindFork:\nif t.ptraceOpts.TraceFork {\nt.Debugf(\"Entering PTRACE_EVENT_FORK stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_FORK, uint64(t.tg.pidns.tids[child]))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_FORK, uint64(t.tg.pidns.tids[child]))\nevent = true\n}\ncase ptraceCloneKindVfork:\nif t.ptraceOpts.TraceVfork {\nt.Debugf(\"Entering PTRACE_EVENT_VFORK stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK, uint64(t.tg.pidns.tids[child]))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_VFORK, uint64(t.tg.pidns.tids[child]))\nevent = true\n}\ndefault:\n@@ -657,7 +643,7 @@ func (t *Task) ptraceVforkDone(child ThreadID) bool {\nreturn false\n}\nt.Debugf(\"Entering PTRACE_EVENT_VFORK_DONE stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK_DONE, uint64(child))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_VFORK_DONE, uint64(child))\nreturn true\n}\n@@ -680,7 +666,7 @@ func (t *Task) ptraceExec(oldTID ThreadID) {\n}\nif t.ptraceOpts.TraceExec {\nt.Debugf(\"Entering PTRACE_EVENT_EXEC stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_EXEC, uint64(oldTID))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_EXEC, uint64(oldTID))\nreturn\n}\n// \"If the PTRACE_O_TRACEEXEC option is not in effect for the execing\n@@ -714,7 +700,7 @@ func (t *Task) ptraceExit() {\nstatus := t.exitStatus.Status()\nt.tg.signalHandlers.mu.Unlock()\nt.Debugf(\"Entering PTRACE_EVENT_EXIT stop\")\n- t.ptraceEventLocked(syscall.PTRACE_EVENT_EXIT, uint64(status))\n+ t.ptraceEventLocked(linux.PTRACE_EVENT_EXIT, uint64(status))\n}\n// Preconditions: The TaskSet mutex must be locked.\n@@ -762,7 +748,7 @@ func (t *Task) ptraceKill(target *Task) error {\n// Ptrace implements the ptrace system call.\nfunc (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n// PTRACE_TRACEME ignores all other arguments.\n- if req == syscall.PTRACE_TRACEME {\n+ if req == linux.PTRACE_TRACEME {\nreturn t.ptraceTraceme()\n}\n// All other ptrace requests operate on a current or future tracee\n@@ -774,12 +760,12 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n// PTRACE_ATTACH (and PTRACE_SEIZE, which is unimplemented) do not require\n// that target is not already a tracee.\n- if req == syscall.PTRACE_ATTACH {\n+ if req == linux.PTRACE_ATTACH {\nreturn t.ptraceAttach(target)\n}\n// PTRACE_KILL (and PTRACE_INTERRUPT, which is unimplemented) require that\n// the target is a tracee, but does not require that it is ptrace-stopped.\n- if req == syscall.PTRACE_KILL {\n+ if req == linux.PTRACE_KILL {\nreturn t.ptraceKill(target)\n}\n// All other ptrace requests require that the target is a ptrace-stopped\n@@ -812,37 +798,37 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n// Resuming commands end the ptrace stop, but only if successful.\nswitch req {\n- case syscall.PTRACE_DETACH:\n+ case linux.PTRACE_DETACH:\nif err := t.ptraceDetach(target, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n}\nreturn nil\n- case syscall.PTRACE_CONT:\n+ case linux.PTRACE_CONT:\nif err := target.ptraceUnstop(ptraceSyscallNone, false, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n}\nreturn nil\n- case syscall.PTRACE_SYSCALL:\n+ case linux.PTRACE_SYSCALL:\nif err := target.ptraceUnstop(ptraceSyscallIntercept, false, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n}\nreturn nil\n- case syscall.PTRACE_SINGLESTEP:\n+ case linux.PTRACE_SINGLESTEP:\nif err := target.ptraceUnstop(ptraceSyscallNone, true, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n}\nreturn nil\n- case syscall.PTRACE_SYSEMU:\n+ case linux.PTRACE_SYSEMU:\nif err := target.ptraceUnstop(ptraceSyscallEmu, false, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n}\nreturn nil\n- case syscall.PTRACE_SYSEMU_SINGLESTEP:\n+ case linux.PTRACE_SYSEMU_SINGLESTEP:\nif err := target.ptraceUnstop(ptraceSyscallEmu, true, linux.Signal(data)); err != nil {\ntarget.ptraceUnfreeze()\nreturn err\n@@ -853,7 +839,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\ndefer target.ptraceUnfreeze()\nswitch req {\n- case syscall.PTRACE_PEEKTEXT, syscall.PTRACE_PEEKDATA:\n+ case linux.PTRACE_PEEKTEXT, linux.PTRACE_PEEKDATA:\n// \"At the system call level, the PTRACE_PEEKTEXT, PTRACE_PEEKDATA, and\n// PTRACE_PEEKUSER requests have a different API: they store the result\n// at the address specified by the data parameter, and the return value\n@@ -867,13 +853,13 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n_, err := t.CopyOut(data, word)\nreturn err\n- case syscall.PTRACE_POKETEXT, syscall.PTRACE_POKEDATA:\n+ case linux.PTRACE_POKETEXT, linux.PTRACE_POKEDATA:\n_, err := usermem.CopyObjectOut(t, target.MemoryManager(), addr, t.Arch().Native(uintptr(data)), usermem.IOOpts{\nIgnorePermissions: true,\n})\nreturn err\n- case syscall.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER\n+ case linux.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER\nn, err := target.Arch().PtracePeekUser(uintptr(addr))\nif err != nil {\nreturn err\n@@ -881,10 +867,10 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n_, err = t.CopyOut(data, n)\nreturn err\n- case syscall.PTRACE_POKEUSR: // aka PTRACE_POKEUSER\n+ case linux.PTRACE_POKEUSR: // aka PTRACE_POKEUSER\nreturn target.Arch().PtracePokeUser(uintptr(addr), uintptr(data))\n- case syscall.PTRACE_GETREGS:\n+ case linux.PTRACE_GETREGS:\n// \"Copy the tracee's general-purpose ... registers ... to the address\n// data in the tracer. ... (addr is ignored.) Note that SPARC systems\n// have the meaning of data and addr reversed ...\"\n@@ -898,7 +884,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n})\nreturn err\n- case syscall.PTRACE_GETFPREGS:\n+ case linux.PTRACE_GETFPREGS:\n_, err := target.Arch().PtraceGetFPRegs(&usermem.IOReadWriter{\nCtx: t,\nIO: t.MemoryManager(),\n@@ -909,7 +895,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n})\nreturn err\n- case syscall.PTRACE_GETREGSET:\n+ case linux.PTRACE_GETREGSET:\n// \"Read the tracee's registers. addr specifies, in an\n// architecture-dependent way, the type of registers to be read. ...\n// data points to a struct iovec, which describes the destination\n@@ -934,7 +920,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\nar.End -= usermem.Addr(n)\nreturn t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))\n- case syscall.PTRACE_SETREGS:\n+ case linux.PTRACE_SETREGS:\n_, err := target.Arch().PtraceSetRegs(&usermem.IOReadWriter{\nCtx: t,\nIO: t.MemoryManager(),\n@@ -945,7 +931,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n})\nreturn err\n- case syscall.PTRACE_SETFPREGS:\n+ case linux.PTRACE_SETFPREGS:\n_, err := target.Arch().PtraceSetFPRegs(&usermem.IOReadWriter{\nCtx: t,\nIO: t.MemoryManager(),\n@@ -956,7 +942,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n})\nreturn err\n- case syscall.PTRACE_SETREGSET:\n+ case linux.PTRACE_SETREGSET:\nars, err := t.CopyInIovecs(data, 1)\nif err != nil {\nreturn err\n@@ -976,7 +962,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\nar.End -= usermem.Addr(n)\nreturn t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))\n- case syscall.PTRACE_GETSIGINFO:\n+ case linux.PTRACE_GETSIGINFO:\nt.tg.pidns.owner.mu.RLock()\ndefer t.tg.pidns.owner.mu.RUnlock()\nif target.ptraceSiginfo == nil {\n@@ -985,7 +971,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n_, err := t.CopyOut(data, target.ptraceSiginfo)\nreturn err\n- case syscall.PTRACE_SETSIGINFO:\n+ case linux.PTRACE_SETSIGINFO:\nvar info arch.SignalInfo\nif _, err := t.CopyIn(data, &info); err != nil {\nreturn err\n@@ -998,7 +984,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\ntarget.ptraceSiginfo = &info\nreturn nil\n- case PTRACE_GETSIGMASK:\n+ case linux.PTRACE_GETSIGMASK:\nif addr != linux.SignalSetSize {\nreturn syserror.EINVAL\n}\n@@ -1007,7 +993,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\n_, err := t.CopyOut(data, target.tr.SignalMask)\nreturn err\n- case PTRACE_SETSIGMASK:\n+ case linux.PTRACE_SETSIGMASK:\nif addr != linux.SignalSetSize {\nreturn syserror.EINVAL\n}\n@@ -1019,29 +1005,35 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {\ntarget.SetSignalMask(mask &^ UnblockableSignals)\nreturn nil\n- case syscall.PTRACE_SETOPTIONS:\n+ case linux.PTRACE_SETOPTIONS:\nt.tg.pidns.owner.mu.Lock()\ndefer t.tg.pidns.owner.mu.Unlock()\n- validOpts := uintptr(_PTRACE_O_EXITKILL | syscall.PTRACE_O_TRACESYSGOOD | syscall.PTRACE_O_TRACECLONE |\n- syscall.PTRACE_O_TRACEEXEC | syscall.PTRACE_O_TRACEEXIT | syscall.PTRACE_O_TRACEFORK |\n- _PTRACE_O_TRACESECCOMP | syscall.PTRACE_O_TRACEVFORK | syscall.PTRACE_O_TRACEVFORKDONE)\n+ validOpts := uintptr(linux.PTRACE_O_EXITKILL |\n+ linux.PTRACE_O_TRACESYSGOOD |\n+ linux.PTRACE_O_TRACECLONE |\n+ linux.PTRACE_O_TRACEEXEC |\n+ linux.PTRACE_O_TRACEEXIT |\n+ linux.PTRACE_O_TRACEFORK |\n+ linux.PTRACE_O_TRACESECCOMP |\n+ linux.PTRACE_O_TRACEVFORK |\n+ linux.PTRACE_O_TRACEVFORKDONE)\nif uintptr(data)&^validOpts != 0 {\nreturn syserror.EINVAL\n}\ntarget.ptraceOpts = ptraceOptions{\n- ExitKill: data&_PTRACE_O_EXITKILL != 0,\n- SysGood: data&syscall.PTRACE_O_TRACESYSGOOD != 0,\n- TraceClone: data&syscall.PTRACE_O_TRACECLONE != 0,\n- TraceExec: data&syscall.PTRACE_O_TRACEEXEC != 0,\n- TraceExit: data&syscall.PTRACE_O_TRACEEXIT != 0,\n- TraceFork: data&syscall.PTRACE_O_TRACEFORK != 0,\n- TraceSeccomp: data&_PTRACE_O_TRACESECCOMP != 0,\n- TraceVfork: data&syscall.PTRACE_O_TRACEVFORK != 0,\n- TraceVforkDone: data&syscall.PTRACE_O_TRACEVFORKDONE != 0,\n+ ExitKill: data&linux.PTRACE_O_EXITKILL != 0,\n+ SysGood: data&linux.PTRACE_O_TRACESYSGOOD != 0,\n+ TraceClone: data&linux.PTRACE_O_TRACECLONE != 0,\n+ TraceExec: data&linux.PTRACE_O_TRACEEXEC != 0,\n+ TraceExit: data&linux.PTRACE_O_TRACEEXIT != 0,\n+ TraceFork: data&linux.PTRACE_O_TRACEFORK != 0,\n+ TraceSeccomp: data&linux.PTRACE_O_TRACESECCOMP != 0,\n+ TraceVfork: data&linux.PTRACE_O_TRACEVFORK != 0,\n+ TraceVforkDone: data&linux.PTRACE_O_TRACEVFORKDONE != 0,\n}\nreturn nil\n- case syscall.PTRACE_GETEVENTMSG:\n+ case linux.PTRACE_GETEVENTMSG:\nt.tg.pidns.owner.mu.RLock()\ndefer t.tg.pidns.owner.mu.RUnlock()\n_, err := t.CopyOut(usermem.Addr(data), target.ptraceEventMsg)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/ptrace.go",
"new_path": "pkg/sentry/strace/ptrace.go",
"diff": "package strace\nimport (\n- \"syscall\"\n-\n\"gvisor.googlesource.com/gvisor/pkg/abi\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n)\n// PtraceRequestSet are the possible ptrace(2) requests.\nvar PtraceRequestSet = abi.ValueSet{\n{\n- Value: syscall.PTRACE_TRACEME,\n+ Value: linux.PTRACE_TRACEME,\nName: \"PTRACE_TRACEME\",\n},\n{\n- Value: syscall.PTRACE_PEEKTEXT,\n+ Value: linux.PTRACE_PEEKTEXT,\nName: \"PTRACE_PEEKTEXT\",\n},\n{\n- Value: syscall.PTRACE_PEEKDATA,\n+ Value: linux.PTRACE_PEEKDATA,\nName: \"PTRACE_PEEKDATA\",\n},\n{\n- Value: syscall.PTRACE_PEEKUSR,\n+ Value: linux.PTRACE_PEEKUSR,\nName: \"PTRACE_PEEKUSR\",\n},\n{\n- Value: syscall.PTRACE_POKETEXT,\n+ Value: linux.PTRACE_POKETEXT,\nName: \"PTRACE_POKETEXT\",\n},\n{\n- Value: syscall.PTRACE_POKEDATA,\n+ Value: linux.PTRACE_POKEDATA,\nName: \"PTRACE_POKEDATA\",\n},\n{\n- Value: syscall.PTRACE_POKEUSR,\n+ Value: linux.PTRACE_POKEUSR,\nName: \"PTRACE_POKEUSR\",\n},\n{\n- Value: syscall.PTRACE_CONT,\n+ Value: linux.PTRACE_CONT,\nName: \"PTRACE_CONT\",\n},\n{\n- Value: syscall.PTRACE_KILL,\n+ Value: linux.PTRACE_KILL,\nName: \"PTRACE_KILL\",\n},\n{\n- Value: syscall.PTRACE_SINGLESTEP,\n+ Value: linux.PTRACE_SINGLESTEP,\nName: \"PTRACE_SINGLESTEP\",\n},\n{\n- Value: syscall.PTRACE_ATTACH,\n+ Value: linux.PTRACE_ATTACH,\nName: \"PTRACE_ATTACH\",\n},\n{\n- Value: syscall.PTRACE_DETACH,\n+ Value: linux.PTRACE_DETACH,\nName: \"PTRACE_DETACH\",\n},\n{\n- Value: syscall.PTRACE_SYSCALL,\n+ Value: linux.PTRACE_SYSCALL,\nName: \"PTRACE_SYSCALL\",\n},\n{\n- Value: syscall.PTRACE_SETOPTIONS,\n+ Value: linux.PTRACE_SETOPTIONS,\nName: \"PTRACE_SETOPTIONS\",\n},\n{\n- Value: syscall.PTRACE_GETEVENTMSG,\n+ Value: linux.PTRACE_GETEVENTMSG,\nName: \"PTRACE_GETEVENTMSG\",\n},\n{\n- Value: syscall.PTRACE_GETSIGINFO,\n+ Value: linux.PTRACE_GETSIGINFO,\nName: \"PTRACE_GETSIGINFO\",\n},\n{\n- Value: syscall.PTRACE_SETSIGINFO,\n+ Value: linux.PTRACE_SETSIGINFO,\nName: \"PTRACE_SETSIGINFO\",\n},\n{\n- Value: syscall.PTRACE_GETREGSET,\n+ Value: linux.PTRACE_GETREGSET,\nName: \"PTRACE_GETREGSET\",\n},\n{\n- Value: syscall.PTRACE_SETREGSET,\n+ Value: linux.PTRACE_SETREGSET,\nName: \"PTRACE_SETREGSET\",\n},\n{\n- Value: kernel.PTRACE_SEIZE,\n+ Value: linux.PTRACE_SEIZE,\nName: \"PTRACE_SEIZE\",\n},\n{\n- Value: kernel.PTRACE_INTERRUPT,\n+ Value: linux.PTRACE_INTERRUPT,\nName: \"PTRACE_INTERRUPT\",\n},\n{\n- Value: kernel.PTRACE_LISTEN,\n+ Value: linux.PTRACE_LISTEN,\nName: \"PTRACE_LISTEN\",\n},\n{\n- Value: kernel.PTRACE_PEEKSIGINFO,\n+ Value: linux.PTRACE_PEEKSIGINFO,\nName: \"PTRACE_PEEKSIGINFO\",\n},\n{\n- Value: kernel.PTRACE_GETSIGMASK,\n+ Value: linux.PTRACE_GETSIGMASK,\nName: \"PTRACE_GETSIGMASK\",\n},\n{\n- Value: kernel.PTRACE_SETSIGMASK,\n+ Value: linux.PTRACE_SETSIGMASK,\nName: \"PTRACE_SETSIGMASK\",\n},\n{\n- Value: syscall.PTRACE_GETREGS,\n+ Value: linux.PTRACE_GETREGS,\nName: \"PTRACE_GETREGS\",\n},\n{\n- Value: syscall.PTRACE_SETREGS,\n+ Value: linux.PTRACE_SETREGS,\nName: \"PTRACE_SETREGS\",\n},\n{\n- Value: syscall.PTRACE_GETFPREGS,\n+ Value: linux.PTRACE_GETFPREGS,\nName: \"PTRACE_GETFPREGS\",\n},\n{\n- Value: syscall.PTRACE_SETFPREGS,\n+ Value: linux.PTRACE_SETFPREGS,\nName: \"PTRACE_SETFPREGS\",\n},\n{\n- Value: syscall.PTRACE_GETFPXREGS,\n+ Value: linux.PTRACE_GETFPXREGS,\nName: \"PTRACE_GETFPXREGS\",\n},\n{\n- Value: syscall.PTRACE_SETFPXREGS,\n+ Value: linux.PTRACE_SETFPXREGS,\nName: \"PTRACE_SETFPXREGS\",\n},\n{\n- Value: syscall.PTRACE_OLDSETOPTIONS,\n+ Value: linux.PTRACE_OLDSETOPTIONS,\nName: \"PTRACE_OLDSETOPTIONS\",\n},\n{\n- Value: syscall.PTRACE_GET_THREAD_AREA,\n+ Value: linux.PTRACE_GET_THREAD_AREA,\nName: \"PTRACE_GET_THREAD_AREA\",\n},\n{\n- Value: syscall.PTRACE_SET_THREAD_AREA,\n+ Value: linux.PTRACE_SET_THREAD_AREA,\nName: \"PTRACE_SET_THREAD_AREA\",\n},\n{\n- Value: syscall.PTRACE_ARCH_PRCTL,\n+ Value: linux.PTRACE_ARCH_PRCTL,\nName: \"PTRACE_ARCH_PRCTL\",\n},\n{\n- Value: syscall.PTRACE_SYSEMU,\n+ Value: linux.PTRACE_SYSEMU,\nName: \"PTRACE_SYSEMU\",\n},\n{\n- Value: syscall.PTRACE_SYSEMU_SINGLESTEP,\n+ Value: linux.PTRACE_SYSEMU_SINGLESTEP,\nName: \"PTRACE_SYSEMU_SINGLESTEP\",\n},\n{\n- Value: syscall.PTRACE_SINGLEBLOCK,\n+ Value: linux.PTRACE_SINGLEBLOCK,\nName: \"PTRACE_SINGLEBLOCK\",\n},\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move ptrace constants to abi/linux.
PiperOrigin-RevId: 204188763
Change-Id: I5596ab7abb3ec9e210a7f57b3fc420e836fa43f3 |
259,948 | 12.07.2018 13:38:26 | 25,200 | cc34a90fb46348fd4588d4191ddba0a1d27c1132 | netstack: do not defer panicable logic in tcp main loop. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -828,7 +828,7 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\nvar closeTimer *time.Timer\nvar closeWaker sleep.Waker\n- defer func() {\n+ epilogue := func() {\n// e.mu is expected to be hold upon entering this section.\nif e.snd != nil {\n@@ -849,7 +849,7 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// When the protocol loop exits we should wake up our waiters.\ne.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut)\n- }()\n+ }\nif handshake {\n// This is an active connection, so we must initiate the 3-way\n@@ -867,7 +867,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\ne.mu.Lock()\ne.state = stateError\ne.hardError = err\n- // Lock released in deferred statement.\n+ // Lock released below.\n+ epilogue()\nreturn err\n}\n@@ -1013,7 +1014,9 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\nif err := funcs[v].f(); err != nil {\ne.mu.Lock()\ne.resetConnectionLocked(err)\n- // Lock released in deferred statement.\n+ // Lock released below.\n+ epilogue()\n+\nreturn nil\n}\n}\n@@ -1021,7 +1024,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// Mark endpoint as closed.\ne.mu.Lock()\ne.state = stateClosed\n- // Lock released in deferred statement.\n+ // Lock released below.\n+ epilogue()\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: do not defer panicable logic in tcp main loop.
PiperOrigin-RevId: 204355026
Change-Id: I1a8229879ea3b58aa861a4eb4456fd7aff99863d |
259,948 | 12.07.2018 13:48:18 | 25,200 | 45c50eb12436bcc477a0fbc2616bfd664a07c43f | netstack: save tcp endpoint accepted channel directly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -206,11 +206,7 @@ type endpoint struct {\n// acceptedChan is used by a listening endpoint protocol goroutine to\n// send newly accepted connections to the endpoint so that they can be\n// read by Accept() calls.\n- acceptedChan chan *endpoint `state:\"manual\"`\n-\n- // acceptedEndpoints is only used to save / restore the channel buffer.\n- // FIXME\n- acceptedEndpoints []*endpoint\n+ acceptedChan chan *endpoint `state:\".([]*endpoint)\"`\n// The following are only used from the protocol goroutine, and\n// therefore don't need locks to protect them.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -88,18 +88,33 @@ func (e *endpoint) beforeSave() {\nif !((e.state == stateBound || e.state == stateListen) == e.isPortReserved) {\npanic(\"endpoint port must and must only be reserved in bound or listen state\")\n}\n+}\n- if e.acceptedChan != nil {\n+// saveAcceptedChan is invoked by stateify.\n+func (e *endpoint) saveAcceptedChan() []*endpoint {\n+ if e.acceptedChan == nil {\n+ return nil\n+ }\nclose(e.acceptedChan)\n- e.acceptedEndpoints = make([]*endpoint, len(e.acceptedChan), cap(e.acceptedChan))\n+ acceptedEndpoints := make([]*endpoint, len(e.acceptedChan), cap(e.acceptedChan))\ni := 0\nfor ep := range e.acceptedChan {\n- e.acceptedEndpoints[i] = ep\n+ acceptedEndpoints[i] = ep\ni++\n}\n- if i != len(e.acceptedEndpoints) {\n+ if i != len(acceptedEndpoints) {\npanic(\"endpoint acceptedChan buffer got consumed by background context\")\n}\n+ return acceptedEndpoints\n+}\n+\n+// loadAcceptedChan is invoked by stateify.\n+func (e *endpoint) loadAcceptedChan(acceptedEndpoints []*endpoint) {\n+ if cap(acceptedEndpoints) > 0 {\n+ e.acceptedChan = make(chan *endpoint, cap(acceptedEndpoints))\n+ for _, ep := range acceptedEndpoints {\n+ e.acceptedChan <- ep\n+ }\n}\n}\n@@ -134,17 +149,6 @@ func (e *endpoint) loadState(state endpointState) {\n// afterLoad is invoked by stateify.\nfunc (e *endpoint) afterLoad() {\n- // We load acceptedChan buffer indirectly here. Note that closed\n- // endpoints might not need to allocate the channel.\n- // FIXME\n- if cap(e.acceptedEndpoints) > 0 {\n- e.acceptedChan = make(chan *endpoint, cap(e.acceptedEndpoints))\n- for _, ep := range e.acceptedEndpoints {\n- e.acceptedChan <- ep\n- }\n- e.acceptedEndpoints = nil\n- }\n-\ne.stack = stack.StackFromEnv\ne.segmentQueue.setLimit(2 * e.rcvBufSize)\ne.workMu.Init()\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: save tcp endpoint accepted channel directly.
PiperOrigin-RevId: 204356873
Change-Id: I5e2f885f58678e693aae1a69e8bf8084a685af28 |
259,948 | 12.07.2018 14:18:11 | 25,200 | bb41ad808a75b8a945d82df51f0e322d98edf951 | sentry: save inet stacks in proc files. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -55,7 +55,7 @@ func (p *proc) newNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n// ifinet6 implements seqfile.SeqSource for /proc/net/if_inet6.\ntype ifinet6 struct {\n- s inet.Stack `state:\"nosave\"` // S/R-FIXME\n+ s inet.Stack\n}\nfunc (n *ifinet6) contents() []string {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/sys_net.go",
"new_path": "pkg/sentry/fs/proc/sys_net.go",
"diff": "@@ -102,7 +102,7 @@ func (m *tcpMem) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence,\ntype tcpSack struct {\nramfs.Entry\n- s inet.Stack `state:\"nosave\"` // S/R-FIXME\n+ s inet.Stack\n}\nfunc newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: save inet stacks in proc files.
PiperOrigin-RevId: 204362791
Change-Id: If85ea7442741e299f0d7cddbc3d6b415e285da81 |
259,948 | 12.07.2018 15:07:59 | 25,200 | 1cd46c8dd1a92dd0ad3eeb60a763278f2e98d0b4 | sentry: wait for restore clock instead of panicing in Timekeeper. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/timekeeper.go",
"new_path": "pkg/sentry/kernel/timekeeper.go",
"diff": "@@ -44,14 +44,14 @@ type Timekeeper struct {\n// It is set only once, by SetClocks.\nmonotonicOffset int64 `state:\"nosave\"`\n- // restored indicates that this Timekeeper was restored from a state\n- // file.\n- restored bool `state:\"nosave\"`\n+ // restored, if non-nil, indicates that this Timekeeper was restored\n+ // from a state file. The clocks are not set until restored is closed.\n+ restored chan struct{} `state:\"nosave\"`\n// saveMonotonic is the (offset) value of the monotonic clock at the\n// time of save.\n//\n- // It is only valid if restored is true.\n+ // It is only valid if restored is non-nil.\n//\n// It is only used in SetClocks after restore to compute the new\n// monotonicOffset.\n@@ -59,7 +59,7 @@ type Timekeeper struct {\n// saveRealtime is the value of the realtime clock at the time of save.\n//\n- // It is only valid if restored is true.\n+ // It is only valid if restored is non-nil.\n//\n// It is only used in SetClocks after restore to compute the new\n// monotonicOffset.\n@@ -98,7 +98,7 @@ func NewTimekeeper(platform platform.Platform, paramPage platform.FileRange) (*T\nfunc (t *Timekeeper) SetClocks(c sentrytime.Clocks) {\n// Update the params, marking them \"not ready\", as we may need to\n// restart calibration on this new machine.\n- if t.restored {\n+ if t.restored != nil {\nif err := t.params.Write(func() vdsoParams {\nreturn vdsoParams{}\n}); err != nil {\n@@ -135,7 +135,7 @@ func (t *Timekeeper) SetClocks(c sentrytime.Clocks) {\npanic(\"Unable to get current realtime: \" + err.Error())\n}\n- if t.restored {\n+ if t.restored != nil {\nwantMonotonic = t.saveMonotonic\nelapsed := nowRealtime - t.saveRealtime\nif elapsed > 0 {\n@@ -145,7 +145,7 @@ func (t *Timekeeper) SetClocks(c sentrytime.Clocks) {\nt.monotonicOffset = wantMonotonic - nowMonotonic\n- if !t.restored {\n+ if t.restored == nil {\n// Hold on to the initial \"boot\" time.\nt.bootTime = ktime.FromNanoseconds(nowRealtime)\n}\n@@ -153,6 +153,10 @@ func (t *Timekeeper) SetClocks(c sentrytime.Clocks) {\nt.mu.Lock()\ndefer t.mu.Unlock()\nt.startUpdater()\n+\n+ if t.restored != nil {\n+ close(t.restored)\n+ }\n}\n// startUpdater starts an update goroutine that keeps the clocks updated.\n@@ -255,8 +259,11 @@ func (t *Timekeeper) ResumeUpdates() {\n// GetTime returns the current time in nanoseconds.\nfunc (t *Timekeeper) GetTime(c sentrytime.ClockID) (int64, error) {\nif t.clocks == nil {\n+ if t.restored == nil {\npanic(\"Timekeeper used before initialized with SetClocks\")\n}\n+ <-t.restored\n+ }\nnow, err := t.clocks.GetTime(c)\nif err == nil && c == sentrytime.Monotonic {\nnow += t.monotonicOffset\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/timekeeper_state.go",
"new_path": "pkg/sentry/kernel/timekeeper_state.go",
"diff": "@@ -37,5 +37,5 @@ func (t *Timekeeper) beforeSave() {\n// afterLoad is invoked by stateify.\nfunc (t *Timekeeper) afterLoad() {\n- t.restored = true\n+ t.restored = make(chan struct{})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/timekeeper_test.go",
"new_path": "pkg/sentry/kernel/timekeeper_test.go",
"diff": "@@ -107,7 +107,7 @@ func TestTimekeeperMonotonicForward(t *testing.T) {\n}\ntk := stateTestClocklessTimekeeper(t)\n- tk.restored = true\n+ tk.restored = make(chan struct{})\ntk.saveMonotonic = 100000\ntk.saveRealtime = 400000\ntk.SetClocks(c)\n@@ -135,7 +135,7 @@ func TestTimekeeperMonotonicJumpBackwards(t *testing.T) {\n}\ntk := stateTestClocklessTimekeeper(t)\n- tk.restored = true\n+ tk.restored = make(chan struct{})\ntk.saveMonotonic = 100000\ntk.saveRealtime = 600000\ntk.SetClocks(c)\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: wait for restore clock instead of panicing in Timekeeper.
PiperOrigin-RevId: 204372296
Change-Id: If1ed9843b93039806e0c65521f30177dc8036979 |
259,881 | 12.07.2018 17:13:41 | 25,200 | a28b274abb3ac0ce652ee395d5a48e7b7fdfb3ad | Fix aio eventfd lookup
We're failing to set eventFile in the outer scope. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_aio.go",
"new_path": "pkg/sentry/syscalls/linux/sys_aio.go",
"diff": "@@ -300,7 +300,7 @@ func submitCallback(t *kernel.Task, id uint64, cb *ioCallback, cbAddr usermem.Ad\n// Was there an eventFD? Extract it.\nvar eventFile *fs.File\nif cb.Flags&_IOCB_FLAG_RESFD != 0 {\n- eventFile := t.FDMap().GetFile(kdefs.FD(cb.ResFD))\n+ eventFile = t.FDMap().GetFile(kdefs.FD(cb.ResFD))\nif eventFile == nil {\n// Bad FD.\nreturn syserror.EBADF\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix aio eventfd lookup
We're failing to set eventFile in the outer scope.
PiperOrigin-RevId: 204392995
Change-Id: Ib9b04f839599ef552d7b5951d08223e2b1d5f6ad |
259,881 | 13.07.2018 10:23:16 | 25,200 | f09ebd9c71eecdfb79f64b6abb26db3b66b8156b | Note that Mount errors do not require translations | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/filesystems.go",
"new_path": "pkg/sentry/fs/filesystems.go",
"diff": "@@ -48,6 +48,8 @@ type Filesystem interface {\n// Mount generates a mountable Inode backed by device and configured\n// using file system independent flags and file system dependent\n// data options.\n+ //\n+ // Mount may return arbitrary errors. They do not need syserr translations.\nMount(ctx context.Context, device string, flags MountSourceFlags, data string) (*Inode, error)\n// AllowUserMount determines whether mount(2) is allowed to mount a\n"
}
] | Go | Apache License 2.0 | google/gvisor | Note that Mount errors do not require translations
PiperOrigin-RevId: 204490639
Change-Id: I0fe26306bae9320c6aa4f854fe0ef25eebd93233 |
260,008 | 13.07.2018 12:10:01 | 25,200 | 5b09ec3b890141959aa6a6a73b1ee4e26490c5cc | Allow a filesystem to control its visibility in /proc/filesystems. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dev/fs.go",
"new_path": "pkg/sentry/fs/dev/fs.go",
"diff": "@@ -49,6 +49,11 @@ func (*filesystem) AllowUserMount() bool {\nreturn true\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\n//\n// In Linux, devtmpfs does the same thing.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/filesystems.go",
"new_path": "pkg/sentry/fs/filesystems.go",
"diff": "@@ -55,6 +55,10 @@ type Filesystem interface {\n// AllowUserMount determines whether mount(2) is allowed to mount a\n// file system of this type.\nAllowUserMount() bool\n+\n+ // AllowUserList determines whether this filesystem is listed in\n+ // /proc/filesystems\n+ AllowUserList() bool\n}\n// filesystems is the global set of registered file systems. It does not need\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/fs.go",
"new_path": "pkg/sentry/fs/gofer/fs.go",
"diff": "@@ -103,6 +103,11 @@ func (*filesystem) AllowUserMount() bool {\nreturn false\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\n//\n// The 9p Linux client returns FS_RENAME_DOES_D_MOVE, see fs/9p/vfs_super.c.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/fs.go",
"new_path": "pkg/sentry/fs/host/fs.go",
"diff": "@@ -66,6 +66,11 @@ func (*Filesystem) AllowUserMount() bool {\nreturn false\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*Filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\nfunc (*Filesystem) Flags() fs.FilesystemFlags {\nreturn 0\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/mount_overlay.go",
"new_path": "pkg/sentry/fs/mount_overlay.go",
"diff": "@@ -89,6 +89,11 @@ func (ofs *overlayFilesystem) AllowUserMount() bool {\nreturn false\n}\n+// AllowUserList implements Filesystem.AllowUserList.\n+func (*overlayFilesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Mount implements Filesystem.Mount.\nfunc (ofs *overlayFilesystem) Mount(ctx context.Context, device string, flags MountSourceFlags, data string) (*Inode, error) {\npanic(\"overlayFilesystem.Mount should not be called!\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/filesystems.go",
"new_path": "pkg/sentry/fs/proc/filesystems.go",
"diff": "@@ -43,6 +43,9 @@ func (*filesystemsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle\n// Generate the file contents.\nvar buf bytes.Buffer\nfor _, sys := range fs.GetFilesystems() {\n+ if !sys.AllowUserList() {\n+ continue\n+ }\nnodev := \"nodev\"\nif sys.Flags()&fs.FilesystemRequiresDev != 0 {\nnodev = \"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/fs.go",
"new_path": "pkg/sentry/fs/proc/fs.go",
"diff": "@@ -42,6 +42,11 @@ func (*filesystem) AllowUserMount() bool {\nreturn true\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\n//\n// In Linux, proc returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/proc/root.c.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/sys/fs.go",
"new_path": "pkg/sentry/fs/sys/fs.go",
"diff": "@@ -40,6 +40,11 @@ func (*filesystem) AllowUserMount() bool {\nreturn true\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\n//\n// In Linux, sysfs returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/sysfs/mount.c.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tmpfs/fs.go",
"new_path": "pkg/sentry/fs/tmpfs/fs.go",
"diff": "@@ -67,6 +67,11 @@ func (*Filesystem) AllowUserMount() bool {\nreturn true\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*Filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\n//\n// In Linux, tmpfs returns FS_USERNS_MOUNT, see mm/shmem.c.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/fs.go",
"new_path": "pkg/sentry/fs/tty/fs.go",
"diff": "@@ -46,6 +46,11 @@ func (*filesystem) AllowUserMount() bool {\nreturn false\n}\n+// AllowUserList allows this filesystem to be listed in /proc/filesystems.\n+func (*filesystem) AllowUserList() bool {\n+ return true\n+}\n+\n// Flags returns that there is nothing special about this file system.\nfunc (*filesystem) Flags() fs.FilesystemFlags {\nreturn 0\n"
}
] | Go | Apache License 2.0 | google/gvisor | Allow a filesystem to control its visibility in /proc/filesystems.
PiperOrigin-RevId: 204508520
Change-Id: I09e5f8b6e69413370e1a0d39dbb7dc1ee0b6192d |
260,008 | 16.07.2018 12:19:02 | 25,200 | 8f21c0bb2807888d812318def43c2405c9b13f5a | Add EventOperations.HostFD()
This method allows an eventfd inside the Sentry to be registered with with
the host kernel.
Update comment about memory mapping host fds via CachingInodeOperations. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -30,6 +30,7 @@ go_library(\n\"dev.go\",\n\"elf.go\",\n\"errors.go\",\n+ \"eventfd.go\",\n\"exec.go\",\n\"fcntl.go\",\n\"file.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/eventfd.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// Constants for eventfd2(2).\n+const (\n+ EFD_SEMAPHORE = 0x1\n+ EFD_CLOEXEC = O_CLOEXEC\n+ EFD_NONBLOCK = O_NONBLOCK\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/fsutil/inode_cached.go",
"new_path": "pkg/sentry/fs/fsutil/inode_cached.go",
"diff": "@@ -44,8 +44,7 @@ import (\n//\n// CachingInodeOperations implements Mappable for the CachedFileObject:\n//\n-// - If CachedFileObject.FD returns a value >= 0 and the current platform shares\n-// a host fd table with the sentry, then the value of CachedFileObject.FD\n+// - If CachedFileObject.FD returns a value >= 0 then the file descriptor\n// will be memory mapped on the host.\n//\n// - Otherwise, the contents of CachedFileObject are buffered into memory\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/eventfd/BUILD",
"new_path": "pkg/sentry/kernel/eventfd/BUILD",
"diff": "@@ -21,6 +21,7 @@ go_library(\nimportpath = \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/eventfd\",\nvisibility = [\"//pkg/sentry:internal\"],\ndeps = [\n+ \"//pkg/abi/linux\",\n\"//pkg/refs\",\n\"//pkg/sentry/context\",\n\"//pkg/sentry/fs\",\n@@ -30,6 +31,7 @@ go_library(\n\"//pkg/state\",\n\"//pkg/syserror\",\n\"//pkg/waiter\",\n+ \"//pkg/waiter/fdnotifier\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/eventfd/eventfd.go",
"new_path": "pkg/sentry/kernel/eventfd/eventfd.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"sync\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon\"\n@@ -28,10 +29,12 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n+ \"gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier\"\n)\n// EventOperations represents an event with the semantics of Linux's file-based event\n-// notification (eventfd).\n+// notification (eventfd). Eventfds are usually internal to the Sentry but in certain\n+// situations they may be converted into a host-backed eventfd.\ntype EventOperations struct {\nfsutil.NoopRelease `state:\"nosave\"`\nfsutil.PipeSeek `state:\"nosave\"`\n@@ -46,13 +49,16 @@ type EventOperations struct {\n// Queue is used to notify interested parties when the event object\n// becomes readable or writable.\n- waiter.Queue `state:\"nosave\"`\n+ wq waiter.Queue `state:\"nosave\"`\n// val is the current value of the event counter.\nval uint64\n// semMode specifies whether the event is in \"semaphore\" mode.\nsemMode bool\n+\n+ // hostfd indicates whether this eventfd is passed through to the host.\n+ hostfd int\n}\n// New creates a new event object with the supplied initial value and mode.\n@@ -62,9 +68,48 @@ func New(ctx context.Context, initVal uint64, semMode bool) *fs.File {\nreturn fs.NewFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}, &EventOperations{\nval: initVal,\nsemMode: semMode,\n+ hostfd: -1,\n})\n}\n+// HostFD returns the host eventfd associated with this event.\n+func (e *EventOperations) HostFD() (int, error) {\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ if e.hostfd >= 0 {\n+ return e.hostfd, nil\n+ }\n+\n+ flags := linux.EFD_NONBLOCK\n+ if e.semMode {\n+ flags |= linux.EFD_SEMAPHORE\n+ }\n+\n+ fd, _, err := syscall.Syscall(syscall.SYS_EVENTFD2, uintptr(e.val), uintptr(flags), 0)\n+ if err != 0 {\n+ return -1, err\n+ }\n+\n+ if err := fdnotifier.AddFD(int32(fd), &e.wq); err != nil {\n+ syscall.Close(int(fd))\n+ return -1, err\n+ }\n+\n+ e.hostfd = int(fd)\n+ return e.hostfd, nil\n+}\n+\n+// Release implements fs.FileOperations.Release.\n+func (e *EventOperations) Release() {\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ if e.hostfd >= 0 {\n+ fdnotifier.RemoveFD(int32(e.hostfd))\n+ syscall.Close(e.hostfd)\n+ e.hostfd = -1\n+ }\n+}\n+\n// Read implements fs.FileOperations.Read.\nfunc (e *EventOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) {\nif dst.NumBytes() < 8 {\n@@ -87,9 +132,29 @@ func (e *EventOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOS\nreturn 8, nil\n}\n+// Must be called with e.mu locked.\n+func (e *EventOperations) hostRead(ctx context.Context, dst usermem.IOSequence) error {\n+ var buf [8]byte\n+\n+ if _, err := syscall.Read(e.hostfd, buf[:]); err != nil {\n+ if err == syscall.EWOULDBLOCK {\n+ return syserror.ErrWouldBlock\n+ }\n+ return err\n+ }\n+\n+ _, err := dst.CopyOut(ctx, buf[:])\n+ return err\n+}\n+\nfunc (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) error {\ne.mu.Lock()\n+ if e.hostfd >= 0 {\n+ defer e.mu.Unlock()\n+ return e.hostRead(ctx, dst)\n+ }\n+\n// We can't complete the read if the value is currently zero.\nif e.val == 0 {\ne.mu.Unlock()\n@@ -112,7 +177,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro\n// Notify writers. We do this even if we were already writable because\n// it is possible that a writer is waiting to write the maximum value\n// to the event.\n- e.Notify(waiter.EventOut)\n+ e.wq.Notify(waiter.EventOut)\nvar buf [8]byte\nusermem.ByteOrder.PutUint64(buf[:], val)\n@@ -120,6 +185,17 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro\nreturn err\n}\n+// Must be called with e.mu locked.\n+func (e *EventOperations) hostWrite(val uint64) error {\n+ var buf [8]byte\n+ usermem.ByteOrder.PutUint64(buf[:], val)\n+ _, err := syscall.Write(e.hostfd, buf[:])\n+ if err == syscall.EWOULDBLOCK {\n+ return syserror.ErrWouldBlock\n+ }\n+ return err\n+}\n+\nfunc (e *EventOperations) write(ctx context.Context, src usermem.IOSequence) error {\nvar buf [8]byte\nif _, err := src.CopyIn(ctx, buf[:]); err != nil {\n@@ -138,6 +214,11 @@ func (e *EventOperations) Signal(val uint64) error {\ne.mu.Lock()\n+ if e.hostfd >= 0 {\n+ defer e.mu.Unlock()\n+ return e.hostWrite(val)\n+ }\n+\n// We only allow writes that won't cause the value to go over the max\n// uint64 minus 1.\nif val > math.MaxUint64-1-e.val {\n@@ -149,16 +230,20 @@ func (e *EventOperations) Signal(val uint64) error {\ne.mu.Unlock()\n// Always trigger a notification.\n- e.Notify(waiter.EventIn)\n+ e.wq.Notify(waiter.EventIn)\nreturn nil\n}\n// Readiness returns the ready events for the event fd.\nfunc (e *EventOperations) Readiness(mask waiter.EventMask) waiter.EventMask {\n- ready := waiter.EventMask(0)\n-\ne.mu.Lock()\n+ if e.hostfd >= 0 {\n+ defer e.mu.Unlock()\n+ return fdnotifier.NonBlockingPoll(int32(e.hostfd), mask)\n+ }\n+\n+ ready := waiter.EventMask(0)\nif e.val > 0 {\nready |= waiter.EventIn\n}\n@@ -170,3 +255,25 @@ func (e *EventOperations) Readiness(mask waiter.EventMask) waiter.EventMask {\nreturn mask & ready\n}\n+\n+// EventRegister implements waiter.Waitable.EventRegister.\n+func (e *EventOperations) EventRegister(entry *waiter.Entry, mask waiter.EventMask) {\n+ e.wq.EventRegister(entry, mask)\n+\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ if e.hostfd >= 0 {\n+ fdnotifier.UpdateFD(int32(e.hostfd))\n+ }\n+}\n+\n+// EventUnregister implements waiter.Waitable.EventUnregister.\n+func (e *EventOperations) EventUnregister(entry *waiter.Entry) {\n+ e.wq.EventUnregister(entry)\n+\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ if e.hostfd >= 0 {\n+ fdnotifier.UpdateFD(int32(e.hostfd))\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add EventOperations.HostFD()
This method allows an eventfd inside the Sentry to be registered with with
the host kernel.
Update comment about memory mapping host fds via CachingInodeOperations.
PiperOrigin-RevId: 204784859
Change-Id: I55823321e2d84c17ae0f7efaabc6b55b852ae257 |
259,858 | 16.07.2018 22:02:03 | 25,200 | 29e00c943a61dfcfd4ac8d3f6f526eab641c44a6 | Add CPUID faulting for ptrace and KVM. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/prctl.go",
"new_path": "pkg/abi/linux/prctl.go",
"diff": "@@ -69,4 +69,5 @@ const (\nARCH_SET_FS = 0x1002\nARCH_GET_FS = 0x1003\nARCH_GET_GS = 0x1004\n+ ARCH_SET_CPUID = 0x1012\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_run.go",
"new_path": "pkg/sentry/kernel/task_run.go",
"diff": "@@ -221,6 +221,24 @@ func (*runApp) execute(t *Task) taskRunState {\n// loop to figure out why.\nreturn (*runApp)(nil)\n+ case platform.ErrContextSignalCPUID:\n+ // Is this a CPUID instruction?\n+ expected := arch.CPUIDInstruction[:]\n+ found := make([]byte, len(expected))\n+ _, err := t.CopyIn(usermem.Addr(t.Arch().IP()), &found)\n+ if err == nil && bytes.Equal(expected, found) {\n+ // Skip the cpuid instruction.\n+ t.Arch().CPUIDEmulate(t)\n+ t.Arch().SetIP(t.Arch().IP() + uintptr(len(expected)))\n+\n+ // Resume execution.\n+ return (*runApp)(nil)\n+ }\n+\n+ // The instruction at the given RIP was not a CPUID, and we\n+ // fallthrough to the default signal deliver behavior below.\n+ fallthrough\n+\ncase platform.ErrContextSignal:\n// Looks like a signal has been delivered to us. If it's a synchronous\n// signal (SEGV, SIGBUS, etc.), it should be sent to the application\n@@ -266,28 +284,7 @@ func (*runApp) execute(t *Task) taskRunState {\n}\nswitch sig {\n- case linux.SIGILL:\n- // N.B. The debug stuff here is arguably\n- // expensive. Don't fret. This gets called\n- // about 5 times for a typical application, if\n- // that.\n- t.Debugf(\"SIGILL @ %x\", t.Arch().IP())\n-\n- // Is this a CPUID instruction?\n- expected := arch.CPUIDInstruction[:]\n- found := make([]byte, len(expected))\n- _, err := t.CopyIn(usermem.Addr(t.Arch().IP()), &found)\n- if err == nil && bytes.Equal(expected, found) {\n- // Skip the cpuid instruction.\n- t.Arch().CPUIDEmulate(t)\n- t.Arch().SetIP(t.Arch().IP() + uintptr(len(expected)))\n- break\n- }\n-\n- // Treat it like any other synchronous signal.\n- fallthrough\n-\n- case linux.SIGSEGV, linux.SIGBUS, linux.SIGFPE, linux.SIGTRAP:\n+ case linux.SIGILL, linux.SIGSEGV, linux.SIGBUS, linux.SIGFPE, linux.SIGTRAP:\n// Synchronous signal. Send it to ourselves. Assume the signal is\n// legitimate and force it (work around the signal being ignored or\n// blocked) like Linux does. Conveniently, this is even the correct\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine.go",
"new_path": "pkg/sentry/platform/kvm/machine.go",
"diff": "@@ -141,11 +141,6 @@ func (m *machine) newVCPU() *vCPU {\npanic(fmt.Sprintf(\"error setting signal mask: %v\", err))\n}\n- // Initialize architecture state.\n- if err := c.initArchState(); err != nil {\n- panic(fmt.Sprintf(\"error initialization vCPU state: %v\", err))\n- }\n-\n// Map the run data.\nrunData, err := mapRunData(int(fd))\nif err != nil {\n@@ -153,6 +148,11 @@ func (m *machine) newVCPU() *vCPU {\n}\nc.runData = runData\n+ // Initialize architecture state.\n+ if err := c.initArchState(); err != nil {\n+ panic(fmt.Sprintf(\"error initialization vCPU state: %v\", err))\n+ }\n+\nreturn c // Done.\n}\n@@ -168,12 +168,6 @@ func newMachine(vm int) (*machine, error) {\nPageTables: pagetables.New(newAllocator()),\n})\n- // Initialize architecture state.\n- if err := m.initArchState(); err != nil {\n- m.Destroy()\n- return nil, err\n- }\n-\n// Apply the physical mappings. Note that these mappings may point to\n// guest physical addresses that are not actually available. These\n// physical pages are mapped on demand, see kernel_unsafe.go.\n@@ -221,6 +215,12 @@ func newMachine(vm int) (*machine, error) {\n}\n})\n+ // Initialize architecture state.\n+ if err := m.initArchState(); err != nil {\n+ m.Destroy()\n+ return nil, err\n+ }\n+\n// Ensure the machine is cleaned up properly.\nruntime.SetFinalizer(m, (*machine).Destroy)\nreturn m, nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"new_path": "pkg/sentry/platform/kvm/machine_amd64.go",
"diff": "@@ -19,6 +19,7 @@ package kvm\nimport (\n\"fmt\"\n\"reflect\"\n+ \"runtime/debug\"\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n@@ -39,6 +40,21 @@ func (m *machine) initArchState() error {\nuintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 {\nreturn errno\n}\n+\n+ // Enable CPUID faulting, if possible. Note that this also serves as a\n+ // basic platform sanity tests, since we will enter guest mode for the\n+ // first time here. The recovery is necessary, since if we fail to read\n+ // the platform info register, we will retry to host mode and\n+ // ultimately need to handle a segmentation fault.\n+ old := debug.SetPanicOnFault(true)\n+ defer func() {\n+ recover()\n+ debug.SetPanicOnFault(old)\n+ }()\n+ m.retryInGuest(func() {\n+ ring0.SetCPUIDFaulting(true)\n+ })\n+\nreturn nil\n}\n@@ -238,6 +254,12 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts) (*arch.SignalInfo, user\nCode: arch.SignalInfoKernel,\n}\ninfo.SetAddr(switchOpts.Registers.Rip) // Include address.\n+ if vector == ring0.GeneralProtectionFault {\n+ // When CPUID faulting is enabled, we will generate a #GP(0) when\n+ // userspace executes a CPUID instruction. This is handled above,\n+ // because we need to be able to map and read user memory.\n+ return info, usermem.AccessType{}, platform.ErrContextSignalCPUID\n+ }\nreturn info, usermem.AccessType{}, platform.ErrContextSignal\ncase ring0.InvalidOpcode:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/platform.go",
"new_path": "pkg/sentry/platform/platform.go",
"diff": "@@ -154,6 +154,13 @@ var (\n// Context was interrupted by a signal.\nErrContextSignal = fmt.Errorf(\"interrupted by signal\")\n+ // ErrContextSignalCPUID is equivalent to ErrContextSignal, except that\n+ // a check should be done for execution of the CPUID instruction. If\n+ // the current instruction pointer is a CPUID instruction, then this\n+ // should be emulated appropriately. If not, then the given signal\n+ // should be handled per above.\n+ ErrContextSignalCPUID = fmt.Errorf(\"interrupted by signal, possible CPUID\")\n+\n// ErrContextInterrupt is returned by Context.Switch() to indicate that the\n// Context was interrupted by a call to Context.Interrupt().\nErrContextInterrupt = fmt.Errorf(\"interrupted by platform.Context.Interrupt()\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/ptrace.go",
"new_path": "pkg/sentry/platform/ptrace/ptrace.go",
"diff": "@@ -101,9 +101,11 @@ func (c *context) Switch(as platform.AddressSpace, ac arch.Context, cpu int32) (\ns := as.(*subprocess)\nisSyscall := s.switchToApp(c, ac)\n- var faultSP *subprocess\n- var faultAddr usermem.Addr\n- var faultIP usermem.Addr\n+ var (\n+ faultSP *subprocess\n+ faultAddr usermem.Addr\n+ faultIP usermem.Addr\n+ )\nif !isSyscall && linux.Signal(c.signalInfo.Signo) == linux.SIGSEGV {\nfaultSP = s\nfaultAddr = usermem.Addr(c.signalInfo.Addr())\n@@ -161,7 +163,12 @@ func (c *context) Switch(as platform.AddressSpace, ac arch.Context, cpu int32) (\nlastFaultIP == faultIP {\nat.Write = true\n}\n- return &c.signalInfo, at, platform.ErrContextSignal\n+\n+ // Unfortunately, we have to unilaterally return ErrContextSignalCPUID\n+ // here, in case this fault was generated by a CPUID exception. There\n+ // is no way to distinguish between CPUID-generated faults and regular\n+ // page faults.\n+ return &c.signalInfo, at, platform.ErrContextSignalCPUID\n}\n// Interrupt interrupts the running guest application associated with this context.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"new_path": "pkg/sentry/platform/ptrace/subprocess_linux.go",
"diff": "@@ -20,6 +20,7 @@ import (\n\"fmt\"\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid\"\n)\n@@ -85,6 +86,10 @@ func createStub() (*thread, error) {\nsyscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)\n}\n+ // Enable cpuid-faulting; this may fail on older kernels or hardware,\n+ // so we just disregard the result. Host CPUID will be enabled.\n+ syscall.RawSyscall(syscall.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0)\n+\n// Call the stub; should not return.\nstubCall(stubStart, ppid)\npanic(\"unreachable\")\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"new_path": "pkg/sentry/platform/ring0/kernel_amd64.go",
"diff": "@@ -163,7 +163,6 @@ func IsCanonical(addr uint64) bool {\n// the case for amd64, but may not be the case for other architectures.\n//\n// Precondition: the Rip, Rsp, Fs and Gs registers must be canonical.\n-\n//\n//go:nosplit\nfunc (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\n@@ -237,6 +236,27 @@ func start(c *CPU) {\nwrmsr(_MSR_CSTAR, kernelFunc(sysenter))\n}\n+// SetCPUIDFaulting sets CPUID faulting per the boolean value.\n+//\n+// True is returned if faulting could be set.\n+//\n+//go:nosplit\n+func SetCPUIDFaulting(on bool) bool {\n+ // Per the SDM (Vol 3, Table 2-43), PLATFORM_INFO bit 31 denotes support\n+ // for CPUID faulting, and we enable and disable via the MISC_FEATURES MSR.\n+ if rdmsr(_MSR_PLATFORM_INFO)&_PLATFORM_INFO_CPUID_FAULT != 0 {\n+ features := rdmsr(_MSR_MISC_FEATURES)\n+ if on {\n+ features |= _MISC_FEATURE_CPUID_TRAP\n+ } else {\n+ features &^= _MISC_FEATURE_CPUID_TRAP\n+ }\n+ wrmsr(_MSR_MISC_FEATURES, features)\n+ return true // Setting successful.\n+ }\n+ return false\n+}\n+\n// ReadCR2 reads the current CR2 value.\n//\n//go:nosplit\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/x86.go",
"new_path": "pkg/sentry/platform/ring0/x86.go",
"diff": "@@ -54,6 +54,12 @@ const (\n_MSR_LSTAR = 0xc0000082\n_MSR_CSTAR = 0xc0000083\n_MSR_SYSCALL_MASK = 0xc0000084\n+ _MSR_PLATFORM_INFO = 0xce\n+ _MSR_MISC_FEATURES = 0x140\n+\n+ _PLATFORM_INFO_CPUID_FAULT = 1 << 31\n+\n+ _MISC_FEATURE_CPUID_TRAP = 0x1\n)\nconst (\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add CPUID faulting for ptrace and KVM.
PiperOrigin-RevId: 204858314
Change-Id: I8252bf8de3232a7a27af51076139b585e73276d4 |
259,948 | 17.07.2018 10:13:57 | 25,200 | beb89bb75749620969b0e1dea65240bf5d4324b2 | netstack: update goroutine save / restore safety comments. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -108,7 +108,10 @@ func New(opts *Options) tcpip.LinkEndpointID {\n// dispatches them via the provided dispatcher.\nfunc (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\ne.attached = true\n- go e.dispatchLoop(dispatcher) // S/R-FIXME\n+ // Link endpoints are not savable. When transportation endpoints are\n+ // saved, they stop sending outgoing packets and all incoming packets\n+ // are rejected.\n+ go e.dispatchLoop(dispatcher) // S/R-SAFE: See above.\n}\n// IsAttached implements stack.LinkEndpoint.IsAttached.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"new_path": "pkg/tcpip/link/sharedmem/sharedmem.go",
"diff": "@@ -142,7 +142,10 @@ func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\nif !e.workerStarted && atomic.LoadUint32(&e.stopRequested) == 0 {\ne.workerStarted = true\ne.completed.Add(1)\n- go e.dispatchLoop(dispatcher) // S/R-FIXME\n+ // Link endpoints are not savable. When transportation endpoints\n+ // are saved, they stop sending outgoing packets and all\n+ // incoming packets are rejected.\n+ go e.dispatchLoop(dispatcher) // S/R-SAFE: see above.\n}\ne.mu.Unlock()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"diff": "@@ -187,7 +187,7 @@ func main() {\n// Start the writer in its own goroutine.\nwriterCompletedCh := make(chan struct{})\n- go writer(writerCompletedCh, ep) // S/R-FIXME\n+ go writer(writerCompletedCh, ep) // S/R-SAFE: sample code.\n// Read data and write to standard output until the peer closes the\n// connection from its side.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"diff": "@@ -187,6 +187,6 @@ func main() {\nlog.Fatal(\"Accept() failed:\", err)\n}\n- go echo(wq, n) // S/R-FIXME\n+ go echo(wq, n) // S/R-SAFE: sample code.\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/linkaddrcache.go",
"new_path": "pkg/tcpip/stack/linkaddrcache.go",
"diff": "@@ -262,7 +262,7 @@ func (c *linkAddrCache) startAddressResolution(k tcpip.FullAddress, linkRes Link\ne := c.makeAndAddEntry(k, \"\")\ne.addWaker(waker)\n- go func() { // S/R-FIXME\n+ go func() { // S/R-SAFE: link non-savable; wakers dropped synchronously.\nfor i := 0; ; i++ {\n// Send link request, then wait for the timeout limit and check\n// whether the request succeeded.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/forwarder.go",
"new_path": "pkg/tcpip/transport/tcp/forwarder.go",
"diff": "@@ -90,7 +90,7 @@ func (f *Forwarder) HandlePacket(r *stack.Route, id stack.TransportEndpointID, v\n// Launch a new goroutine to handle the request.\nf.inFlight[id] = struct{}{}\ns.incRef()\n- go f.handler(&ForwarderRequest{ // S/R-FIXME\n+ go f.handler(&ForwarderRequest{ // S/R-SAFE: not used by Sentry.\nforwarder: f,\nsegment: s,\nsynOptions: opts,\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: update goroutine save / restore safety comments.
PiperOrigin-RevId: 204930314
Change-Id: Ifc4c41ed28616cd57fafbf7c92e87141a945c41f |
260,008 | 17.07.2018 10:50:02 | 25,200 | ed2e03d3780b8d96b189c1311c92b9db2fbcb35a | Add API to decode 'stat.st_rdev' into major and minor numbers. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/dev.go",
"new_path": "pkg/abi/linux/dev.go",
"diff": "@@ -25,6 +25,13 @@ func MakeDeviceID(major uint16, minor uint32) uint32 {\nreturn (minor & 0xff) | ((uint32(major) & 0xfff) << 8) | ((minor >> 8) << 20)\n}\n+// DecodeDeviceID decodes a device ID into major and minor device numbers.\n+func DecodeDeviceID(rdev uint32) (uint16, uint32) {\n+ major := uint16((rdev >> 8) & 0xfff)\n+ minor := (rdev & 0xff) | ((rdev >> 20) << 8)\n+ return major, minor\n+}\n+\n// Character device IDs.\n//\n// See Documentations/devices.txt and uapi/linux/major.h.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add API to decode 'stat.st_rdev' into major and minor numbers.
PiperOrigin-RevId: 204936533
Change-Id: Ib060920077fc914f97c4a0548a176d1368510c7b |
259,948 | 19.07.2018 09:36:34 | 25,200 | a95640b1e9fb8c3751c54c80f6c04f0dff233aed | sentry: save stack in proc net dev. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/net.go",
"new_path": "pkg/sentry/fs/proc/net.go",
"diff": "@@ -109,7 +109,7 @@ func (n *ifinet6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s\n// netDev implements seqfile.SeqSource for /proc/net/dev.\ntype netDev struct {\n- s inet.Stack `state:\"nosave\"` // S/R-FIXME\n+ s inet.Stack\n}\n// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: save stack in proc net dev.
PiperOrigin-RevId: 205253858
Change-Id: Iccdc493b66d1b4d39de44afb1184952183b1283f |
260,013 | 19.07.2018 12:41:00 | 25,200 | df5a5d388e1fc3349ee70c3476fdffb195fbce9c | Add AT_UID, AT_EUID, AT_GID, AT_EGID to aux vector.
With musl libc when these entries are missing from the aux vector
it's forcing libc.secure (effectively AT_SECURE). This mode prevents
RPATH and LD_LIBRARY_PATH from working.
As the first entry is a mask of all the aux fields set: | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/BUILD",
"new_path": "pkg/sentry/loader/BUILD",
"diff": "@@ -46,6 +46,7 @@ go_library(\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/fs/anon\",\n\"//pkg/sentry/fs/fsutil\",\n+ \"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/limits\",\n\"//pkg/sentry/memmap\",\n\"//pkg/sentry/mm\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/loader.go",
"new_path": "pkg/sentry/loader/loader.go",
"diff": "@@ -27,6 +27,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/mm\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n@@ -247,8 +248,14 @@ func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, r\nreturn 0, nil, \"\", err\n}\n- // Add generic auxv entries\n+ c := auth.CredentialsFromContext(ctx)\n+\n+ // Add generic auxv entries.\nauxv := append(loaded.auxv, arch.Auxv{\n+ arch.AuxEntry{linux.AT_UID, usermem.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())},\n+ arch.AuxEntry{linux.AT_EUID, usermem.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},\n+ arch.AuxEntry{linux.AT_GID, usermem.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},\n+ arch.AuxEntry{linux.AT_EGID, usermem.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},\narch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},\narch.AuxEntry{linux.AT_EXECFN, execfn},\narch.AuxEntry{linux.AT_RANDOM, random},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add AT_UID, AT_EUID, AT_GID, AT_EGID to aux vector.
With musl libc when these entries are missing from the aux vector
it's forcing libc.secure (effectively AT_SECURE). This mode prevents
RPATH and LD_LIBRARY_PATH from working.
https://git.musl-libc.org/cgit/musl/tree/ldso/dynlink.c#n1488
As the first entry is a mask of all the aux fields set:
https://git.musl-libc.org/cgit/musl/tree/ldso/dynlink.c#n187
PiperOrigin-RevId: 205284684
Change-Id: I04de7bab241043306b4f732306a81d74edfdff26 |
259,858 | 19.07.2018 15:48:08 | 25,200 | 8b8aad91d581ee5f600f5ec0b7fb407b36d07db1 | kernel: mutations on creds now require a copy. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fasync/fasync.go",
"new_path": "pkg/sentry/kernel/fasync/fasync.go",
"diff": "@@ -35,7 +35,7 @@ func New() fs.FileAsync {\ntype FileAsync struct {\nmu sync.Mutex\ne waiter.Entry\n- requester auth.Credentials\n+ requester *auth.Credentials\n// Only one of the following is allowed to be non-nil.\nrecipientPG *kernel.ProcessGroup\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -334,7 +334,9 @@ type Task struct {\n// creds is the task's credentials.\n//\n- // creds is protected by mu.\n+ // creds is protected by mu, however the value itself is immutable and\n+ // can only be changed by a copy. After reading the pointer, access\n+ // will proceed outside the scope of mu.\ncreds *auth.Credentials\n// utsns is the task's UTS namespace.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_identity.go",
"new_path": "pkg/sentry/kernel/task_identity.go",
"diff": "@@ -20,11 +20,13 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n-// Credentials returns t's credentials by value.\n-func (t *Task) Credentials() auth.Credentials {\n+// Credentials returns t's credentials.\n+//\n+// This value must be considered immutable.\n+func (t *Task) Credentials() *auth.Credentials {\nt.mu.Lock()\ndefer t.mu.Unlock()\n- return *t.creds // Copy out with lock held.\n+ return t.creds\n}\n// UserNamespace returns the user namespace associated with the task.\n@@ -162,6 +164,7 @@ func (t *Task) SetRESUID(r, e, s auth.UID) error {\nfunc (t *Task) setKUIDsUncheckedLocked(newR, newE, newS auth.KUID) {\nroot := t.creds.UserNamespace.MapToKUID(auth.RootUID)\noldR, oldE, oldS := t.creds.RealKUID, t.creds.EffectiveKUID, t.creds.SavedKUID\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.RealKUID, t.creds.EffectiveKUID, t.creds.SavedKUID = newR, newE, newS\n// \"1. If one or more of the real, effective or saved set user IDs was\n@@ -297,6 +300,7 @@ func (t *Task) SetRESGID(r, e, s auth.GID) error {\nfunc (t *Task) setKGIDsUncheckedLocked(newR, newE, newS auth.KGID) {\noldE := t.creds.EffectiveKGID\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.RealKGID, t.creds.EffectiveKGID, t.creds.SavedKGID = newR, newE, newS\n// Not documented, but compare Linux's kernel/cred.c:commit_creds().\n@@ -321,6 +325,7 @@ func (t *Task) SetExtraGIDs(gids []auth.GID) error {\n}\nkgids[i] = kgid\n}\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.ExtraKGIDs = kgids\nreturn nil\n}\n@@ -352,6 +357,7 @@ func (t *Task) SetCapabilitySets(permitted, inheritable, effective auth.Capabili\nif inheritable & ^(t.creds.InheritableCaps|t.creds.BoundingCaps) != 0 {\nreturn syserror.EPERM\n}\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.PermittedCaps = permitted\nt.creds.InheritableCaps = inheritable\nt.creds.EffectiveCaps = effective\n@@ -384,6 +390,7 @@ func (t *Task) SetUserNamespace(ns *auth.UserNamespace) error {\nreturn syserror.EPERM\n}\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.UserNamespace = ns\n// \"The child process created by clone(2) with the CLONE_NEWUSER flag\n// starts out with a complete set of capabilities in the new user\n@@ -407,6 +414,7 @@ func (t *Task) SetUserNamespace(ns *auth.UserNamespace) error {\nfunc (t *Task) SetKeepCaps(k bool) {\nt.mu.Lock()\ndefer t.mu.Unlock()\n+ t.creds = t.creds.Fork() // See doc for creds.\nt.creds.KeepCaps = k\n}\n@@ -491,6 +499,8 @@ func (t *Task) updateCredsForExecLocked() {\n}\n}\n+ t.creds = t.creds.Fork() // See doc for creds.\n+\n// Now we enter poorly-documented, somewhat confusing territory. (The\n// accompanying comment in Linux's security/commoncap.c:cap_bprm_set_creds\n// is not very helpful.) My reading of it is:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -415,14 +415,14 @@ func Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// accessContext should only be used for access(2).\ntype accessContext struct {\ncontext.Context\n- creds auth.Credentials\n+ creds *auth.Credentials\n}\n// Value implements context.Context.\nfunc (ac accessContext) Value(key interface{}) interface{} {\nswitch key {\ncase auth.CtxCredentials:\n- return &ac.creds\n+ return ac.creds\ndefault:\nreturn ac.Context.Value(key)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | kernel: mutations on creds now require a copy.
PiperOrigin-RevId: 205315612
Change-Id: I9a0a1e32c8abfb7467a38743b82449cc92830316 |
259,881 | 20.07.2018 12:58:59 | 25,200 | 5f134b3c0a08c0e170aa50ad3342df59832b4356 | Format getcwd path | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/linux64.go",
"new_path": "pkg/sentry/strace/linux64.go",
"diff": "@@ -96,7 +96,7 @@ var linuxAMD64 = SyscallMap{\n76: makeSyscallInfo(\"truncate\", Path, Hex),\n77: makeSyscallInfo(\"ftruncate\", Hex, Hex),\n78: makeSyscallInfo(\"getdents\", Hex, Hex, Hex),\n- 79: makeSyscallInfo(\"getcwd\", Hex, Hex),\n+ 79: makeSyscallInfo(\"getcwd\", PostPath, Hex),\n80: makeSyscallInfo(\"chdir\", Path),\n81: makeSyscallInfo(\"fchdir\", Hex),\n82: makeSyscallInfo(\"rename\", Path, Path),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/strace.go",
"new_path": "pkg/sentry/strace/strace.go",
"diff": "@@ -346,6 +346,8 @@ func (i *SyscallInfo) post(t *kernel.Task, args arch.SyscallArguments, rval uint\noutput[arg] = msghdr(t, args[arg].Pointer(), false /* content */, uint64(maximumBlobSize))\ncase RecvMsgHdr:\noutput[arg] = msghdr(t, args[arg].Pointer(), true /* content */, uint64(maximumBlobSize))\n+ case PostPath:\n+ output[arg] = path(t, args[arg].Pointer())\ncase PipeFDs:\noutput[arg] = fdpair(t, args[arg].Pointer())\ncase Uname:\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/syscalls.go",
"new_path": "pkg/sentry/strace/syscalls.go",
"diff": "@@ -75,6 +75,10 @@ const (\n// Path is a pointer to a char* path.\nPath\n+ // PostPath is a pointer to a char* path, formatted after syscall\n+ // execution.\n+ PostPath\n+\n// ExecveStringVector is a NULL-terminated array of strings. Enforces\n// the maximum execve array length.\nExecveStringVector\n"
}
] | Go | Apache License 2.0 | google/gvisor | Format getcwd path
PiperOrigin-RevId: 205440332
Change-Id: I2a838f363e079164c83da88e1b0b8769844fe79b |
259,991 | 20.07.2018 16:17:00 | 25,200 | f543ada15005e6e2d31a63148a74fbdc43d070de | Removed a now incorrect reference to restoreFile. | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -54,8 +54,6 @@ type Sandbox struct {\n}\n// Create creates the sandbox process.\n-//\n-// If restoreFile is not empty, the sandbox will be restored from file.\nfunc Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket string) (*Sandbox, error) {\ns := &Sandbox{ID: id}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Removed a now incorrect reference to restoreFile.
PiperOrigin-RevId: 205470108
Change-Id: I226878a887fe1133561005357a9e3b09428b06b6 |
259,992 | 23.07.2018 13:30:29 | 25,200 | d7a34790a0cc3cfdef9d9e54f17c4bc0a6819900 | Add KVM and overlay dimensions to container_test | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/kvm.go",
"new_path": "pkg/sentry/platform/kvm/kvm.go",
"diff": "@@ -66,7 +66,7 @@ func New() (*KVM, error) {\nring0.Init(cpuid.HostFeatureSet())\n})\nif globalErr != nil {\n- return nil, err\n+ return nil, globalErr\n}\n// Create a new VM fd.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -26,17 +26,21 @@ go_library(\ngo_test(\nname = \"container_test\",\n- size = \"small\",\n+ size = \"medium\",\nsrcs = [\"container_test.go\"],\ndata = [\n\"//runsc\",\n],\n+ tags = [\n+ \"requires-kvm\",\n+ ],\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/log\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/unet\",\n+ \"//runsc/boot\",\n\"//runsc/container\",\n\"//runsc/specutils\",\n\"//runsc/test/testutil\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -36,6 +36,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n+ \"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/container\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n@@ -159,8 +160,8 @@ func readOutputNum(f *os.File, first bool) (int, error) {\n// run starts the sandbox and waits for it to exit, checking that the\n// application succeeded.\n-func run(spec *specs.Spec) error {\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+func run(spec *specs.Spec, conf *boot.Config) error {\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nreturn fmt.Errorf(\"error setting up container: %v\", err)\n}\n@@ -186,15 +187,45 @@ func run(spec *specs.Spec) error {\nreturn nil\n}\n+type configOptions int\n+\n+const (\n+ overlay configOptions = 1 << iota\n+ kvm\n+)\n+const all = overlay | kvm\n+\n+// configs generates different configurations to run tests.\n+func configs(opts configOptions) []*boot.Config {\n+ cs := []*boot.Config{testutil.TestConfig()}\n+\n+ if opts&overlay != 0 {\n+ c := testutil.TestConfig()\n+ c.Overlay = true\n+ cs = append(cs, c)\n+ }\n+\n+ // TODO: KVM doesn't work with --race.\n+ if !testutil.RaceEnabled && opts&kvm != 0 {\n+ c := testutil.TestConfig()\n+ c.Platform = boot.PlatformKVM\n+ cs = append(cs, c)\n+ }\n+\n+ return cs\n+}\n+\n// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.\n// It verifies after each step that the container can be loaded from disk, and\n// has the correct status.\nfunc TestLifecycle(t *testing.T) {\n+ for _, conf := range configs(all) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n// The container will just sleep for a long time. We will kill it before\n// it finishes sleeping.\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -318,9 +349,12 @@ func TestLifecycle(t *testing.T) {\nt.Errorf(\"expected loading destroyed container to fail, but it did not\")\n}\n}\n+}\n// Test the we can execute the application with different path formats.\nfunc TestExePath(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\nfor _, test := range []struct {\npath string\nsuccess bool\n@@ -333,7 +367,7 @@ func TestExePath(t *testing.T) {\n{path: \"/bin/thisfiledoesntexit\", success: false},\n} {\nspec := testutil.NewSpecWithArgs(test.path)\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"exec: %s, error setting up container: %v\", test.path, err)\n}\n@@ -357,13 +391,14 @@ func TestExePath(t *testing.T) {\n}\n}\n}\n+}\n// Test the we can retrieve the application exit status from the container.\nfunc TestAppExitStatus(t *testing.T) {\n// First container will succeed.\nsuccSpec := testutil.NewSpecWithArgs(\"true\")\n-\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(succSpec)\n+ conf := testutil.TestConfig()\n+ rootDir, bundleDir, err := testutil.SetupContainer(succSpec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -382,7 +417,7 @@ func TestAppExitStatus(t *testing.T) {\nwantStatus := 123\nerrSpec := testutil.NewSpecWithArgs(\"bash\", \"-c\", fmt.Sprintf(\"exit %d\", wantStatus))\n- rootDir2, bundleDir2, conf, err := testutil.SetupContainer(errSpec)\n+ rootDir2, bundleDir2, err := testutil.SetupContainer(errSpec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -400,10 +435,13 @@ func TestAppExitStatus(t *testing.T) {\n// TestExec verifies that a container can exec a new program.\nfunc TestExec(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -479,6 +517,7 @@ func TestExec(t *testing.T) {\n}\n}\n}\n+}\n// TestCheckpointRestore creates a container that continuously writes successive integers\n// to a file. To test checkpoint and restore functionality, the container is\n@@ -486,27 +525,34 @@ func TestExec(t *testing.T) {\n// new containers and the first number printed from these containers is checked. Both should\n// be the next consecutive number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n- outputPath := filepath.Join(os.TempDir(), \"output\")\n- // Make sure it does not already exist.\n- os.Remove(outputPath)\n+ // Skip overlay because test requires writing to host file.\n+ for _, conf := range configs(kvm) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+ dir, err := ioutil.TempDir(\"\", \"checkpoint-test\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir failed: %v\", err)\n+ }\n+ if err := os.Chmod(dir, 0777); err != nil {\n+ t.Fatalf(\"error chmoding file: %q, %v\", dir, err)\n+ }\n+\n+ outputPath := filepath.Join(dir, \"output\")\noutputFile, err := createWriteableOutputFile(outputPath)\nif err != nil {\nt.Fatalf(\"error creating output file: %v\", err)\n}\ndefer outputFile.Close()\n- outputFileSandbox := strings.Replace(outputPath, os.TempDir(), \"/tmp2\", -1)\n-\n- script := fmt.Sprintf(\"for ((i=0; ;i++)); do echo $i >> %s; sleep 1; done\", outputFileSandbox)\n+ script := \"for ((i=0; ;i++)); do echo $i >> /tmp2/output; sleep 1; done\"\nspec := testutil.NewSpecWithArgs(\"bash\", \"-c\", script)\nspec.Mounts = append(spec.Mounts, specs.Mount{\nType: \"bind\",\nDestination: \"/tmp2\",\n- Source: os.TempDir(),\n+ Source: dir,\n})\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -524,7 +570,7 @@ func TestCheckpointRestore(t *testing.T) {\n}\n// Set the image path, which is where the checkpoint image will be saved.\n- imagePath := filepath.Join(os.TempDir(), \"test-image-file\")\n+ imagePath := filepath.Join(dir, \"test-image-file\")\n// Create the image file and open for writing.\nfile, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)\n@@ -608,7 +654,7 @@ func TestCheckpointRestore(t *testing.T) {\nif lastNum+1 != firstNum2 {\nt.Errorf(\"error numbers not in order, previous: %d, next: %d\", lastNum, firstNum2)\n}\n-\n+ }\n}\n// TestPauseResume tests that we can successfully pause and resume a container.\n@@ -617,10 +663,12 @@ func TestCheckpointRestore(t *testing.T) {\n// It will then unpause and confirm that both processes are running. Then it will\n// wait until one sleep completes and check to make sure the other is running.\nfunc TestPauseResume(t *testing.T) {\n+ for _, conf := range configs(all) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -715,14 +763,15 @@ func TestPauseResume(t *testing.T) {\nt.Fatal(err)\n}\n}\n+}\n// TestPauseResumeStatus makes sure that the statuses are set correctly\n// with calls to pause and resume and that pausing and resuming only\n// occurs given the correct state.\nfunc TestPauseResumeStatus(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n-\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ conf := testutil.TestConfig()\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -780,6 +829,10 @@ func TestPauseResumeStatus(t *testing.T) {\nfunc TestCapabilities(t *testing.T) {\nconst uid = 343\nconst gid = 2401\n+\n+ for _, conf := range configs(all) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n// We generate files in the host temporary directory.\n@@ -789,7 +842,7 @@ func TestCapabilities(t *testing.T) {\nType: \"bind\",\n})\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -863,11 +916,14 @@ func TestCapabilities(t *testing.T) {\nt.Fatalf(\"container failed to exec %v: %v\", execArgs, err)\n}\n}\n+}\n// Test that an tty FD is sent over the console socket if one is provided.\nfunc TestConsoleSocket(t *testing.T) {\n+ for _, conf := range configs(all) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\nspec := testutil.NewSpecWithArgs(\"true\")\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -942,10 +998,14 @@ func TestConsoleSocket(t *testing.T) {\nt.Fatalf(\"error destroying container: %v\", err)\n}\n}\n+}\n// TestRunNonRoot checks that sandbox can be configured when running as\n// non-privileged user.\nfunc TestRunNonRoot(t *testing.T) {\n+ for _, conf := range configs(kvm) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nspec := testutil.NewSpecWithArgs(\"/bin/true\")\nspec.Process.User.UID = 343\nspec.Process.User.GID = 2401\n@@ -971,14 +1031,18 @@ func TestRunNonRoot(t *testing.T) {\nType: \"bind\",\n})\n- if err := run(spec); err != nil {\n+ if err := run(spec, conf); err != nil {\nt.Fatalf(\"error running sadbox: %v\", err)\n}\n}\n+}\n// TestMountNewDir checks that runsc will create destination directory if it\n// doesn't exit.\nfunc TestMountNewDir(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nsrcDir := path.Join(os.TempDir(), \"src\", \"newdir\", \"anotherdir\")\nif err := os.MkdirAll(srcDir, 0755); err != nil {\nt.Fatalf(\"os.MkDir(%q) failed: %v\", srcDir, err)\n@@ -998,15 +1062,19 @@ func TestMountNewDir(t *testing.T) {\nType: \"bind\",\n})\n- if err := run(spec); err != nil {\n+ if err := run(spec, conf); err != nil {\nt.Fatalf(\"error running sadbox: %v\", err)\n}\n}\n+}\nfunc TestReadonlyRoot(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nspec := testutil.NewSpecWithArgs(\"/bin/touch\", \"/foo\")\nspec.Root.Readonly = true\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -1032,8 +1100,12 @@ func TestReadonlyRoot(t *testing.T) {\nt.Fatalf(\"container failed, waitStatus: %v\", ws)\n}\n}\n+}\nfunc TestReadonlyMount(t *testing.T) {\n+ for _, conf := range configs(overlay) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\nspec := testutil.NewSpecWithArgs(\"/bin/touch\", \"/foo/file\")\ndir, err := ioutil.TempDir(\"\", \"ro-mount\")\nif err != nil {\n@@ -1047,7 +1119,7 @@ func TestReadonlyMount(t *testing.T) {\n})\nspec.Root.Readonly = false\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -1073,6 +1145,7 @@ func TestReadonlyMount(t *testing.T) {\nt.Fatalf(\"container failed, waitStatus: %v\", ws)\n}\n}\n+}\n// TestAbbreviatedIDs checks that runsc supports using abbreviated container\n// IDs in place of full IDs.\n@@ -1089,7 +1162,8 @@ func TestAbbreviatedIDs(t *testing.T) {\n}\nfor _, cid := range cids {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n- bundleDir, conf, err := testutil.SetupContainerInRoot(rootDir, spec)\n+ conf := testutil.TestConfig()\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -1134,6 +1208,9 @@ func TestAbbreviatedIDs(t *testing.T) {\n// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n// containers in the same sandbox.\nfunc TestMultiContainerSanity(t *testing.T) {\n+ for _, conf := range configs(all) {\n+ t.Logf(\"Running test with conf: %+v\", conf)\n+\ncontainerIDs := []string{\ntestutil.UniqueContainerID(),\ntestutil.UniqueContainerID(),\n@@ -1162,7 +1239,7 @@ func TestMultiContainerSanity(t *testing.T) {\nfor i, annotations := range containerAnnotations {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\nspec.Annotations = annotations\n- bundleDir, conf, err := testutil.SetupContainerInRoot(rootDir, spec)\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n@@ -1200,6 +1277,7 @@ func TestMultiContainerSanity(t *testing.T) {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n}\n+}\nfunc TestMultiContainerWait(t *testing.T) {\ncontainerIDs := []string{\n@@ -1238,7 +1316,8 @@ func TestMultiContainerWait(t *testing.T) {\nfor i, annotations := range containerAnnotations {\nspec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\nspec.Annotations = annotations\n- bundleDir, conf, err := testutil.SetupContainerInRoot(rootDir, spec)\n+ conf := testutil.TestConfig()\n+ bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox_test.go",
"new_path": "runsc/sandbox/sandbox_test.go",
"diff": "@@ -31,7 +31,8 @@ func init() {\nfunc TestGoferExits(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"/bin/sleep\", \"10000\")\n- rootDir, bundleDir, conf, err := testutil.SetupContainer(spec)\n+ conf := testutil.TestConfig()\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/BUILD",
"new_path": "runsc/test/testutil/BUILD",
"diff": "@@ -7,6 +7,7 @@ go_library(\nsrcs = [\n\"docker.go\",\n\"testutil.go\",\n+ \"testutil_race.go\",\n],\nimportpath = \"gvisor.googlesource.com/gvisor/runsc/test/testutil\",\nvisibility = [\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -29,6 +29,9 @@ import (\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n)\n+// RaceEnabled is set to true if it was built with '--race' option.\n+var RaceEnabled = false\n+\n// ConfigureExePath configures the executable for runsc in the test environment.\nfunc ConfigureExePath() error {\n@@ -66,6 +69,18 @@ func ConfigureExePath() error {\nreturn nil\n}\n+// TestConfig return the default configuration to use in tests.\n+func TestConfig() *boot.Config {\n+ return &boot.Config{\n+ Debug: true,\n+ LogFormat: \"text\",\n+ LogPackets: true,\n+ Network: boot.NetworkNone,\n+ Strace: true,\n+ MultiContainer: true,\n+ }\n+}\n+\n// NewSpecWithArgs creates a simple spec with the given args suitable for use\n// in tests.\nfunc NewSpecWithArgs(args ...string) *specs.Spec {\n@@ -96,38 +111,29 @@ func SetupRootDir() (string, error) {\n// SetupContainer creates a bundle and root dir for the container, generates a\n// test config, and writes the spec to config.json in the bundle dir.\n-func SetupContainer(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Config, err error) {\n+func SetupContainer(spec *specs.Spec, conf *boot.Config) (rootDir, bundleDir string, err error) {\nrootDir, err = SetupRootDir()\nif err != nil {\n- return \"\", \"\", nil, err\n+ return \"\", \"\", err\n}\n- bundleDir, conf, err = SetupContainerInRoot(rootDir, spec)\n- return rootDir, bundleDir, conf, err\n+ bundleDir, err = SetupContainerInRoot(rootDir, spec, conf)\n+ return rootDir, bundleDir, err\n}\n// SetupContainerInRoot creates a bundle for the container, generates a test\n// config, and writes the spec to config.json in the bundle dir.\n-func SetupContainerInRoot(rootDir string, spec *specs.Spec) (bundleDir string, conf *boot.Config, err error) {\n+func SetupContainerInRoot(rootDir string, spec *specs.Spec, conf *boot.Config) (bundleDir string, err error) {\nbundleDir, err = ioutil.TempDir(\"\", \"bundle\")\nif err != nil {\n- return \"\", nil, fmt.Errorf(\"error creating bundle dir: %v\", err)\n+ return \"\", fmt.Errorf(\"error creating bundle dir: %v\", err)\n}\nif err = writeSpec(bundleDir, spec); err != nil {\n- return \"\", nil, fmt.Errorf(\"error writing spec: %v\", err)\n- }\n-\n- conf = &boot.Config{\n- Debug: true,\n- LogFormat: \"text\",\n- LogPackets: true,\n- Network: boot.NetworkNone,\n- RootDir: rootDir,\n- Strace: true,\n- MultiContainer: true,\n+ return \"\", fmt.Errorf(\"error writing spec: %v\", err)\n}\n- return bundleDir, conf, nil\n+ conf.RootDir = rootDir\n+ return bundleDir, nil\n}\n// writeSpec writes the spec to disk in the given directory.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/testutil/testutil_race.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// +build race\n+\n+package testutil\n+\n+func init() {\n+ RaceEnabled = true\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add KVM and overlay dimensions to container_test
PiperOrigin-RevId: 205714667
Change-Id: I317a2ca98ac3bdad97c4790fcc61b004757d99ef |
259,991 | 23.07.2018 13:54:33 | 25,200 | b5113574feb79b2266d603aa760a9df468725d87 | Created a docker integration test for a tomcat image. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/BUILD",
"new_path": "runsc/test/image/BUILD",
"diff": "@@ -8,6 +8,7 @@ go_test(\nsrcs = [\n\"image_test.go\",\n\"python_test.go\",\n+ \"tomcat_test.go\",\n],\ndata = [\n\"latin10k.txt\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "runsc/test/image/tomcat_test.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package image\n+\n+import (\n+ \"fmt\"\n+ \"net/http\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n+)\n+\n+func TestTomcat(t *testing.T) {\n+ d := testutil.MakeDocker(\"tomcat-test\")\n+ if out, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ }\n+ defer d.CleanUp()\n+\n+ // Find where port 8080 is mapped to.\n+ port, err := d.FindPort(8080)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(8080) failed: %v\", err)\n+ }\n+\n+ // Wait until it's up and running.\n+ if err := d.WaitForHTTP(port, 10*time.Second); err != nil {\n+ t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ }\n+\n+ // Ensure that content is being served.\n+ url := fmt.Sprintf(\"http://localhost:%d\", port)\n+ resp, err := http.Get(url)\n+ if err != nil {\n+ t.Errorf(\"Error reaching http server: %v\", err)\n+ }\n+ if want := http.StatusOK; resp.StatusCode != want {\n+ t.Errorf(\"Wrong response code, got: %d, want: %d\", resp.StatusCode, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Created a docker integration test for a tomcat image.
PiperOrigin-RevId: 205718733
Change-Id: I200b23af064d256f157baf9da5005ab16cc55928 |
259,962 | 23.07.2018 15:14:19 | 25,200 | da48c04d0df4bb044624cc3e7003ab3e973336de | Refactor new reno congestion control logic out of sender.
This CL also puts the congestion control logic behind an
interface so that we can easily swap it out for say CUBIC
in the future. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -10,6 +10,7 @@ go_stateify(\n\"endpoint.go\",\n\"endpoint_state.go\",\n\"rcv.go\",\n+ \"reno.go\",\n\"segment.go\",\n\"segment_heap.go\",\n\"segment_queue.go\",\n@@ -44,6 +45,7 @@ go_library(\n\"forwarder.go\",\n\"protocol.go\",\n\"rcv.go\",\n+ \"reno.go\",\n\"sack.go\",\n\"segment.go\",\n\"segment_heap.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/reno.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+// renoState stores the variables related to TCP New Reno congestion\n+// control algorithm.\n+type renoState struct {\n+ s *sender\n+}\n+\n+// newRenoCC initializes the state for the NewReno congestion control algorithm.\n+func newRenoCC(s *sender) *renoState {\n+ return &renoState{s: s}\n+}\n+\n+// updateSlowStart will update the congestion window as per the slow-start\n+// algorithm used by NewReno. If after adjusting the congestion window\n+// we cross the SSthreshold then it will return the number of packets that\n+// must be consumed in congestion avoidance mode.\n+func (r *renoState) updateSlowStart(packetsAcked int) int {\n+ // Don't let the congestion window cross into the congestion\n+ // avoidance range.\n+ newcwnd := r.s.sndCwnd + packetsAcked\n+ if newcwnd >= r.s.sndSsthresh {\n+ newcwnd = r.s.sndSsthresh\n+ r.s.sndCAAckCount = 0\n+ }\n+\n+ packetsAcked -= newcwnd - r.s.sndCwnd\n+ r.s.sndCwnd = newcwnd\n+ return packetsAcked\n+}\n+\n+// updateCongestionAvoidance will update congestion window in congestion\n+// avoidance mode as described in RFC5681 section 3.1\n+func (r *renoState) updateCongestionAvoidance(packetsAcked int) {\n+ // Consume the packets in congestion avoidance mode.\n+ r.s.sndCAAckCount += packetsAcked\n+ if r.s.sndCAAckCount >= r.s.sndCwnd {\n+ r.s.sndCwnd += r.s.sndCAAckCount / r.s.sndCwnd\n+ r.s.sndCAAckCount = r.s.sndCAAckCount % r.s.sndCwnd\n+ }\n+}\n+\n+// reduceSlowStartThreshold reduces the slow-start threshold per RFC 5681,\n+// page 6, eq. 4. It is called when we detect congestion in the network.\n+func (r *renoState) reduceSlowStartThreshold() {\n+ r.s.sndSsthresh = r.s.outstanding / 2\n+ if r.s.sndSsthresh < 2 {\n+ r.s.sndSsthresh = 2\n+ }\n+\n+}\n+\n+// Update updates the congestion state based on the number of packets that\n+// were acknowledged.\n+// Update implements congestionControl.Update.\n+func (r *renoState) Update(packetsAcked int) {\n+ if r.s.sndCwnd < r.s.sndSsthresh {\n+ packetsAcked = r.updateSlowStart(packetsAcked)\n+ if packetsAcked == 0 {\n+ return\n+ }\n+ }\n+ r.updateCongestionAvoidance(packetsAcked)\n+}\n+\n+// HandleNDupAcks implements congestionControl.HandleNDupAcks.\n+func (r *renoState) HandleNDupAcks() {\n+ // A retransmit was triggered due to nDupAckThreshold\n+ // being hit. Reduce our slow start threshold.\n+ r.reduceSlowStartThreshold()\n+}\n+\n+// HandleRTOExpired implements congestionControl.HandleRTOExpired.\n+func (r *renoState) HandleRTOExpired() {\n+ // We lost a packet, so reduce ssthresh.\n+ r.reduceSlowStartThreshold()\n+\n+ // Reduce the congestion window to 1, i.e., enter slow-start. Per\n+ // RFC 5681, page 7, we must use 1 regardless of the value of the\n+ // initial congestion window.\n+ r.s.sndCwnd = 1\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -31,8 +31,28 @@ const (\n// InitialCwnd is the initial congestion window.\nInitialCwnd = 10\n+\n+ // nDupAckThreshold is the number of duplicate ACK's required\n+ // before fast-retransmit is entered.\n+ nDupAckThreshold = 3\n)\n+// congestionControl is an interface that must be implemented by any supported\n+// congestion control algorithm.\n+type congestionControl interface {\n+ // HandleNDupAcks is invoked when sender.dupAckCount >= nDupAckThreshold\n+ // just before entering fast retransmit.\n+ HandleNDupAcks()\n+\n+ // HandleRTOExpired is invoked when the retransmit timer expires.\n+ HandleRTOExpired()\n+\n+ // Update is invoked when processing inbound acks. It's passed the\n+ // number of packet's that were acked by the most recent cumulative\n+ // acknowledgement.\n+ Update(packetsAcked int)\n+}\n+\n// sender holds the state necessary to send TCP segments.\ntype sender struct {\nep *endpoint\n@@ -107,6 +127,9 @@ type sender struct {\n// maxSentAck is the maxium acknowledgement actually sent.\nmaxSentAck seqnum.Value\n+\n+ // cc is the congestion control algorithm in use for this sender.\n+ cc congestionControl\n}\n// fastRecovery holds information related to fast recovery from a packet loss.\n@@ -147,6 +170,8 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\n},\n}\n+ s.cc = newRenoCC(s)\n+\n// A negative sndWndScale means that no scaling is in use, otherwise we\n// store the scaling value.\nif sndWndScale > 0 {\n@@ -251,15 +276,6 @@ func (s *sender) resendSegment() {\n}\n}\n-// reduceSlowStartThreshold reduces the slow-start threshold per RFC 5681,\n-// page 6, eq. 4. It is called when we detect congestion in the network.\n-func (s *sender) reduceSlowStartThreshold() {\n- s.sndSsthresh = s.outstanding / 2\n- if s.sndSsthresh < 2 {\n- s.sndSsthresh = 2\n- }\n-}\n-\n// retransmitTimerExpired is called when the retransmit timer expires, and\n// unacknowledged segments are assumed lost, and thus need to be resent.\n// Returns true if the connection is still usable, or false if the connection\n@@ -292,13 +308,7 @@ func (s *sender) retransmitTimerExpired() bool {\n// we were not in fast recovery.\ns.fr.last = s.sndNxt - 1\n- // We lost a packet, so reduce ssthresh.\n- s.reduceSlowStartThreshold()\n-\n- // Reduce the congestion window to 1, i.e., enter slow-start. Per\n- // RFC 5681, page 7, we must use 1 regardless of the value of the\n- // initial congestion window.\n- s.sndCwnd = 1\n+ s.cc.HandleRTOExpired()\n// Mark the next segment to be sent as the first unacknowledged one and\n// start sending again. Set the number of outstanding packets to 0 so\n@@ -395,8 +405,6 @@ func (s *sender) sendData() {\n}\nfunc (s *sender) enterFastRecovery() {\n- // Save state to reflect we're now in fast recovery.\n- s.reduceSlowStartThreshold()\n// Save state to reflect we're now in fast recovery.\n// See : https://tools.ietf.org/html/rfc5681#section-3.2 Step 3.\n// We inflat the cwnd by 3 to account for the 3 packets which triggered\n@@ -474,9 +482,9 @@ func (s *sender) checkDuplicateAck(seg *segment) bool {\nreturn false\n}\n- // Enter fast recovery when we reach 3 dups.\ns.dupAckCount++\n- if s.dupAckCount != 3 {\n+ // Do not enter fast recovery until we reach nDupAckThreshold.\n+ if s.dupAckCount < nDupAckThreshold {\nreturn false\n}\n@@ -489,6 +497,8 @@ func (s *sender) checkDuplicateAck(seg *segment) bool {\ns.dupAckCount = 0\nreturn false\n}\n+\n+ s.cc.HandleNDupAcks()\ns.enterFastRecovery()\ns.dupAckCount = 0\nreturn true\n@@ -497,29 +507,6 @@ func (s *sender) checkDuplicateAck(seg *segment) bool {\n// updateCwnd updates the congestion window based on the number of packets that\n// were acknowledged.\nfunc (s *sender) updateCwnd(packetsAcked int) {\n- if s.sndCwnd < s.sndSsthresh {\n- // Don't let the congestion window cross into the congestion\n- // avoidance range.\n- newcwnd := s.sndCwnd + packetsAcked\n- if newcwnd >= s.sndSsthresh {\n- newcwnd = s.sndSsthresh\n- s.sndCAAckCount = 0\n- }\n-\n- packetsAcked -= newcwnd - s.sndCwnd\n- s.sndCwnd = newcwnd\n- if packetsAcked == 0 {\n- // We've consumed all ack'd packets.\n- return\n- }\n- }\n-\n- // Consume the packets in congestion avoidance mode.\n- s.sndCAAckCount += packetsAcked\n- if s.sndCAAckCount >= s.sndCwnd {\n- s.sndCwnd += s.sndCAAckCount / s.sndCwnd\n- s.sndCAAckCount = s.sndCAAckCount % s.sndCwnd\n- }\n}\n// handleRcvdSegment is called when a segment is received; it is responsible for\n@@ -580,7 +567,7 @@ func (s *sender) handleRcvdSegment(seg *segment) {\n// If we are not in fast recovery then update the congestion\n// window based on the number of acknowledged packets.\nif !s.fr.active {\n- s.updateCwnd(originalOutstanding - s.outstanding)\n+ s.cc.Update(originalOutstanding - s.outstanding)\n}\n// It is possible for s.outstanding to drop below zero if we get\n"
}
] | Go | Apache License 2.0 | google/gvisor | Refactor new reno congestion control logic out of sender.
This CL also puts the congestion control logic behind an
interface so that we can easily swap it out for say CUBIC
in the future.
PiperOrigin-RevId: 205732848
Change-Id: I891cdfd17d4d126b658b5faa0c6bd6083187944b |
259,881 | 25.07.2018 11:05:59 | 25,200 | 7cd9405b9cc112ebe352af0e5f13b7b57628001b | Format openat flags | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/strace/linux64.go",
"new_path": "pkg/sentry/strace/linux64.go",
"diff": "@@ -274,7 +274,7 @@ var linuxAMD64 = SyscallMap{\n254: makeSyscallInfo(\"inotify_add_watch\", Hex, Hex, Hex),\n255: makeSyscallInfo(\"inotify_rm_watch\", Hex, Hex),\n256: makeSyscallInfo(\"migrate_pages\", Hex, Hex, Hex, Hex),\n- 257: makeSyscallInfo(\"openat\", Hex, Path, Hex, Mode),\n+ 257: makeSyscallInfo(\"openat\", Hex, Path, OpenFlags, Mode),\n258: makeSyscallInfo(\"mkdirat\", Hex, Path, Hex),\n259: makeSyscallInfo(\"mknodat\", Hex, Path, Mode, Hex),\n260: makeSyscallInfo(\"fchownat\", Hex, Path, Hex, Hex, Hex),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Format openat flags
PiperOrigin-RevId: 206021774
Change-Id: I447b6c751c28a8d8d4d78468b756b6ad8c61e169 |
259,992 | 25.07.2018 17:36:52 | 25,200 | e5adf42f66a3090f6124bceb5487238bf7526302 | Replace sleeps with waits in tests - part I | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -122,18 +122,24 @@ func createWriteableOutputFile(path string) (*os.File, error) {\nreturn outputFile, nil\n}\n-func readOutputNum(f *os.File, first bool) (int, error) {\n- var num int\n- time.Sleep(1 * time.Second)\n-\n- // Check that f exists and contains counting data.\n- fileInfo, err := f.Stat()\n+func waitForFile(f *os.File) error {\n+ op := func() error {\n+ fi, err := f.Stat()\nif err != nil {\n- return 0, fmt.Errorf(\"error creating output file: %v\", err)\n+ return err\n+ }\n+ if fi.Size() == 0 {\n+ return fmt.Errorf(\"file %q is empty\", f.Name())\n+ }\n+ return nil\n+ }\n+ return testutil.Poll(op, 5*time.Second)\n}\n- if fileInfo.Size() == 0 {\n- return 0, fmt.Errorf(\"failed to write to file, file still appears empty\")\n+func readOutputNum(f *os.File, first bool) (int, error) {\n+ // Wait until file has contents.\n+ if err := waitForFile(f); err != nil {\n+ return 0, err\n}\n// Read the first number in the new file\n@@ -147,6 +153,7 @@ func readOutputNum(f *os.File, first bool) (int, error) {\nnums := strings.Split(string(b), \"\\n\")\n+ var num int\nif first {\nnum, err = strconv.Atoi(nums[0])\n} else {\n@@ -579,7 +586,10 @@ func TestCheckpointRestore(t *testing.T) {\n}\ndefer file.Close()\n- time.Sleep(1 * time.Second)\n+ // Wait until application has ran.\n+ if err := waitForFile(outputFile); err != nil {\n+ t.Fatalf(\"Failed to wait for output file: %v\", err)\n+ }\n// Checkpoint running container; save state into new file.\nif err := cont.Checkpoint(file); err != nil {\n@@ -727,7 +737,7 @@ func TestPauseResume(t *testing.T) {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n- time.Sleep(10 * time.Second)\n+ time.Sleep(6 * time.Second)\n// Verify that the two processes still exist. Sleep 5 is paused so\n// it should still be in the process list after 10 seconds.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/BUILD",
"new_path": "runsc/test/testutil/BUILD",
"diff": "@@ -16,6 +16,7 @@ go_library(\ndeps = [\n\"//runsc/boot\",\n\"//runsc/specutils\",\n+ \"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "package testutil\nimport (\n+ \"context\"\n\"encoding/json\"\n\"fmt\"\n\"io\"\n@@ -24,6 +25,7 @@ import (\n\"path/filepath\"\n\"time\"\n+ \"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -172,3 +174,11 @@ func Copy(src, dst string) error {\n_, err = io.Copy(out, in)\nreturn err\n}\n+\n+// Poll is a shorthand function to poll for something with given timeout.\n+func Poll(cb func() error, timeout time.Duration) error {\n+ ctx, cancel := context.WithTimeout(context.Background(), timeout)\n+ defer cancel()\n+ b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n+ return backoff.Retry(cb, b)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Replace sleeps with waits in tests - part I
PiperOrigin-RevId: 206084473
Change-Id: I44e1b64b9cdd2964357799dca27cc0cbc19ce07d |
259,992 | 27.07.2018 10:08:59 | 25,200 | b8f96a9d0b9868060025e7a89e99e1b30d17fa8b | Replace sleeps with waits in tests - part II | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -673,11 +673,26 @@ func TestCheckpointRestore(t *testing.T) {\n// It will then unpause and confirm that both processes are running. Then it will\n// wait until one sleep completes and check to make sure the other is running.\nfunc TestPauseResume(t *testing.T) {\n- for _, conf := range configs(all) {\n+ for _, conf := range configs(kvm) {\nt.Logf(\"Running test with conf: %+v\", conf)\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n+ dir, err := ioutil.TempDir(\"\", \"pause-test\")\n+ if err != nil {\n+ t.Fatalf(\"ioutil.TempDir failed: %v\", err)\n+ }\n+ lock, err := ioutil.TempFile(dir, \"lock\")\n+ if err != nil {\n+ t.Fatalf(\"error creating output file: %v\", err)\n+ }\n+ defer lock.Close()\n+ spec.Mounts = append(spec.Mounts, specs.Mount{\n+ Type: \"bind\",\n+ Destination: \"/tmp2\",\n+ Source: dir,\n+ })\n+\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -709,19 +724,20 @@ func TestPauseResume(t *testing.T) {\nPID: 2,\nPPID: 0,\nC: 0,\n- Cmd: \"sleep\",\n+ Cmd: \"bash\",\n},\n}\n+ script := fmt.Sprintf(\"while [[ -f /tmp2/%s ]]; do sleep 0.1; done\", filepath.Base(lock.Name()))\nexecArgs := control.ExecArgs{\n- Filename: \"/bin/sleep\",\n- Argv: []string{\"sleep\", \"5\"},\n+ Filename: \"/bin/bash\",\n+ Argv: []string{\"bash\", \"-c\", script},\nEnvv: []string{\"PATH=\" + os.Getenv(\"PATH\")},\nWorkingDirectory: \"/\",\nKUID: uid,\n}\n- // First, start running exec (whick blocks).\n+ // First, start running exec (which blocks).\ngo cont.Execute(&execArgs)\n// Verify that \"sleep 5\" is running.\n@@ -737,10 +753,14 @@ func TestPauseResume(t *testing.T) {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n- time.Sleep(6 * time.Second)\n+ if err := os.Remove(lock.Name()); err != nil {\n+ t.Fatalf(\"os.Remove(lock) failed: %v\", err)\n+ }\n+ // Script loops and sleeps for 100ms. Give a bit a time for it to exit in\n+ // case pause didn't work.\n+ time.Sleep(200 * time.Millisecond)\n- // Verify that the two processes still exist. Sleep 5 is paused so\n- // it should still be in the process list after 10 seconds.\n+ // Verify that the two processes still exist.\nif err := getAndCheckProcLists(cont, expectedPL); err != nil {\nt.Fatal(err)\n}\n@@ -753,10 +773,6 @@ func TestPauseResume(t *testing.T) {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n- if err := getAndCheckProcLists(cont, expectedPL); err != nil {\n- t.Fatal(err)\n- }\n-\nexpectedPL2 := []*control.Process{\n{\nUID: 0,\n@@ -767,8 +783,7 @@ func TestPauseResume(t *testing.T) {\n},\n}\n- // Verify there is only one process left since we waited 10 at most seconds for\n- // sleep 5 to end.\n+ // Verify that deleting the file triggered the process to exit.\nif err := waitForProcessList(cont, expectedPL2); err != nil {\nt.Fatal(err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Replace sleeps with waits in tests - part II
PiperOrigin-RevId: 206333130
Change-Id: Ic85874dbd53c5de2164a7bb75769d52d43666c2a |
259,991 | 27.07.2018 12:26:42 | 25,200 | 2793f7ac5f96b474decfff68cfde86bb5c2ed0a4 | Added the O_LARGEFILE flag.
This flag will always be true for gVisor files. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/fcntl.go",
"new_path": "pkg/abi/linux/fcntl.go",
"diff": "@@ -27,3 +27,8 @@ const (\nF_SETLKW = 7\nF_SETOWN = 8\n)\n+\n+// Flags for fcntl.\n+const (\n+ FD_CLOEXEC = 00000001\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/file.go",
"new_path": "pkg/abi/linux/file.go",
"diff": "@@ -23,8 +23,18 @@ import (\n// Constants for open(2).\nconst (\n+ O_ACCMODE = 00000003\n+ O_RDONLY = 00000000\n+ O_WRONLY = 00000001\n+ O_RDWR = 00000002\n+ O_APPEND = 00002000\nO_NONBLOCK = 00004000\n+ O_ASYNC = 00020000\n+ O_DIRECT = 00040000\n+ O_LARGEFILE = 00100000\n+ O_DIRECTORY = 00200000\nO_CLOEXEC = 02000000\n+ O_SYNC = 04010000\nO_PATH = 010000000\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/flags.go",
"new_path": "pkg/sentry/fs/flags.go",
"diff": "@@ -45,6 +45,12 @@ type FileFlags struct {\n// Async indicates that this file sends signals on IO events.\nAsync bool\n+\n+ // LargeFile indicates that this file should be opened even if it has\n+ // size greater than linux's off_t. When running in 64-bit mode,\n+ // Linux sets this flag for all files. Since gVisor is only compatible\n+ // with 64-bit Linux, it also sets this flag for all files.\n+ LargeFile bool\n}\n// SettableFileFlags is a subset of FileFlags above that can be changed\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/flags.go",
"new_path": "pkg/sentry/syscalls/linux/flags.go",
"diff": "package linux\nimport (\n- \"syscall\"\n-\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n)\n@@ -24,13 +23,13 @@ import (\n// flagsToPermissions returns a Permissions object from Linux flags.\n// This includes truncate permission if O_TRUNC is set in the mask.\nfunc flagsToPermissions(mask uint) (p fs.PermMask) {\n- switch mask & syscall.O_ACCMODE {\n- case syscall.O_WRONLY:\n+ switch mask & linux.O_ACCMODE {\n+ case linux.O_WRONLY:\np.Write = true\n- case syscall.O_RDWR:\n+ case linux.O_RDWR:\np.Write = true\np.Read = true\n- case syscall.O_RDONLY:\n+ case linux.O_RDONLY:\np.Read = true\n}\nreturn\n@@ -39,7 +38,7 @@ func flagsToPermissions(mask uint) (p fs.PermMask) {\n// fdFlagsToLinux converts a kernel.FDFlags object to a Linux representation.\nfunc fdFlagsToLinux(flags kernel.FDFlags) (mask uint) {\nif flags.CloseOnExec {\n- mask |= syscall.FD_CLOEXEC\n+ mask |= linux.FD_CLOEXEC\n}\nreturn\n}\n@@ -47,30 +46,33 @@ func fdFlagsToLinux(flags kernel.FDFlags) (mask uint) {\n// flagsToLinux converts a FileFlags object to a Linux representation.\nfunc flagsToLinux(flags fs.FileFlags) (mask uint) {\nif flags.Direct {\n- mask |= syscall.O_DIRECT\n+ mask |= linux.O_DIRECT\n}\nif flags.NonBlocking {\n- mask |= syscall.O_NONBLOCK\n+ mask |= linux.O_NONBLOCK\n}\nif flags.Sync {\n- mask |= syscall.O_SYNC\n+ mask |= linux.O_SYNC\n}\nif flags.Append {\n- mask |= syscall.O_APPEND\n+ mask |= linux.O_APPEND\n}\nif flags.Directory {\n- mask |= syscall.O_DIRECTORY\n+ mask |= linux.O_DIRECTORY\n}\nif flags.Async {\n- mask |= syscall.O_ASYNC\n+ mask |= linux.O_ASYNC\n+ }\n+ if flags.LargeFile {\n+ mask |= linux.O_LARGEFILE\n}\nswitch {\ncase flags.Read && flags.Write:\n- mask |= syscall.O_RDWR\n+ mask |= linux.O_RDWR\ncase flags.Write:\n- mask |= syscall.O_WRONLY\n+ mask |= linux.O_WRONLY\ncase flags.Read:\n- mask |= syscall.O_RDONLY\n+ mask |= linux.O_RDONLY\n}\nreturn\n}\n@@ -78,23 +80,24 @@ func flagsToLinux(flags fs.FileFlags) (mask uint) {\n// linuxToFlags converts linux file flags to a FileFlags object.\nfunc linuxToFlags(mask uint) (flags fs.FileFlags) {\nreturn fs.FileFlags{\n- Direct: mask&syscall.O_DIRECT != 0,\n- Sync: mask&syscall.O_SYNC != 0,\n- NonBlocking: mask&syscall.O_NONBLOCK != 0,\n- Read: (mask & syscall.O_ACCMODE) != syscall.O_WRONLY,\n- Write: (mask & syscall.O_ACCMODE) != syscall.O_RDONLY,\n- Append: mask&syscall.O_APPEND != 0,\n- Directory: mask&syscall.O_DIRECTORY != 0,\n- Async: mask&syscall.O_ASYNC != 0,\n+ Direct: mask&linux.O_DIRECT != 0,\n+ Sync: mask&linux.O_SYNC != 0,\n+ NonBlocking: mask&linux.O_NONBLOCK != 0,\n+ Read: (mask & linux.O_ACCMODE) != linux.O_WRONLY,\n+ Write: (mask & linux.O_ACCMODE) != linux.O_RDONLY,\n+ Append: mask&linux.O_APPEND != 0,\n+ Directory: mask&linux.O_DIRECTORY != 0,\n+ Async: mask&linux.O_ASYNC != 0,\n+ LargeFile: mask&linux.O_LARGEFILE != 0,\n}\n}\n// linuxToSettableFlags converts linux file flags to a SettableFileFlags object.\nfunc linuxToSettableFlags(mask uint) fs.SettableFileFlags {\nreturn fs.SettableFileFlags{\n- Direct: mask&syscall.O_DIRECT != 0,\n- NonBlocking: mask&syscall.O_NONBLOCK != 0,\n- Append: mask&syscall.O_APPEND != 0,\n- Async: mask&syscall.O_ASYNC != 0,\n+ Direct: mask&linux.O_DIRECT != 0,\n+ NonBlocking: mask&linux.O_NONBLOCK != 0,\n+ Append: mask&linux.O_APPEND != 0,\n+ Async: mask&linux.O_ASYNC != 0,\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -148,6 +148,8 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\n}\nfileFlags := linuxToFlags(flags)\n+ // Linux always adds the O_LARGEFILE flag when running in 64-bit mode.\n+ fileFlags.LargeFile = true\nif fs.IsDir(d.Inode.StableAttr) {\n// Don't allow directories to be opened writable.\nif fileFlags.Write {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added the O_LARGEFILE flag.
This flag will always be true for gVisor files.
PiperOrigin-RevId: 206355963
Change-Id: I2f03d2412e2609042df43b06d1318cba674574d0 |
259,858 | 27.07.2018 17:52:21 | 25,200 | 3c673659998ecf51a22025886c92cd2d161d85e9 | Simplify Kokoro configuration. | [
{
"change_type": "RENAME",
"old_path": "kokoro/gcp_ubuntu/continuous.cfg",
"new_path": "kokoro/continuous.cfg",
"diff": "# Location of bash script that runs the test. The first directory in the path\n# is the directory where Kokoro will check out the repo. The rest is the path\n# is the path to the test script.\n-build_file: \"repo/kokoro/gcp_ubuntu/run_tests.sh\"\n+build_file: \"repo/kokoro/run_tests.sh\"\naction {\ndefine_artifacts {\n"
},
{
"change_type": "RENAME",
"old_path": "kokoro/gcp_ubuntu/presubmit.cfg",
"new_path": "kokoro/presubmit.cfg",
"diff": "# Location of bash script that runs the test. The first directory in the path\n# is the directory where Kokoro will check out the repo. The rest is the path\n# is the path to the test script.\n-build_file: \"repo/kokoro/gcp_ubuntu/run_tests.sh\"\n+build_file: \"repo/kokoro/run_tests.sh\"\naction {\ndefine_artifacts {\n"
},
{
"change_type": "RENAME",
"old_path": "kokoro/gcp_ubuntu/release-nightly.cfg",
"new_path": "kokoro/release-nightly.cfg",
"diff": "# Location of bash script that builds a release.\n-build_file: \"repo/kokoro/gcp_ubuntu/run_build.sh\"\n+build_file: \"repo/kokoro/run_build.sh\"\naction {\n# Upload runsc binary and its checksum. It may be in multiple paths, so we\n"
},
{
"change_type": "RENAME",
"old_path": "kokoro/gcp_ubuntu/run_build.sh",
"new_path": "kokoro/run_build.sh",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "kokoro/gcp_ubuntu/run_tests.sh",
"new_path": "kokoro/run_tests.sh",
"diff": ""
}
] | Go | Apache License 2.0 | google/gvisor | Simplify Kokoro configuration.
PiperOrigin-RevId: 206401009
Change-Id: I26644d1fe637b5a40db013fedf9fc063cc87ce6a |
259,948 | 30.07.2018 15:42:01 | 25,200 | 0a55f8c1c11dc6d2dfb1bed02489f92bab437ea1 | netstack: support disconnect-on-save option per fdbased link. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/fdbased/endpoint.go",
"new_path": "pkg/tcpip/link/fdbased/endpoint.go",
"diff": "@@ -69,6 +69,8 @@ type Options struct {\nChecksumOffload bool\nClosedFunc func(*tcpip.Error)\nAddress tcpip.LinkAddress\n+ SaveRestore bool\n+ DisconnectOk bool\n}\n// New creates a new fd-based endpoint.\n@@ -89,6 +91,14 @@ func New(opts *Options) tcpip.LinkEndpointID {\ncaps |= stack.CapabilityResolutionRequired\n}\n+ if opts.SaveRestore {\n+ caps |= stack.CapabilitySaveRestore\n+ }\n+\n+ if opts.DisconnectOk {\n+ caps |= stack.CapabilityDisconnectOk\n+ }\n+\ne := &endpoint{\nfd: opts.FD,\nmtu: opts.MTU,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -204,6 +204,7 @@ const (\nCapabilityChecksumOffload LinkEndpointCapabilities = 1 << iota\nCapabilityResolutionRequired\nCapabilitySaveRestore\n+ CapabilityDisconnectOk\n)\n// LinkEndpoint is the interface implemented by data link layer protocols (e.g.,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -1023,7 +1023,9 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error {\n// Mark endpoint as closed.\ne.mu.Lock()\n+ if e.state != stateError {\ne.state = stateClosed\n+ }\n// Lock released below.\nepilogue()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint_state.go",
"diff": "@@ -52,10 +52,17 @@ func (e *endpoint) beforeSave() {\ncase stateInitial, stateBound:\ncase stateConnected:\nif e.route.Capabilities()&stack.CapabilitySaveRestore == 0 {\n+ if e.route.Capabilities()&stack.CapabilityDisconnectOk == 0 {\npanic(tcpip.ErrSaveRejection{fmt.Errorf(\"endpoint cannot be saved in connected state: local %v:%d, remote %v:%d\", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)})\n}\n+ e.resetConnectionLocked(tcpip.ErrConnectionAborted)\n+ e.mu.Unlock()\n+ e.Close()\n+ e.mu.Lock()\n+ }\nif !e.workerRunning {\n- // The endpoint must be in acceptedChan.\n+ // The endpoint must be in acceptedChan or has been just\n+ // disconnected and closed.\nbreak\n}\nfallthrough\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: support disconnect-on-save option per fdbased link.
PiperOrigin-RevId: 206659972
Change-Id: I5e0e035f97743b6525ad36bed2c802791609beaf |
259,881 | 31.07.2018 11:37:51 | 25,200 | 6cad96f38a6de187d2aa3640c492bdfbdbdc589b | Drop dup2 filter
It is unused. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -30,7 +30,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_CLONE: {},\nsyscall.SYS_CLOSE: {},\nsyscall.SYS_DUP: {},\n- syscall.SYS_DUP2: {},\nsyscall.SYS_EPOLL_CREATE1: {},\nsyscall.SYS_EPOLL_CTL: {},\nsyscall.SYS_EPOLL_PWAIT: {},\n"
}
] | Go | Apache License 2.0 | google/gvisor | Drop dup2 filter
It is unused.
PiperOrigin-RevId: 206798328
Change-Id: I2d7d27c0e4a0ef51264b900f14f1b3fdad17f2c4 |
259,992 | 31.07.2018 15:06:36 | 25,200 | 413bfb39a940455cb116c7d0ca715b2ced78a11c | Use backoff package for retry logic | [
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/BUILD",
"new_path": "runsc/specutils/BUILD",
"diff": "@@ -13,6 +13,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/log\",\n\"//pkg/sentry/kernel/auth\",\n+ \"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils.go",
"new_path": "runsc/specutils/specutils.go",
"diff": "@@ -26,6 +26,7 @@ import (\n\"syscall\"\n\"time\"\n+ \"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n@@ -313,33 +314,30 @@ func SandboxID(spec *specs.Spec) (string, bool) {\n// the 'ready' function returns true. It continues to wait if 'ready' returns\n// false. It returns error on timeout, if the process stops or if 'ready' fails.\nfunc WaitForReady(pid int, timeout time.Duration, ready func() (bool, error)) error {\n- backoff := 1 * time.Millisecond\n- for start := time.Now(); time.Now().Sub(start) < timeout; {\n+ b := backoff.NewExponentialBackOff()\n+ b.InitialInterval = 1 * time.Millisecond\n+ b.MaxInterval = 1 * time.Second\n+ b.MaxElapsedTime = timeout\n+\n+ op := func() error {\nif ok, err := ready(); err != nil {\n- return err\n+ return backoff.Permanent(err)\n} else if ok {\nreturn nil\n}\n// Check if the process is still running.\n- var ws syscall.WaitStatus\n- var ru syscall.Rusage\n-\n// If the process is alive, child is 0 because of the NOHANG option.\n// If the process has terminated, child equals the process id.\n+ var ws syscall.WaitStatus\n+ var ru syscall.Rusage\nchild, err := syscall.Wait4(pid, &ws, syscall.WNOHANG, &ru)\nif err != nil {\n- return fmt.Errorf(\"error waiting for process: %v\", err)\n+ return backoff.Permanent(fmt.Errorf(\"error waiting for process: %v\", err))\n} else if child == pid {\n- return fmt.Errorf(\"process %d has terminated\", pid)\n- }\n-\n- // Process continues to run, backoff and retry.\n- time.Sleep(backoff)\n- backoff *= 2\n- if backoff > 1*time.Second {\n- backoff = 1 * time.Second\n+ return backoff.Permanent(fmt.Errorf(\"process %d has terminated\", pid))\n}\n+ return fmt.Errorf(\"process %d not running yet\", pid)\n}\n- return fmt.Errorf(\"timed out waiting for process (%d)\", pid)\n+ return backoff.Retry(op, b)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/specutils/specutils_test.go",
"new_path": "runsc/specutils/specutils_test.go",
"diff": "@@ -94,8 +94,8 @@ func TestWaitForReadyTimeout(t *testing.T) {\nerr := WaitForReady(cmd.Process.Pid, 50*time.Millisecond, func() (bool, error) {\nreturn false, nil\n})\n- if !strings.Contains(err.Error(), \"timed out\") {\n- t.Errorf(\"ProcessWaitReady got: %v, expected: timed out\", err)\n+ if !strings.Contains(err.Error(), \"not running yet\") {\n+ t.Errorf(\"ProcessWaitReady got: %v, expected: not running yet\", err)\n}\ncmd.Process.Kill()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Use backoff package for retry logic
PiperOrigin-RevId: 206834838
Change-Id: I9a44c6fa5f4766a01f86e90810f025cefecdf2d4 |
259,854 | 01.08.2018 20:21:00 | 25,200 | 3cd7824410302da00d1c8c8323db8959a124814a | Move stack clock to options struct | [
{
"change_type": "MODIFY",
"old_path": "pkg/dhcp/dhcp_test.go",
"new_path": "pkg/dhcp/dhcp_test.go",
"diff": "@@ -46,7 +46,7 @@ func TestDHCP(t *testing.T) {\n}\n}()\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName}, []string{udp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName}, []string{udp.ProtocolName}, stack.Options{})\nconst nicid tcpip.NICID = 1\nif err := s.CreateNIC(nicid, id); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/adapters/gonet/gonet_test.go",
"new_path": "pkg/tcpip/adapters/gonet/gonet_test.go",
"diff": "@@ -57,7 +57,7 @@ func TestTimeouts(t *testing.T) {\nfunc newLoopbackStack() (*stack.Stack, *tcpip.Error) {\n// Create the stack and add a NIC.\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName, udp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName, udp.ProtocolName}, stack.Options{})\nif err := s.CreateNIC(NICID, loopback.New()); err != nil {\nreturn nil, err\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/network/arp/arp_test.go",
"new_path": "pkg/tcpip/network/arp/arp_test.go",
"diff": "@@ -43,7 +43,7 @@ type testContext struct {\n}\nfunc newTestContext(t *testing.T) *testContext {\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName, arp.ProtocolName}, []string{ping.ProtocolName4})\n+ s := stack.New([]string{ipv4.ProtocolName, arp.ProtocolName}, []string{ping.ProtocolName4}, stack.Options{})\nconst defaultMTU = 65536\nid, linkEP := channel.New(256, defaultMTU, stackLinkAddr)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_connect/main.go",
"diff": "@@ -123,7 +123,7 @@ func main() {\n// Create the stack with ipv4 and tcp protocols, then add a tun-based\n// NIC and ipv4 address.\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\nmtu, err := rawfile.GetMTU(tunName)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"new_path": "pkg/tcpip/sample/tun_tcp_echo/main.go",
"diff": "@@ -109,7 +109,7 @@ func main() {\n// Create the stack with ip and tcp protocols, then add a tun-based\n// NIC and address.\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\nmtu, err := rawfile.GetMTU(tunName)\nif err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -285,6 +285,14 @@ type Stack struct {\nclock tcpip.Clock\n}\n+// Options contains optional Stack configuration.\n+type Options struct {\n+ // Clock is an optional clock source used for timestampping packets.\n+ //\n+ // If no Clock is specified, the clock source will be time.Now.\n+ Clock tcpip.Clock\n+}\n+\n// New allocates a new networking stack with only the requested networking and\n// transport protocols configured with default options.\n//\n@@ -292,7 +300,12 @@ type Stack struct {\n// SetNetworkProtocolOption/SetTransportProtocolOption methods provided by the\n// stack. Please refer to individual protocol implementations as to what options\n// are supported.\n-func New(clock tcpip.Clock, network []string, transport []string) *Stack {\n+func New(network []string, transport []string, opts Options) *Stack {\n+ clock := opts.Clock\n+ if clock == nil {\n+ clock = &tcpip.StdClock{}\n+ }\n+\ns := &Stack{\ntransportProtocols: make(map[tcpip.TransportProtocolNumber]*transportProtocolState),\nnetworkProtocols: make(map[tcpip.NetworkProtocolNumber]NetworkProtocol),\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack_test.go",
"new_path": "pkg/tcpip/stack/stack_test.go",
"diff": "@@ -186,7 +186,7 @@ func TestNetworkReceive(t *testing.T) {\n// Create a stack with the fake network protocol, one nic, and two\n// addresses attached to it: 1 & 2.\nid, linkEP := channel.New(10, defaultMTU, \"\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nif err := s.CreateNIC(1, id); err != nil {\nt.Fatalf(\"CreateNIC failed: %v\", err)\n}\n@@ -280,7 +280,7 @@ func TestNetworkSend(t *testing.T) {\n// address: 1. The route table sends all packets through the only\n// existing nic.\nid, linkEP := channel.New(10, defaultMTU, \"\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nif err := s.CreateNIC(1, id); err != nil {\nt.Fatalf(\"NewNIC failed: %v\", err)\n}\n@@ -302,7 +302,7 @@ func TestNetworkSendMultiRoute(t *testing.T) {\n// Create a stack with the fake network protocol, two nics, and two\n// addresses per nic, the first nic has odd address, the second one has\n// even addresses.\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid1, linkEP1 := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id1); err != nil {\n@@ -381,7 +381,7 @@ func TestRoutes(t *testing.T) {\n// Create a stack with the fake network protocol, two nics, and two\n// addresses per nic, the first nic has odd address, the second one has\n// even addresses.\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid1, _ := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id1); err != nil {\n@@ -445,7 +445,7 @@ func TestRoutes(t *testing.T) {\n}\nfunc TestAddressRemoval(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, linkEP := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -489,7 +489,7 @@ func TestAddressRemoval(t *testing.T) {\n}\nfunc TestDelayedRemovalDueToRoute(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, linkEP := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -557,7 +557,7 @@ func TestDelayedRemovalDueToRoute(t *testing.T) {\n}\nfunc TestPromiscuousMode(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, linkEP := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -617,7 +617,7 @@ func TestAddressSpoofing(t *testing.T) {\nsrcAddr := tcpip.Address(\"\\x01\")\ndstAddr := tcpip.Address(\"\\x02\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, _ := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -658,7 +658,7 @@ func TestAddressSpoofing(t *testing.T) {\n// Set the subnet, then check that packet is delivered.\nfunc TestSubnetAcceptsMatchingPacket(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, linkEP := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -692,7 +692,7 @@ func TestSubnetAcceptsMatchingPacket(t *testing.T) {\n// Set destination outside the subnet, then check it doesn't get delivered.\nfunc TestSubnetRejectsNonmatchingPacket(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, nil)\n+ s := stack.New([]string{\"fakeNet\"}, nil, stack.Options{})\nid, linkEP := channel.New(10, defaultMTU, \"\")\nif err := s.CreateNIC(1, id); err != nil {\n@@ -724,7 +724,7 @@ func TestSubnetRejectsNonmatchingPacket(t *testing.T) {\n}\nfunc TestNetworkOptions(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, []string{})\n+ s := stack.New([]string{\"fakeNet\"}, []string{}, stack.Options{})\n// Try an unsupported network protocol.\nif err := s.SetNetworkProtocolOption(tcpip.NetworkProtocolNumber(99999), fakeNetGoodOption(false)); err != tcpip.ErrUnknownProtocol {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/transport_test.go",
"new_path": "pkg/tcpip/stack/transport_test.go",
"diff": "@@ -220,7 +220,7 @@ func (f *fakeTransportProtocol) Option(option interface{}) *tcpip.Error {\nfunc TestTransportReceive(t *testing.T) {\nid, linkEP := channel.New(10, defaultMTU, \"\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, []string{\"fakeTrans\"})\n+ s := stack.New([]string{\"fakeNet\"}, []string{\"fakeTrans\"}, stack.Options{})\nif err := s.CreateNIC(1, id); err != nil {\nt.Fatalf(\"CreateNIC failed: %v\", err)\n}\n@@ -280,7 +280,7 @@ func TestTransportReceive(t *testing.T) {\nfunc TestTransportControlReceive(t *testing.T) {\nid, linkEP := channel.New(10, defaultMTU, \"\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, []string{\"fakeTrans\"})\n+ s := stack.New([]string{\"fakeNet\"}, []string{\"fakeTrans\"}, stack.Options{})\nif err := s.CreateNIC(1, id); err != nil {\nt.Fatalf(\"CreateNIC failed: %v\", err)\n}\n@@ -346,7 +346,7 @@ func TestTransportControlReceive(t *testing.T) {\nfunc TestTransportSend(t *testing.T) {\nid, _ := channel.New(10, defaultMTU, \"\")\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, []string{\"fakeTrans\"})\n+ s := stack.New([]string{\"fakeNet\"}, []string{\"fakeTrans\"}, stack.Options{})\nif err := s.CreateNIC(1, id); err != nil {\nt.Fatalf(\"CreateNIC failed: %v\", err)\n}\n@@ -383,7 +383,7 @@ func TestTransportSend(t *testing.T) {\n}\nfunc TestTransportOptions(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{\"fakeNet\"}, []string{\"fakeTrans\"})\n+ s := stack.New([]string{\"fakeNet\"}, []string{\"fakeTrans\"}, stack.Options{})\n// Try an unsupported transport protocol.\nif err := s.SetTransportProtocolOption(tcpip.TransportProtocolNumber(99999), fakeTransportGoodOption(false)); err != tcpip.ErrUnknownProtocol {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -2579,7 +2579,7 @@ func checkSendBufferSize(t *testing.T, ep tcpip.Endpoint, v int) {\n}\nfunc TestDefaultBufferSizes(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\n// Check the default values.\nep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})\n@@ -2625,7 +2625,7 @@ func TestDefaultBufferSizes(t *testing.T) {\n}\nfunc TestMinMaxBufferSizes(t *testing.T) {\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\n// Check the default values.\nep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{})\n@@ -2675,7 +2675,7 @@ func TestSelfConnect(t *testing.T) {\n// it checks that if an endpoint binds to say 127.0.0.1:1000 then\n// connects to 127.0.0.1:1000, then it will be connected to itself, and\n// is able to send and receive data through the same endpoint.\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\nid := loopback.New()\nif testing.Verbose() {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"new_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"diff": "@@ -139,7 +139,7 @@ type Context struct {\n// New allocates and initializes a test context containing a new\n// stack and a link-layer endpoint.\nfunc New(t *testing.T, mtu uint32) *Context {\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName}, stack.Options{})\n// Allow minimum send/receive buffer sizes to be 1 during tests.\nif err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SendBufferSizeOption{1, tcp.DefaultBufferSize, tcp.DefaultBufferSize * 10}); err != nil {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/udp_test.go",
"new_path": "pkg/tcpip/transport/udp/udp_test.go",
"diff": "@@ -66,7 +66,7 @@ type headers struct {\n}\nfunc newDualTestContext(t *testing.T, mtu uint32) *testContext {\n- s := stack.New(&tcpip.StdClock{}, []string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{udp.ProtocolName})\n+ s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{udp.ProtocolName}, stack.Options{})\nid, linkEP := channel.New(256, mtu, \"\")\nif testing.Verbose() {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -534,7 +534,7 @@ func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) inet.Stack {\n// NetworkNone sets up loopback using netstack.\nnetProtos := []string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}\nprotoNames := []string{tcp.ProtocolName, udp.ProtocolName, ping.ProtocolName4}\n- return &epsocket.Stack{stack.New(clock, netProtos, protoNames)}\n+ return &epsocket.Stack{stack.New(netProtos, protoNames, stack.Options{Clock: clock})}\ndefault:\npanic(fmt.Sprintf(\"invalid network configuration: %v\", conf.Network))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move stack clock to options struct
PiperOrigin-RevId: 207039273
Change-Id: Ib8f55a6dc302052ab4a10ccd70b07f0d73b373df |
260,013 | 02.08.2018 08:09:03 | 25,200 | cf44aff6e08b0e19935d5cd98455b4af98fd8794 | Add seccomp(2) support.
Add support for the seccomp syscall and the flag SECCOMP_FILTER_FLAG_TSYNC. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/BUILD",
"new_path": "pkg/sentry/kernel/BUILD",
"diff": "@@ -47,6 +47,7 @@ go_stateify(\n],\nout = \"kernel_autogen_state.go\",\nimports = [\n+ \"gvisor.googlesource.com/gvisor/pkg/bpf\",\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\",\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\",\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/seccomp.go",
"new_path": "pkg/sentry/kernel/seccomp.go",
"diff": "@@ -144,10 +144,15 @@ func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, i\ninput := data.asBPFInput()\nret := uint32(linux.SECCOMP_RET_ALLOW)\n+ f := t.syscallFilters.Load()\n+ if f == nil {\n+ return ret\n+ }\n+\n// \"Every filter successfully installed will be evaluated (in reverse\n// order) for each system call the task makes.\" - kernel/seccomp.c\n- for i := len(t.syscallFilters) - 1; i >= 0; i-- {\n- thisRet, err := bpf.Exec(t.syscallFilters[i], input)\n+ for i := len(f.([]bpf.Program)) - 1; i >= 0; i-- {\n+ thisRet, err := bpf.Exec(f.([]bpf.Program)[i], input)\nif err != nil {\nt.Debugf(\"seccomp-bpf filter %d returned error: %v\", i, err)\nthisRet = linux.SECCOMP_RET_KILL\n@@ -180,15 +185,53 @@ func (t *Task) AppendSyscallFilter(p bpf.Program) error {\n// maxSyscallFilterInstructions. (This restriction is inherited from\n// Linux.)\ntotalLength := p.Length()\n- for _, f := range t.syscallFilters {\n+ var newFilters []bpf.Program\n+\n+ // While syscallFilters are an atomic.Value we must take the mutex to\n+ // prevent our read-copy-update from happening while another task\n+ // is syncing syscall filters to us, this keeps the filters in a\n+ // consistent state.\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\n+ if sf := t.syscallFilters.Load(); sf != nil {\n+ oldFilters := sf.([]bpf.Program)\n+ for _, f := range oldFilters {\ntotalLength += f.Length() + 4\n}\n+ newFilters = append(newFilters, oldFilters...)\n+ }\n+\nif totalLength > maxSyscallFilterInstructions {\nreturn syserror.ENOMEM\n}\n- t.mu.Lock()\n- defer t.mu.Unlock()\n- t.syscallFilters = append(t.syscallFilters, p)\n+\n+ newFilters = append(newFilters, p)\n+ t.syscallFilters.Store(newFilters)\n+ return nil\n+}\n+\n+// SyncSyscallFiltersToThreadGroup will copy this task's filters to all other\n+// threads in our thread group.\n+func (t *Task) SyncSyscallFiltersToThreadGroup() error {\n+ f := t.syscallFilters.Load()\n+\n+ t.tg.pidns.owner.mu.RLock()\n+ defer t.tg.pidns.owner.mu.RUnlock()\n+\n+ // Note: No new privs is always assumed to be set.\n+ for ot := t.tg.tasks.Front(); ot != nil; ot = ot.Next() {\n+ if ot.ThreadID() != t.ThreadID() {\n+ // We must take the other task's mutex to prevent it from\n+ // appending to its own syscall filters while we're syncing.\n+ ot.mu.Lock()\n+ var copiedFilters []bpf.Program\n+ if f != nil {\n+ copiedFilters = append(copiedFilters, f.([]bpf.Program)...)\n+ }\n+ ot.syscallFilters.Store(copiedFilters)\n+ ot.mu.Unlock()\n+ }\n+ }\nreturn nil\n}\n@@ -196,9 +239,8 @@ func (t *Task) AppendSyscallFilter(p bpf.Program) error {\n// seccomp syscall filtering mode, appropriate for both prctl(PR_GET_SECCOMP)\n// and /proc/[pid]/status.\nfunc (t *Task) SeccompMode() int {\n- t.mu.Lock()\n- defer t.mu.Unlock()\n- if len(t.syscallFilters) > 0 {\n+ f := t.syscallFilters.Load()\n+ if f != nil && len(f.([]bpf.Program)) > 0 {\nreturn linux.SECCOMP_MODE_FILTER\n}\nreturn linux.SECCOMP_MODE_NONE\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -355,11 +355,11 @@ type Task struct {\nparentDeathSignal linux.Signal\n// syscallFilters is all seccomp-bpf syscall filters applicable to the\n- // task, in the order in which they were installed.\n+ // task, in the order in which they were installed. The type of the atomic\n+ // is []bpf.Program. Writing needs to be protected by mu.\n//\n- // syscallFilters is protected by mu. syscallFilters is owned by the task\n- // goroutine.\n- syscallFilters []bpf.Program\n+ // syscallFilters is owned by the task goroutine.\n+ syscallFilters atomic.Value `state:\".([]bpf.Program)\"`\n// If cleartid is non-zero, treat it as a pointer to a ThreadID in the\n// task's virtual address space; when the task exits, set the pointed-to\n@@ -469,6 +469,17 @@ func (t *Task) loadLogPrefix(prefix string) {\nt.logPrefix.Store(prefix)\n}\n+func (t *Task) saveSyscallFilters() []bpf.Program {\n+ if f := t.syscallFilters.Load(); f != nil {\n+ return f.([]bpf.Program)\n+ }\n+ return nil\n+}\n+\n+func (t *Task) loadSyscallFilters(filters []bpf.Program) {\n+ t.syscallFilters.Store(filters)\n+}\n+\n// afterLoad is invoked by stateify.\nfunc (t *Task) afterLoad() {\nt.interruptChan = make(chan struct{}, 1)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_clone.go",
"new_path": "pkg/sentry/kernel/task_clone.go",
"diff": "@@ -280,7 +280,10 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {\n// \"If fork/clone and execve are allowed by @prog, any child processes will\n// be constrained to the same filters and system call ABI as the parent.\" -\n// Documentation/prctl/seccomp_filter.txt\n- nt.syscallFilters = append([]bpf.Program(nil), t.syscallFilters...)\n+ if f := t.syscallFilters.Load(); f != nil {\n+ copiedFilters := append([]bpf.Program(nil), f.([]bpf.Program)...)\n+ nt.syscallFilters.Store(copiedFilters)\n+ }\nif opts.Vfork {\nnt.vforkParent = t\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task_syscall.go",
"new_path": "pkg/sentry/kernel/task_syscall.go",
"diff": "@@ -194,7 +194,7 @@ func (t *Task) doSyscall() taskRunState {\n// Check seccomp filters. The nil check is for performance (as seccomp use\n// is rare), not needed for correctness.\n- if t.syscallFilters != nil {\n+ if t.syscallFilters.Load() != nil {\nswitch r := t.checkSeccompSyscall(int32(sysno), args, usermem.Addr(t.Arch().IP())); r {\ncase seccompResultDeny:\nt.Debugf(\"Syscall %d: denied by seccomp\", sysno)\n@@ -334,7 +334,7 @@ func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState {\n// to syscall ABI because they both use RDI, RSI, and RDX for the first three\n// arguments and none of the vsyscalls uses more than two arguments.\nargs := t.Arch().SyscallArgs()\n- if t.syscallFilters != nil {\n+ if t.syscallFilters.Load() != nil {\nswitch r := t.checkSeccompSyscall(int32(sysno), args, addr); r {\ncase seccompResultDeny:\nt.Debugf(\"vsyscall %d, caller %x: denied by seccomp\", sysno, t.Arch().Value(caller))\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -42,6 +42,7 @@ go_library(\n\"sys_rlimit.go\",\n\"sys_rusage.go\",\n\"sys_sched.go\",\n+ \"sys_seccomp.go\",\n\"sys_sem.go\",\n\"sys_shm.go\",\n\"sys_signal.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "@@ -359,6 +359,7 @@ var AMD64 = &kernel.SyscallTable{\n312: syscalls.CapError(linux.CAP_SYS_PTRACE), // Kcmp, requires cap_sys_ptrace\n313: syscalls.CapError(linux.CAP_SYS_MODULE), // FinitModule, requires cap_sys_module\n// \"Backports.\"\n+ 317: Seccomp,\n318: GetRandom,\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_prctl.go",
"new_path": "pkg/sentry/syscalls/linux/sys_prctl.go",
"diff": "@@ -18,29 +18,13 @@ import (\n\"syscall\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n- \"gvisor.googlesource.com/gvisor/pkg/bpf\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"\n- \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n-// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.\n-type userSockFprog struct {\n- // Len is the length of the filter in BPF instructions.\n- Len uint16\n-\n- _ [6]byte // padding for alignment\n-\n- // Filter is a user pointer to the struct sock_filter array that makes up\n- // the filter program. Filter is a uint64 rather than a usermem.Addr\n- // because usermem.Addr is actually uintptr, which is not a fixed-size\n- // type, and encoding/binary.Read objects to this.\n- Filter uint64\n-}\n-\n// Prctl implements linux syscall prctl(2).\n// It has a list of subfunctions which operate on the process. The arguments are\n// all based on each subfunction.\n@@ -143,20 +127,8 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n// Unsupported mode.\nreturn 0, nil, syscall.EINVAL\n}\n- var fprog userSockFprog\n- if _, err := t.CopyIn(args[2].Pointer(), &fprog); err != nil {\n- return 0, nil, err\n- }\n- filter := make([]linux.BPFInstruction, int(fprog.Len))\n- if _, err := t.CopyIn(usermem.Addr(fprog.Filter), &filter); err != nil {\n- return 0, nil, err\n- }\n- compiledFilter, err := bpf.Compile(filter)\n- if err != nil {\n- t.Debugf(\"Invalid seccomp-bpf filter: %v\", err)\n- return 0, nil, syscall.EINVAL\n- }\n- return 0, nil, t.AppendSyscallFilter(compiledFilter)\n+\n+ return 0, nil, seccomp(t, linux.SECCOMP_SET_MODE_FILTER, 0, args[2].Pointer())\ncase linux.PR_GET_SECCOMP:\nreturn uintptr(t.SeccompMode()), nil, nil\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/syscalls/linux/sys_seccomp.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+import (\n+ \"syscall\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/bpf\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+)\n+\n+// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.\n+type userSockFprog struct {\n+ // Len is the length of the filter in BPF instructions.\n+ Len uint16\n+\n+ _ [6]byte // padding for alignment\n+\n+ // Filter is a user pointer to the struct sock_filter array that makes up\n+ // the filter program. Filter is a uint64 rather than a usermem.Addr\n+ // because usermem.Addr is actually uintptr, which is not a fixed-size\n+ // type, and encoding/binary.Read objects to this.\n+ Filter uint64\n+}\n+\n+// seccomp applies a seccomp policy to the current task.\n+func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {\n+ // We only support SECCOMP_SET_MODE_FILTER at the moment.\n+ if mode != linux.SECCOMP_SET_MODE_FILTER {\n+ // Unsupported mode.\n+ return syscall.EINVAL\n+ }\n+\n+ tsync := flags&linux.SECCOMP_FILTER_FLAG_TSYNC != 0\n+\n+ // The only flag we support now is SECCOMP_FILTER_FLAG_TSYNC.\n+ if flags&^linux.SECCOMP_FILTER_FLAG_TSYNC != 0 {\n+ // Unsupported flag.\n+ return syscall.EINVAL\n+ }\n+\n+ var fprog userSockFprog\n+ if _, err := t.CopyIn(addr, &fprog); err != nil {\n+ return err\n+ }\n+ filter := make([]linux.BPFInstruction, int(fprog.Len))\n+ if _, err := t.CopyIn(usermem.Addr(fprog.Filter), &filter); err != nil {\n+ return err\n+ }\n+ compiledFilter, err := bpf.Compile(filter)\n+ if err != nil {\n+ t.Debugf(\"Invalid seccomp-bpf filter: %v\", err)\n+ return syscall.EINVAL\n+ }\n+\n+ err = t.AppendSyscallFilter(compiledFilter)\n+ if err == nil && tsync {\n+ // Now we must copy this seccomp program to all other threads.\n+ err = t.SyncSyscallFiltersToThreadGroup()\n+ }\n+ return err\n+}\n+\n+// Seccomp implements linux syscall seccomp(2).\n+func Seccomp(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ return 0, nil, seccomp(t, args[0].Uint64(), args[1].Uint64(), args[2].Pointer())\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add seccomp(2) support.
Add support for the seccomp syscall and the flag SECCOMP_FILTER_FLAG_TSYNC.
PiperOrigin-RevId: 207101507
Change-Id: I5eb8ba9d5ef71b0e683930a6429182726dc23175 |
259,992 | 02.08.2018 12:40:29 | 25,200 | 4c1167de4ee2aa7b71729ff8b1c742b4183168d1 | Isolate image pulling time from container startup
mysql image test is timing out sporadically and it's hard to tell
where the slow down in coming from. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -82,6 +82,9 @@ func testHTTPServer(port int) error {\n}\nfunc TestHttpd(t *testing.T) {\n+ if out, err := testutil.Pull(\"httpd\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ }\nd := testutil.MakeDocker(\"http-test\")\ndir, err := testutil.PrepareFiles(\"latin10k.txt\")\n@@ -112,6 +115,9 @@ func TestHttpd(t *testing.T) {\n}\nfunc TestNginx(t *testing.T) {\n+ if out, err := testutil.Pull(\"nginx\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ }\nd := testutil.MakeDocker(\"net-test\")\ndir, err := testutil.PrepareFiles(\"latin10k.txt\")\n@@ -142,6 +148,9 @@ func TestNginx(t *testing.T) {\n}\nfunc TestMysql(t *testing.T) {\n+ if out, err := testutil.Pull(\"mysql\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ }\nd := testutil.MakeDocker(\"mysql-test\")\n// Start the container.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/python_test.go",
"new_path": "runsc/test/image/python_test.go",
"diff": "@@ -24,6 +24,9 @@ import (\n)\nfunc TestPythonHello(t *testing.T) {\n+ if out, err := testutil.Pull(\"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ }\nd := testutil.MakeDocker(\"python-hello-test\")\nif out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/tomcat_test.go",
"new_path": "runsc/test/image/tomcat_test.go",
"diff": "@@ -24,6 +24,9 @@ import (\n)\nfunc TestTomcat(t *testing.T) {\n+ if out, err := testutil.Pull(\"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ }\nd := testutil.MakeDocker(\"tomcat-test\")\nif out, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -94,6 +94,24 @@ func getLocalPath(file string) string {\nreturn path.Join(\".\", file)\n}\n+// do executes docker command.\n+func do(args ...string) (string, error) {\n+ fmt.Printf(\"Running: docker %s\\n\", args)\n+ cmd := exec.Command(\"docker\", args...)\n+ out, err := cmd.CombinedOutput()\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n+ }\n+ return string(out), nil\n+}\n+\n+// Pull pulls a docker image. This is used in tests to isolate the\n+// time to pull the image off the network from the time to actually\n+// start the container, to avoid timeouts over slow networks.\n+func Pull(image string) (string, error) {\n+ return do(\"pull\", image)\n+}\n+\n// Docker contains the name and the runtime of a docker container.\ntype Docker struct {\nRuntime string\n@@ -107,30 +125,19 @@ func MakeDocker(namePrefix string) Docker {\nreturn Docker{Name: namePrefix + suffix, Runtime: runtime()}\n}\n-// Do executes docker command.\n-func (d *Docker) Do(args ...string) (string, error) {\n- fmt.Printf(\"Running: docker %s\\n\", args)\n- cmd := exec.Command(\"docker\", args...)\n- out, err := cmd.CombinedOutput()\n- if err != nil {\n- return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n- }\n- return string(out), nil\n-}\n-\n// Run calls 'docker run' with the arguments provided.\nfunc (d *Docker) Run(args ...string) (string, error) {\na := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-d\"}\na = append(a, args...)\n- return d.Do(a...)\n+ return do(a...)\n}\n// CleanUp kills and deletes the container.\nfunc (d *Docker) CleanUp() error {\n- if _, err := d.Do(\"kill\", d.Name); err != nil {\n+ if _, err := do(\"kill\", d.Name); err != nil {\nreturn fmt.Errorf(\"error killing container %q: %v\", d.Name, err)\n}\n- if _, err := d.Do(\"rm\", d.Name); err != nil {\n+ if _, err := do(\"rm\", d.Name); err != nil {\nreturn fmt.Errorf(\"error deleting container %q: %v\", d.Name, err)\n}\nreturn nil\n@@ -140,7 +147,7 @@ func (d *Docker) CleanUp() error {\n// docker to allocate a free port in the host and prevent conflicts.\nfunc (d *Docker) FindPort(sandboxPort int) (int, error) {\nformat := fmt.Sprintf(`{{ (index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort }}`, sandboxPort)\n- out, err := d.Do(\"inspect\", \"-f\", format, d.Name)\n+ out, err := do(\"inspect\", \"-f\", format, d.Name)\nif err != nil {\nreturn -1, fmt.Errorf(\"error retrieving port: %v\", err)\n}\n@@ -158,7 +165,7 @@ func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) error {\nvar out string\nfor exp := time.Now().Add(timeout); time.Now().Before(exp); {\nvar err error\n- out, err = d.Do(\"logs\", d.Name)\n+ out, err = do(\"logs\", d.Name)\nif err != nil {\nreturn err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Isolate image pulling time from container startup
mysql image test is timing out sporadically and it's hard to tell
where the slow down in coming from.
PiperOrigin-RevId: 207147237
Change-Id: I05a4d2c116292695d63cf861f3b89cd1c54b6106 |
259,881 | 02.08.2018 15:55:19 | 25,200 | b6a37ab9d96b382e26e3836a42ea485c48a521a8 | Update comment reference | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_prctl.go",
"new_path": "pkg/sentry/syscalls/linux/sys_prctl.go",
"diff": "@@ -113,7 +113,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn 0, nil, syscall.EINVAL\n}\n// no_new_privs is assumed to always be set. See\n- // auth.Credentials.UpdateForExec.\n+ // kernel.Task.updateCredsForExec.\nreturn 0, nil, nil\ncase linux.PR_GET_NO_NEW_PRIVS:\n"
}
] | Go | Apache License 2.0 | google/gvisor | Update comment reference
PiperOrigin-RevId: 207180809
Change-Id: I08c264812919e81b2c56fdd4a9ef06924de8b52f |
259,881 | 02.08.2018 16:00:29 | 25,200 | a3927157c56cc022cefebc30c8a9b6014f5d0412 | Copy creds in access | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -453,7 +453,7 @@ func accessAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, resolve bool, m\n// uid/gid. We do this by temporarily clearing all FS-related\n// capabilities and switching the fsuid/fsgid around to the\n// real ones.\" -fs/open.c:faccessat\n- creds := t.Credentials()\n+ creds := t.Credentials().Fork()\ncreds.EffectiveKUID = creds.RealKUID\ncreds.EffectiveKGID = creds.RealKGID\nif creds.EffectiveKUID.In(creds.UserNamespace) == auth.RootUID {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Copy creds in access
PiperOrigin-RevId: 207181631
Change-Id: Ic6205278715a9260fb970efb414fc758ea72c4c6 |
259,948 | 03.08.2018 12:07:57 | 25,200 | 25178ebdf5e881eae8e81eaf2f69d96de42d2250 | stateify: make explicit mode no longer optional. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/socket.go",
"new_path": "pkg/sentry/fs/host/socket.go",
"diff": "@@ -37,7 +37,7 @@ import (\n//\n// +stateify savable\ntype endpoint struct {\n- queue waiter.Queue `state:\"nosave\"`\n+ queue waiter.Queue `state:\"zerovalue\"`\n// stype is the type of Unix socket. (Ex: unix.SockStream,\n// unix.SockSeqpacket, unix.SockDgram)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_stateify/defs.bzl",
"new_path": "tools/go_stateify/defs.bzl",
"diff": "\"\"\"Stateify is a tool for generating state wrappers for Go types.\n-The go_stateify rule is used to generate a file that will appear in a Go\n-target; the output file should appear explicitly in a srcs list. For example:\n+The recommended way is to use the go_library rule defined below with mostly\n+identical configuration as the native go_library rule.\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_library\")\n+\n+go_library(\n+ name = \"foo\",\n+ srcs = [\"foo.go\"],\n+)\n+\n+Under the hood, the go_stateify rule is used to generate a file that will\n+appear in a Go target; the output file should appear explicitly in a srcs list.\n+For example (the above is still the preferred way):\n+\n+load(\"//tools/go_stateify:defs.bzl\", \"go_stateify\")\ngo_stateify(\nname = \"foo_state\",\n@@ -35,8 +48,6 @@ def _go_stateify_impl(ctx):\nargs += [\"-statepkg=%s\" % ctx.attr._statepkg]\nif ctx.attr.imports:\nargs += [\"-imports=%s\" % \",\".join(ctx.attr.imports)]\n- if ctx.attr.explicit:\n- args += [\"-explicit=true\"]\nargs += [\"--\"]\nfor src in ctx.attr.srcs:\nargs += [f.path for f in src.files]\n@@ -57,7 +68,6 @@ def _go_stateify_impl(ctx):\n# imports: an optional list of extra non-aliased, Go-style absolute import paths.\n# out: the name of the generated file output. This must not conflict with any other files and must be added to the srcs of the relevant go_library.\n# package: the package name for the input sources.\n-# explicit: only generate for types explicitly annotated as savable.\ngo_stateify = rule(\nimplementation = _go_stateify_impl,\nattrs = {\n@@ -65,7 +75,6 @@ go_stateify = rule(\n\"imports\": attr.string_list(mandatory = False),\n\"package\": attr.string(mandatory = True),\n\"out\": attr.output(mandatory = True),\n- \"explicit\": attr.bool(default = False),\n\"_tool\": attr.label(executable = True, cfg = \"host\", default = Label(\"//tools/go_stateify:stateify\")),\n\"_statepkg\": attr.string(default = \"gvisor.googlesource.com/gvisor/pkg/state\"),\n},\n@@ -81,7 +90,6 @@ def go_library(name, srcs, deps = [], imports = [], **kwargs):\nimports = imports,\npackage = name,\nout = name + \"_state_autogen.go\",\n- explicit = True,\n)\nall_srcs = srcs + [name + \"_state_autogen.go\"]\nif \"//pkg/state\" not in deps:\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/go_stateify/main.go",
"new_path": "tools/go_stateify/main.go",
"diff": "@@ -33,7 +33,6 @@ var (\nimports = flag.String(\"imports\", \"\", \"extra imports for the output file\")\noutput = flag.String(\"output\", \"\", \"output file\")\nstatePkg = flag.String(\"statepkg\", \"\", \"state import package; defaults to empty\")\n- explicit = flag.Bool(\"explicit\", false, \"only generate for types explicitly tagged '// +stateify savable'\")\n)\n// resolveTypeName returns a qualified type name.\n@@ -318,11 +317,9 @@ func main() {\ncontinue\n}\n- if *explicit {\n- // In explicit mode, only generate code for\n- // types explicitly marked\n- // \"// +stateify savable\" in one of the\n- // proceeding comment lines.\n+ // Only generate code for types marked\n+ // \"// +stateify savable\" in one of the proceeding\n+ // comment lines.\nif d.Doc == nil {\ncontinue\n}\n@@ -336,7 +333,6 @@ func main() {\nif !savable {\ncontinue\n}\n- }\nfor _, gs := range d.Specs {\nts := gs.(*ast.TypeSpec)\n"
}
] | Go | Apache License 2.0 | google/gvisor | stateify: make explicit mode no longer optional.
PiperOrigin-RevId: 207303405
Change-Id: I17b6433963d78e3631a862b7ac80f566c8e7d106 |
259,962 | 03.08.2018 17:53:24 | 25,200 | 56fa562dda18260440726a37ea467f6eb6aa6c12 | Cubic implementation for Netstack.
This CL implements CUBIC as described in | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/BUILD",
"new_path": "pkg/tcpip/transport/tcp/BUILD",
"diff": "@@ -19,6 +19,7 @@ go_library(\nsrcs = [\n\"accept.go\",\n\"connect.go\",\n+ \"cubic.go\",\n\"endpoint.go\",\n\"endpoint_state.go\",\n\"forwarder.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/tcpip/transport/tcp/cubic.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+import (\n+ \"math\"\n+ \"time\"\n+)\n+\n+// cubicState stores the variables related to TCP CUBIC congestion\n+// control algorithm state.\n+//\n+// See: https://tools.ietf.org/html/rfc8312.\n+type cubicState struct {\n+ // wLastMax is the previous wMax value.\n+ wLastMax float64\n+\n+ // wMax is the value of the congestion window at the\n+ // time of last congestion event.\n+ wMax float64\n+\n+ // t denotes the time when the current congestion avoidance\n+ // was entered.\n+ t time.Time\n+\n+ // numCongestionEvents tracks the number of congestion events since last\n+ // RTO.\n+ numCongestionEvents int\n+\n+ // c is the cubic constant as specified in RFC8312. It's fixed at 0.4 as\n+ // per RFC.\n+ c float64\n+\n+ // k is the time period that the above function takes to increase the\n+ // current window size to W_max if there are no further congestion\n+ // events and is calculated using the following equation:\n+ //\n+ // K = cubic_root(W_max*(1-beta_cubic)/C) (Eq. 2)\n+ k float64\n+\n+ // beta is the CUBIC multiplication decrease factor. that is, when a\n+ // congestion event is detected, CUBIC reduces its cwnd to\n+ // W_cubic(0)=W_max*beta_cubic.\n+ beta float64\n+\n+ // wC is window computed by CUBIC at time t. It's calculated using the\n+ // formula:\n+ //\n+ // W_cubic(t) = C*(t-K)^3 + W_max (Eq. 1)\n+ wC float64\n+\n+ // wEst is the window computed by CUBIC at time t+RTT i.e\n+ // W_cubic(t+RTT).\n+ wEst float64\n+\n+ s *sender\n+}\n+\n+// newCubicCC returns a partially initialized cubic state with the constants\n+// beta and c set and t set to current time.\n+func newCubicCC(s *sender) *cubicState {\n+ return &cubicState{\n+ t: time.Now(),\n+ beta: 0.7,\n+ c: 0.4,\n+ s: s,\n+ }\n+}\n+\n+// enterCongestionAvoidance is used to initialize cubic in cases where we exit\n+// SlowStart without a real congestion event taking place. This can happen when\n+// a connection goes back to slow start due to a retransmit and we exceed the\n+// previously lowered ssThresh without experiencing packet loss.\n+//\n+// Refer: https://tools.ietf.org/html/rfc8312#section-4.8\n+func (c *cubicState) enterCongestionAvoidance() {\n+ // See: https://tools.ietf.org/html/rfc8312#section-4.7 &\n+ // https://tools.ietf.org/html/rfc8312#section-4.8\n+ if c.numCongestionEvents == 0 {\n+ c.k = 0\n+ c.t = time.Now()\n+ c.wLastMax = c.wMax\n+ c.wMax = float64(c.s.sndCwnd)\n+ }\n+}\n+\n+// updateSlowStart will update the congestion window as per the slow-start\n+// algorithm used by NewReno. If after adjusting the congestion window we cross\n+// the ssThresh then it will return the number of packets that must be consumed\n+// in congestion avoidance mode.\n+func (c *cubicState) updateSlowStart(packetsAcked int) int {\n+ // Don't let the congestion window cross into the congestion\n+ // avoidance range.\n+ newcwnd := c.s.sndCwnd + packetsAcked\n+ enterCA := false\n+ if newcwnd >= c.s.sndSsthresh {\n+ newcwnd = c.s.sndSsthresh\n+ c.s.sndCAAckCount = 0\n+ enterCA = true\n+ }\n+\n+ packetsAcked -= newcwnd - c.s.sndCwnd\n+ c.s.sndCwnd = newcwnd\n+ if enterCA {\n+ c.enterCongestionAvoidance()\n+ }\n+ return packetsAcked\n+}\n+\n+// Update updates cubic's internal state variables. It must be called on every\n+// ACK received.\n+// Refer: https://tools.ietf.org/html/rfc8312#section-4\n+func (c *cubicState) Update(packetsAcked int) {\n+ if c.s.sndCwnd < c.s.sndSsthresh {\n+ packetsAcked = c.updateSlowStart(packetsAcked)\n+ if packetsAcked == 0 {\n+ return\n+ }\n+ } else {\n+ c.s.sndCwnd = c.getCwnd(packetsAcked, c.s.sndCwnd, c.s.srtt)\n+ }\n+}\n+\n+// cubicCwnd computes the CUBIC congestion window after t seconds from last\n+// congestion event.\n+func (c *cubicState) cubicCwnd(t float64) float64 {\n+ return c.c*math.Pow(t, 3.0) + c.wMax\n+}\n+\n+// getCwnd returns the current congestion window as computed by CUBIC.\n+// Refer: https://tools.ietf.org/html/rfc8312#section-4\n+func (c *cubicState) getCwnd(packetsAcked, sndCwnd int, srtt time.Duration) int {\n+ elapsed := time.Since(c.t).Seconds()\n+\n+ // Compute the window as per Cubic after 'elapsed' time\n+ // since last congestion event.\n+ c.wC = c.cubicCwnd(elapsed - c.k)\n+\n+ // Compute the TCP friendly estimate of the congestion window.\n+ c.wEst = c.wMax*c.beta + (3.0*((1.0-c.beta)/(1.0+c.beta)))*(elapsed/srtt.Seconds())\n+\n+ // Make sure in the TCP friendly region CUBIC performs at least\n+ // as well as Reno.\n+ if c.wC < c.wEst && float64(sndCwnd) < c.wEst {\n+ // TCP Friendly region of cubic.\n+ return int(c.wEst)\n+ }\n+\n+ // In Concave/Convex region of CUBIC, calculate what CUBIC window\n+ // will be after 1 RTT and use that to grow congestion window\n+ // for every ack.\n+ tEst := (time.Since(c.t) + srtt).Seconds()\n+ wtRtt := c.cubicCwnd(tEst - c.k)\n+ // As per 4.3 for each received ACK cwnd must be incremented\n+ // by (w_cubic(t+RTT) - cwnd/cwnd.\n+ cwnd := float64(sndCwnd)\n+ for i := 0; i < packetsAcked; i++ {\n+ // Concave/Convex regions of cubic have the same formulas.\n+ // See: https://tools.ietf.org/html/rfc8312#section-4.3\n+ cwnd += (wtRtt - cwnd) / cwnd\n+ }\n+ return int(cwnd)\n+}\n+\n+// HandleNDupAcks implements congestionControl.HandleNDupAcks.\n+func (c *cubicState) HandleNDupAcks() {\n+ // See: https://tools.ietf.org/html/rfc8312#section-4.5\n+ c.numCongestionEvents++\n+ c.t = time.Now()\n+ c.wLastMax = c.wMax\n+ c.wMax = float64(c.s.sndCwnd)\n+\n+ c.fastConvergence()\n+ c.reduceSlowStartThreshold()\n+}\n+\n+// HandleRTOExpired implements congestionContrl.HandleRTOExpired.\n+func (c *cubicState) HandleRTOExpired() {\n+ // See: https://tools.ietf.org/html/rfc8312#section-4.6\n+ c.t = time.Now()\n+ c.numCongestionEvents = 0\n+ c.wLastMax = c.wMax\n+ c.wMax = float64(c.s.sndCwnd)\n+\n+ c.fastConvergence()\n+\n+ // We lost a packet, so reduce ssthresh.\n+ c.reduceSlowStartThreshold()\n+\n+ // Reduce the congestion window to 1, i.e., enter slow-start. Per\n+ // RFC 5681, page 7, we must use 1 regardless of the value of the\n+ // initial congestion window.\n+ c.s.sndCwnd = 1\n+}\n+\n+// fastConvergence implements the logic for Fast Convergence algorithm as\n+// described in https://tools.ietf.org/html/rfc8312#section-4.6.\n+func (c *cubicState) fastConvergence() {\n+ if c.wMax < c.wLastMax {\n+ c.wLastMax = c.wMax\n+ c.wMax = c.wMax * (1.0 + c.beta) / 2.0\n+ } else {\n+ c.wLastMax = c.wMax\n+ }\n+ // Recompute k as wMax may have changed.\n+ c.k = math.Cbrt(c.wMax * (1 - c.beta) / c.c)\n+}\n+\n+// PostRecovery implemements congestionControl.PostRecovery.\n+func (c *cubicState) PostRecovery() {\n+ c.t = time.Now()\n+}\n+\n+// reduceSlowStartThreshold returns new SsThresh as described in\n+// https://tools.ietf.org/html/rfc8312#section-4.7.\n+func (c *cubicState) reduceSlowStartThreshold() {\n+ c.s.sndSsthresh = int(math.Max(float64(c.s.sndCwnd)*c.beta, 2.0))\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -187,6 +187,10 @@ type endpoint struct {\nsndWaker sleep.Waker `state:\"manual\"`\nsndCloseWaker sleep.Waker `state:\"manual\"`\n+ // cc stores the name of the Congestion Control algorithm to use for\n+ // this endpoint.\n+ cc CongestionControlOption\n+\n// The following are used when a \"packet too big\" control packet is\n// received. They are protected by sndBufMu. They are used to\n// communicate to the main protocol goroutine how many such control\n@@ -254,6 +258,11 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\ne.rcvBufSize = rs.Default\n}\n+ var cs CongestionControlOption\n+ if err := stack.TransportProtocolOption(ProtocolNumber, &cs); err == nil {\n+ e.cc = cs\n+ }\n+\nif p := stack.GetTCPProbe(); p != nil {\ne.probe = p\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/protocol.go",
"new_path": "pkg/tcpip/transport/tcp/protocol.go",
"diff": "@@ -69,6 +69,11 @@ type ReceiveBufferSizeOption struct {\nMax int\n}\n+const (\n+ ccReno = \"reno\"\n+ ccCubic = \"cubic\"\n+)\n+\n// CongestionControlOption sets the current congestion control algorithm.\ntype CongestionControlOption string\n@@ -227,8 +232,8 @@ func init() {\nreturn &protocol{\nsendBufferSize: SendBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},\nrecvBufferSize: ReceiveBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},\n- congestionControl: \"reno\",\n- availableCongestionControl: []string{\"reno\"},\n+ congestionControl: ccReno,\n+ availableCongestionControl: []string{ccReno, ccCubic},\n}\n})\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/reno.go",
"new_path": "pkg/tcpip/transport/tcp/reno.go",
"diff": "@@ -96,3 +96,8 @@ func (r *renoState) HandleRTOExpired() {\n// initial congestion window.\nr.s.sndCwnd = 1\n}\n+\n+// PostRecovery implements congestionControl.PostRecovery.\n+func (r *renoState) PostRecovery() {\n+ // noop.\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/snd.go",
"new_path": "pkg/tcpip/transport/tcp/snd.go",
"diff": "@@ -51,6 +51,11 @@ type congestionControl interface {\n// number of packet's that were acked by the most recent cumulative\n// acknowledgement.\nUpdate(packetsAcked int)\n+\n+ // PostRecovery is invoked when the sender is exiting a fast retransmit/\n+ // recovery phase. This provides congestion control algorithms a way\n+ // to adjust their state when exiting recovery.\n+ PostRecovery()\n}\n// sender holds the state necessary to send TCP segments.\n@@ -174,7 +179,7 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\n},\n}\n- s.cc = newRenoCC(s)\n+ s.cc = s.initCongestionControl(ep.cc)\n// A negative sndWndScale means that no scaling is in use, otherwise we\n// store the scaling value.\n@@ -189,6 +194,17 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint\nreturn s\n}\n+func (s *sender) initCongestionControl(congestionControlName CongestionControlOption) congestionControl {\n+ switch congestionControlName {\n+ case ccCubic:\n+ return newCubicCC(s)\n+ case ccReno:\n+ fallthrough\n+ default:\n+ return newRenoCC(s)\n+ }\n+}\n+\n// updateMaxPayloadSize updates the maximum payload size based on the given\n// MTU. If this is in response to \"packet too big\" control packets (indicated\n// by the count argument), it also reduces the number of outstanding packets and\n@@ -409,6 +425,7 @@ func (s *sender) sendData() {\n}\nfunc (s *sender) enterFastRecovery() {\n+ s.fr.active = true\n// Save state to reflect we're now in fast recovery.\n// See : https://tools.ietf.org/html/rfc5681#section-3.2 Step 3.\n// We inflat the cwnd by 3 to account for the 3 packets which triggered\n@@ -417,7 +434,6 @@ func (s *sender) enterFastRecovery() {\ns.fr.first = s.sndUna\ns.fr.last = s.sndNxt - 1\ns.fr.maxCwnd = s.sndCwnd + s.outstanding\n- s.fr.active = true\n}\nfunc (s *sender) leaveFastRecovery() {\n@@ -429,12 +445,13 @@ func (s *sender) leaveFastRecovery() {\n// Deflate cwnd. It had been artificially inflated when new dups arrived.\ns.sndCwnd = s.sndSsthresh\n+ s.cc.PostRecovery()\n}\n// checkDuplicateAck is called when an ack is received. It manages the state\n// related to duplicate acks and determines if a retransmit is needed according\n// to the rules in RFC 6582 (NewReno).\n-func (s *sender) checkDuplicateAck(seg *segment) bool {\n+func (s *sender) checkDuplicateAck(seg *segment) (rtx bool) {\nack := seg.ackNumber\nif s.fr.active {\n// We are in fast recovery mode. Ignore the ack if it's out of\n@@ -474,6 +491,7 @@ func (s *sender) checkDuplicateAck(seg *segment) bool {\n//\n// N.B. The retransmit timer will be reset by the caller.\ns.fr.first = ack\n+ s.dupAckCount = 0\nreturn true\n}\n@@ -508,16 +526,11 @@ func (s *sender) checkDuplicateAck(seg *segment) bool {\nreturn true\n}\n-// updateCwnd updates the congestion window based on the number of packets that\n-// were acknowledged.\n-func (s *sender) updateCwnd(packetsAcked int) {\n-}\n-\n// handleRcvdSegment is called when a segment is received; it is responsible for\n// updating the send-related state.\nfunc (s *sender) handleRcvdSegment(seg *segment) {\n// Check if we can extract an RTT measurement from this ack.\n- if s.rttMeasureSeqNum.LessThan(seg.ackNumber) {\n+ if !s.ep.sendTSOk && s.rttMeasureSeqNum.LessThan(seg.ackNumber) {\ns.updateRTO(time.Now().Sub(s.rttMeasureTime))\ns.rttMeasureSeqNum = s.sndNxt\n}\n@@ -534,10 +547,25 @@ func (s *sender) handleRcvdSegment(seg *segment) {\n// Ignore ack if it doesn't acknowledge any new data.\nack := seg.ackNumber\nif (ack - 1).InRange(s.sndUna, s.sndNxt) {\n+ s.dupAckCount = 0\n// When an ack is received we must reset the timer. We stop it\n// here and it will be restarted later if needed.\ns.resendTimer.disable()\n+ // See : https://tools.ietf.org/html/rfc1323#section-3.3.\n+ // Specifically we should only update the RTO using TSEcr if the\n+ // following condition holds:\n+ //\n+ // A TSecr value received in a segment is used to update the\n+ // averaged RTT measurement only if the segment acknowledges\n+ // some new data, i.e., only if it advances the left edge of\n+ // the send window.\n+ if s.ep.sendTSOk && seg.parsedOptions.TSEcr != 0 {\n+ // TSVal/Ecr values sent by Netstack are at a millisecond\n+ // granularity.\n+ elapsed := time.Duration(s.ep.timestamp()-seg.parsedOptions.TSEcr) * time.Millisecond\n+ s.updateRTO(elapsed)\n+ }\n// Remove all acknowledged data from the write list.\nacked := s.sndUna.Size(ack)\ns.sndUna = ack\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_test.go",
"diff": "@@ -17,6 +17,7 @@ package tcp_test\nimport (\n\"bytes\"\n\"fmt\"\n+ \"math\"\n\"testing\"\n\"time\"\n@@ -2005,7 +2006,7 @@ func TestCongestionAvoidance(t *testing.T) {\n// Check we don't receive any more packets on this iteration.\n// The timeout can't be too high or we'll trigger a timeout.\n- c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd.\", 50*time.Millisecond)\n+ c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd (slow start phase).\", 50*time.Millisecond)\n}\n// Don't acknowledge the first packet of the last packet train. Let's\n@@ -2043,7 +2044,7 @@ func TestCongestionAvoidance(t *testing.T) {\n// Check we don't receive any more packets on this iteration.\n// The timeout can't be too high or we'll trigger a timeout.\n- c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd.\", 50*time.Millisecond)\n+ c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd (congestion avoidance phase).\", 50*time.Millisecond)\n// Acknowledge all the data received so far.\nc.SendAck(790, bytesRead)\n@@ -2054,6 +2055,130 @@ func TestCongestionAvoidance(t *testing.T) {\n}\n}\n+// cubicCwnd returns an estimate of a cubic window given the\n+// originalCwnd, wMax, last congestion event time and sRTT.\n+func cubicCwnd(origCwnd int, wMax int, congEventTime time.Time, sRTT time.Duration) int {\n+ cwnd := float64(origCwnd)\n+ // We wait 50ms between each iteration so sRTT as computed by cubic\n+ // should be close to 50ms.\n+ elapsed := (time.Since(congEventTime) + sRTT).Seconds()\n+ k := math.Cbrt(float64(wMax) * 0.3 / 0.7)\n+ wtRTT := 0.4*math.Pow(elapsed-k, 3) + float64(wMax)\n+ cwnd += (wtRTT - cwnd) / cwnd\n+ return int(cwnd)\n+}\n+\n+func TestCubicCongestionAvoidance(t *testing.T) {\n+ maxPayload := 10\n+ c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))\n+ defer c.Cleanup()\n+\n+ enableCUBIC(t, c)\n+\n+ c.CreateConnected(789, 30000, nil)\n+\n+ const iterations = 7\n+ data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1)))\n+\n+ for i := range data {\n+ data[i] = byte(i)\n+ }\n+\n+ // Write all the data in one shot. Packets will only be written at the\n+ // MTU size though.\n+ if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil {\n+ t.Fatalf(\"Unexpected error from Write: %v\", err)\n+ }\n+\n+ // Do slow start for a few iterations.\n+ expected := tcp.InitialCwnd\n+ bytesRead := 0\n+ for i := 0; i < iterations; i++ {\n+ expected = tcp.InitialCwnd << uint(i)\n+ if i > 0 {\n+ // Acknowledge all the data received so far if not on\n+ // first iteration.\n+ c.SendAck(790, bytesRead)\n+ }\n+\n+ // Read all packets expected on this iteration. Don't\n+ // acknowledge any of them just yet, so that we can measure the\n+ // congestion window.\n+ for j := 0; j < expected; j++ {\n+ c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)\n+ bytesRead += maxPayload\n+ }\n+\n+ // Check we don't receive any more packets on this iteration.\n+ // The timeout can't be too high or we'll trigger a timeout.\n+ c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd (during slow-start phase).\", 50*time.Millisecond)\n+ }\n+\n+ // Don't acknowledge the first packet of the last packet train. Let's\n+ // wait for them to time out, which will trigger a restart of slow\n+ // start, and initialization of ssthresh to cwnd * 0.7.\n+ rtxOffset := bytesRead - maxPayload*expected\n+ c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload)\n+\n+ // Acknowledge all pending data.\n+ c.SendAck(790, bytesRead)\n+\n+ // Store away the time we sent the ACK and assuming a 200ms RTO\n+ // we estimate that the sender will have an RTO 200ms from now\n+ // and go back into slow start.\n+ packetDropTime := time.Now().Add(200 * time.Millisecond)\n+\n+ // This part is tricky: when the timeout happened, we had \"expected\"\n+ // packets pending, cwnd reset to 1, and ssthresh set to expected * 0.7.\n+ // By acknowledging \"expected\" packets, the slow-start part will\n+ // increase cwnd to expected/2 essentially putting the connection\n+ // straight into congestion avoidance.\n+ wMax := expected\n+ // Lower expected as per cubic spec after a congestion event.\n+ expected = int(float64(expected) * 0.7)\n+ cwnd := expected\n+ for i := 0; i < iterations; i++ {\n+ // Cubic grows window independent of ACKs. Cubic Window growth\n+ // is a function of time elapsed since last congestion event.\n+ // As a result the congestion window does not grow\n+ // deterministically in response to ACKs.\n+ //\n+ // We need to roughly estimate what the cwnd of the sender is\n+ // based on when we sent the dupacks.\n+ cwnd := cubicCwnd(cwnd, wMax, packetDropTime, 50*time.Millisecond)\n+\n+ packetsExpected := cwnd\n+ for j := 0; j < packetsExpected; j++ {\n+ c.ReceiveAndCheckPacket(data, bytesRead, maxPayload)\n+ bytesRead += maxPayload\n+ }\n+ t.Logf(\"expected packets received, next trying to receive any extra packets that may come\")\n+\n+ // If our estimate was correct there should be no more pending packets.\n+ // We attempt to read a packet a few times with a short sleep in between\n+ // to ensure that we don't see the sender send any unexpected packets.\n+ packetsUnexpected := 0\n+ for {\n+ gotPacket := c.ReceiveNonBlockingAndCheckPacket(data, bytesRead, maxPayload)\n+ if !gotPacket {\n+ break\n+ }\n+ bytesRead += maxPayload\n+ packetsUnexpected++\n+ time.Sleep(1 * time.Millisecond)\n+ }\n+ if packetsUnexpected != 0 {\n+ t.Fatalf(\"received %d unexpected packets for iteration %d\", packetsUnexpected, i)\n+ }\n+ // Check we don't receive any more packets on this iteration.\n+ // The timeout can't be too high or we'll trigger a timeout.\n+ c.CheckNoPacketTimeout(\"More packets received than expected for this cwnd(congestion avoidance)\", 5*time.Millisecond)\n+\n+ // Acknowledge all the data received so far.\n+ c.SendAck(790, bytesRead)\n+ }\n+}\n+\nfunc TestFastRecovery(t *testing.T) {\nmaxPayload := 10\nc := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload))\n@@ -2864,8 +2989,9 @@ func TestSetCongestionControl(t *testing.T) {\nmustPass bool\n}{\n{\"reno\", true},\n- {\"cubic\", false},\n+ {\"cubic\", true},\n}\n+\nfor _, tc := range testCases {\nt.Run(fmt.Sprintf(\"SetTransportProtocolOption(.., %v)\", tc.cc), func(t *testing.T) {\nc := context.New(t, 1500)\n@@ -2881,7 +3007,7 @@ func TestSetCongestionControl(t *testing.T) {\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n}\n- if got, want := cc, tcp.CongestionControlOption(\"reno\"); got != want {\n+ if got, want := cc, tc.cc; got != want {\nt.Fatalf(\"unexpected value for congestion control got: %v, want: %v\", got, want)\n}\n})\n@@ -2899,7 +3025,7 @@ func TestAvailableCongestionControl(t *testing.T) {\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &aCC, err)\n}\n- if got, want := aCC, tcp.AvailableCongestionControlOption(\"reno\"); got != want {\n+ if got, want := aCC, tcp.AvailableCongestionControlOption(\"reno cubic\"); got != want {\nt.Fatalf(\"unexpected value for AvailableCongestionControlOption: got: %v, want: %v\", got, want)\n}\n}\n@@ -2917,11 +3043,19 @@ func TestSetAvailableCongestionControl(t *testing.T) {\n}\n// Verify that we still get the expected list of congestion control options.\n- var cc tcp.CongestionControlOption\n+ var cc tcp.AvailableCongestionControlOption\nif err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {\nt.Fatalf(\"s.TransportProtocolOption(%v, %v) = %v\", tcp.ProtocolNumber, &cc, err)\n}\n- if got, want := cc, tcp.CongestionControlOption(\"reno\"); got != want {\n- t.Fatalf(\"unexpected value for congestion control got: %v, want: %v\", got, want)\n+ if got, want := cc, tcp.AvailableCongestionControlOption(\"reno cubic\"); got != want {\n+ t.Fatalf(\"unexpected value for available congestion control got: %v, want: %v\", got, want)\n+ }\n+}\n+\n+func enableCUBIC(t *testing.T, c *context.Context) {\n+ t.Helper()\n+ opt := tcp.CongestionControlOption(\"cubic\")\n+ if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, opt); err != nil {\n+ t.Fatalf(\"c.s.SetTransportProtocolOption(tcp.ProtocolNumber, %v = %v\", opt, err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"new_path": "pkg/tcpip/transport/tcp/testing/context/context.go",
"diff": "@@ -242,6 +242,27 @@ func (c *Context) GetPacket() []byte {\nreturn nil\n}\n+// GetPacketNonBlocking reads a packet from the link layer endpoint\n+// and verifies that it is an IPv4 packet with the expected source\n+// and destination address. If no packet is available it will return\n+// nil immediately.\n+func (c *Context) GetPacketNonBlocking() []byte {\n+ select {\n+ case p := <-c.linkEP.C:\n+ if p.Proto != ipv4.ProtocolNumber {\n+ c.t.Fatalf(\"Bad network protocol: got %v, wanted %v\", p.Proto, ipv4.ProtocolNumber)\n+ }\n+ b := make([]byte, len(p.Header)+len(p.Payload))\n+ copy(b, p.Header)\n+ copy(b[len(p.Header):], p.Payload)\n+\n+ checker.IPv4(c.t, b, checker.SrcAddr(StackAddr), checker.DstAddr(TestAddr))\n+ return b\n+ default:\n+ return nil\n+ }\n+}\n+\n// SendICMPPacket builds and sends an ICMPv4 packet via the link layer endpoint.\nfunc (c *Context) SendICMPPacket(typ header.ICMPv4Type, code uint8, p1, p2 []byte, maxTotalSize int) {\n// Allocate a buffer data and headers.\n@@ -355,6 +376,32 @@ func (c *Context) ReceiveAndCheckPacket(data []byte, offset, size int) {\n}\n}\n+// ReceiveNonBlockingAndCheckPacket reads a packet from the link layer endpoint\n+// and verifies that the packet packet payload of packet matches the slice of\n+// data indicated by offset & size. It returns true if a packet was received and\n+// processed.\n+func (c *Context) ReceiveNonBlockingAndCheckPacket(data []byte, offset, size int) bool {\n+ b := c.GetPacketNonBlocking()\n+ if b == nil {\n+ return false\n+ }\n+ checker.IPv4(c.t, b,\n+ checker.PayloadLen(size+header.TCPMinimumSize),\n+ checker.TCP(\n+ checker.DstPort(TestPort),\n+ checker.SeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),\n+ checker.AckNum(uint32(seqnum.Value(testInitialSequenceNumber).Add(1))),\n+ checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),\n+ ),\n+ )\n+\n+ pdata := data[offset:][:size]\n+ if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; bytes.Compare(pdata, p) != 0 {\n+ c.t.Fatalf(\"Data is different: expected %v, got %v\", pdata, p)\n+ }\n+ return true\n+}\n+\n// CreateV6Endpoint creates and initializes c.ep as a IPv6 Endpoint. If v6Only\n// is true then it sets the IP_V6ONLY option on the socket to make it a IPv6\n// only endpoint instead of a default dual stack socket.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Cubic implementation for Netstack.
This CL implements CUBIC as described in https://tools.ietf.org/html/rfc8312.
PiperOrigin-RevId: 207353142
Change-Id: I329cbf3277f91127e99e488f07d906f6779c6603 |
259,989 | 06.08.2018 10:10:25 | 25,200 | 3ec074897f9d0aba21bc9f41be18f52bfbeb599e | Fix a bug in PCIDs.Assign
Store the new assigned pcid in p.cache[pt]. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/pagetables/pcids_x86.go",
"new_path": "pkg/sentry/platform/ring0/pagetables/pcids_x86.go",
"diff": "@@ -64,6 +64,7 @@ func (p *PCIDs) Assign(pt *PageTables) (uint16, bool) {\nif len(p.avail) > 0 {\npcid := p.avail[len(p.avail)-1]\np.avail = p.avail[:len(p.avail)-1]\n+ p.cache[pt] = pcid\n// We need to flush because while this is in the available\n// pool, it may have been used previously.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix a bug in PCIDs.Assign
Store the new assigned pcid in p.cache[pt].
Signed-off-by: ShiruRen <[email protected]>
Change-Id: I4aee4e06559e429fb5e90cb9fe28b36139e3b4b6
PiperOrigin-RevId: 207563833 |
259,992 | 06.08.2018 11:47:07 | 25,200 | bc9a1fca23870b21e16e024220e0c87e236c6cf5 | Tiny reordering to network code | [
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/network.go",
"new_path": "runsc/sandbox/network.go",
"diff": "@@ -221,12 +221,6 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\ncontinue\n}\n- // Get the link for the interface.\n- ifaceLink, err := netlink.LinkByName(iface.Name)\n- if err != nil {\n- return fmt.Errorf(\"error getting link for interface %q: %v\", iface.Name, err)\n- }\n-\n// Create the socket.\nconst protocol = 0x0300 // htons(ETH_P_ALL)\nfd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)\n@@ -238,7 +232,7 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\n// Bind to the appropriate device.\nll := syscall.SockaddrLinklayer{\nProtocol: protocol,\n- Ifindex: ifaceLink.Attrs().Index,\n+ Ifindex: iface.Index,\nHatype: 0, // No ARP type.\nPkttype: syscall.PACKET_OTHERHOST,\n}\n@@ -266,6 +260,12 @@ func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string) error {\nRoutes: routes,\n}\n+ // Get the link for the interface.\n+ ifaceLink, err := netlink.LinkByName(iface.Name)\n+ if err != nil {\n+ return fmt.Errorf(\"error getting link for interface %q: %v\", iface.Name, err)\n+ }\n+\n// Collect the addresses for the interface, enable forwarding,\n// and remove them from the host.\nfor _, addr := range ip4addrs {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Tiny reordering to network code
PiperOrigin-RevId: 207581723
Change-Id: I6e4eb1227b5ed302de5e6c891040b670955f1eea |
259,992 | 06.08.2018 18:07:15 | 25,200 | 9752174a7f211328c0ff59f8ed6c51325a6fc23d | Disable KVM dimension because it's making the test flaky | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -212,12 +212,13 @@ func configs(opts configOptions) []*boot.Config {\ncs = append(cs, c)\n}\n- // TODO: KVM doesn't work with --race.\n- if !testutil.RaceEnabled && opts&kvm != 0 {\n- c := testutil.TestConfig()\n- c.Platform = boot.PlatformKVM\n- cs = append(cs, c)\n- }\n+ // TODO: KVM tests are flaky. Disable until fixed.\n+ // // TODO: KVM doesn't work with --race.\n+ // if !testutil.RaceEnabled && opts&kvm != 0 {\n+ // c := testutil.TestConfig()\n+ // c.Platform = boot.PlatformKVM\n+ // cs = append(cs, c)\n+ // }\nreturn cs\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Disable KVM dimension because it's making the test flaky
PiperOrigin-RevId: 207642348
Change-Id: Iacec9f097ab93b91c0c8eea61b1347e864f57a8b |
260,013 | 07.08.2018 07:56:08 | 25,200 | d839dc13c689f853fd87c495c26208048a540919 | Netstack doesn't handle sending after SHUT_WR correctly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/ping/endpoint.go",
"new_path": "pkg/tcpip/transport/ping/endpoint.go",
"diff": "@@ -71,6 +71,8 @@ type endpoint struct {\n// The following fields are protected by the mu mutex.\nmu sync.RWMutex `state:\"nosave\"`\nsndBufSize int\n+ // shutdownFlags represent the current shutdown state of the endpoint.\n+ shutdownFlags tcpip.ShutdownFlags\nid stack.TransportEndpointID\nstate endpointState\nbindNICID tcpip.NICID\n@@ -93,7 +95,7 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite\n// associated with it.\nfunc (e *endpoint) Close() {\ne.mu.Lock()\n-\n+ e.shutdownFlags = tcpip.ShutdownRead | tcpip.ShutdownWrite\nswitch e.state {\ncase stateBound, stateConnected:\ne.stack.UnregisterTransportEndpoint(e.regNICID, []tcpip.NetworkProtocolNumber{e.netProto}, ProtocolNumber4, e.id)\n@@ -205,6 +207,11 @@ func (e *endpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, *tc\ne.mu.RLock()\ndefer e.mu.RUnlock()\n+ // If we've shutdown with SHUT_WR we are in an invalid state for sending.\n+ if e.shutdownFlags&tcpip.ShutdownWrite != 0 {\n+ return 0, tcpip.ErrClosedForSend\n+ }\n+\n// Prepare for write.\nfor {\nretry, err := e.prepareForWrite(to)\n@@ -465,8 +472,9 @@ func (*endpoint) ConnectEndpoint(tcpip.Endpoint) *tcpip.Error {\n// Shutdown closes the read and/or write end of the endpoint connection\n// to its peer.\nfunc (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\n- e.mu.RLock()\n- defer e.mu.RUnlock()\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\n+ e.shutdownFlags |= flags\nif e.state != stateConnected {\nreturn tcpip.ErrNotConnected\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1024,7 +1024,7 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\n}\ndefault:\n- return tcpip.ErrInvalidEndpointState\n+ return tcpip.ErrNotConnected\n}\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/udp/endpoint.go",
"new_path": "pkg/tcpip/transport/udp/endpoint.go",
"diff": "@@ -80,6 +80,9 @@ type endpoint struct {\ndstPort uint16\nv6only bool\n+ // shutdownFlags represent the current shutdown state of the endpoint.\n+ shutdownFlags tcpip.ShutdownFlags\n+\n// effectiveNetProtos contains the network protocols actually in use. In\n// most cases it will only contain \"netProto\", but in cases like IPv6\n// endpoints with v6only set to false, this could include multiple\n@@ -124,6 +127,7 @@ func NewConnectedEndpoint(stack *stack.Stack, r *stack.Route, id stack.Transport\n// associated with it.\nfunc (e *endpoint) Close() {\ne.mu.Lock()\n+ e.shutdownFlags = tcpip.ShutdownRead | tcpip.ShutdownWrite\nswitch e.state {\ncase stateBound, stateConnected:\n@@ -236,6 +240,11 @@ func (e *endpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, *tc\ne.mu.RLock()\ndefer e.mu.RUnlock()\n+ // If we've shutdown with SHUT_WR we are in an invalid state for sending.\n+ if e.shutdownFlags&tcpip.ShutdownWrite != 0 {\n+ return 0, tcpip.ErrClosedForSend\n+ }\n+\n// Prepare for write.\nfor {\nretry, err := e.prepareForWrite(to)\n@@ -562,13 +571,15 @@ func (*endpoint) ConnectEndpoint(tcpip.Endpoint) *tcpip.Error {\n// Shutdown closes the read and/or write end of the endpoint connection\n// to its peer.\nfunc (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error {\n- e.mu.RLock()\n- defer e.mu.RUnlock()\n+ e.mu.Lock()\n+ defer e.mu.Unlock()\nif e.state != stateConnected {\nreturn tcpip.ErrNotConnected\n}\n+ e.shutdownFlags |= flags\n+\nif flags&tcpip.ShutdownRead != 0 {\ne.rcvMu.Lock()\nwasClosed := e.rcvClosed\n"
}
] | Go | Apache License 2.0 | google/gvisor | Netstack doesn't handle sending after SHUT_WR correctly.
PiperOrigin-RevId: 207715032
Change-Id: I7b6690074c5be283145192895d706a92e921b22c |
259,948 | 07.08.2018 10:26:17 | 25,200 | c348d0786388ded1a4bad3c98000b4653724c764 | sentry: make epoll.pollEntry wait for the file operation in restore. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/file.go",
"new_path": "pkg/sentry/fs/file.go",
"diff": "@@ -91,7 +91,7 @@ type File struct {\nmu amutex.AbortableMutex `state:\"nosave\"`\n// FileOperations implements file system specific behavior for this File.\n- FileOperations FileOperations\n+ FileOperations FileOperations `state:\"wait\"`\n// offset is the File's offset. Updating offset is protected by mu but\n// can be read atomically via File.Offset() outside of mu.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/inode.go",
"new_path": "pkg/sentry/fs/host/inode.go",
"diff": "@@ -77,7 +77,7 @@ type inodeFileState struct {\ndescriptor *descriptor `state:\"wait\"`\n// Event queue for blocking operations.\n- queue waiter.Queue `state:\"nosave\"`\n+ queue waiter.Queue `state:\"zerovalue\"`\n// sattr is used to restore the inodeOperations.\nsattr fs.StableAttr `state:\"wait\"`\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/epoll/epoll.go",
"new_path": "pkg/sentry/kernel/epoll/epoll.go",
"diff": "@@ -61,7 +61,7 @@ const (\n//\n// +stateify savable\ntype FileIdentifier struct {\n- File *fs.File\n+ File *fs.File `state:\"wait\"`\nFd kdefs.FD\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: make epoll.pollEntry wait for the file operation in restore.
PiperOrigin-RevId: 207737935
Change-Id: I3a301ece1f1d30909715f36562474e3248b6a0d5 |
259,962 | 07.08.2018 11:48:37 | 25,200 | 7d3684aadf71255d3d8442ae1ed0b0f0048f95a3 | Adds support to dump out cubic internal state. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -56,6 +56,20 @@ type transportProtocolState struct {\n// passed to stack.AddTCPProbe.\ntype TCPProbeFunc func(s TCPEndpointState)\n+// TCPCubicState is used to hold a copy of the internal cubic state when the\n+// TCPProbeFunc is invoked.\n+type TCPCubicState struct {\n+ WLastMax float64\n+ WMax float64\n+ T time.Time\n+ TimeSinceLastCongestion time.Duration\n+ C float64\n+ K float64\n+ Beta float64\n+ WC float64\n+ WEst float64\n+}\n+\n// TCPEndpointID is the unique 4 tuple that identifies a given endpoint.\ntype TCPEndpointID struct {\n// LocalPort is the local port associated with the endpoint.\n@@ -180,6 +194,9 @@ type TCPSenderState struct {\n// FastRecovery holds the fast recovery state for the endpoint.\nFastRecovery TCPFastRecoveryState\n+\n+ // Cubic holds the state related to CUBIC congestion control.\n+ Cubic TCPCubicState\n}\n// TCPSACKInfo holds TCP SACK related information for a given TCP endpoint.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1452,5 +1452,19 @@ func (e *endpoint) completeState() stack.TCPEndpointState {\nSndWndScale: e.snd.sndWndScale,\nMaxSentAck: e.snd.maxSentAck,\n}\n+\n+ if cubic, ok := e.snd.cc.(*cubicState); ok {\n+ s.Sender.Cubic = stack.TCPCubicState{\n+ WMax: cubic.wMax,\n+ WLastMax: cubic.wLastMax,\n+ T: cubic.t,\n+ TimeSinceLastCongestion: time.Since(cubic.t),\n+ C: cubic.c,\n+ K: cubic.k,\n+ Beta: cubic.beta,\n+ WC: cubic.wC,\n+ WEst: cubic.wEst,\n+ }\n+ }\nreturn s\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Adds support to dump out cubic internal state.
PiperOrigin-RevId: 207754087
Change-Id: I83abce64348ea93f8692da81a881b364dae2158b |
259,885 | 07.08.2018 13:08:37 | 25,200 | c036da5dffdf6cad912abe2723e69c04b59430b7 | Hold TaskSet.mu in Task.Parent. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/threads.go",
"new_path": "pkg/sentry/kernel/threads.go",
"diff": "@@ -441,6 +441,8 @@ func (t *Task) Timekeeper() *Timekeeper {\n// Parent returns t's parent.\nfunc (t *Task) Parent() *Task {\n+ t.tg.pidns.owner.mu.RLock()\n+ defer t.tg.pidns.owner.mu.RUnlock()\nreturn t.parent\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Hold TaskSet.mu in Task.Parent.
PiperOrigin-RevId: 207766238
Change-Id: Id3b66d8fe1f44c3570f67fa5ae7ba16021e35be1 |
259,992 | 07.08.2018 13:47:16 | 25,200 | cb23232c37c092b60d7e3ee91cb8dd8bed855028 | Fix build break in test
integration_test runs manually and breakage wasn't detected. Added test to
kokoro to ensure breakages are detected in the future. | [
{
"change_type": "MODIFY",
"old_path": "kokoro/run_tests.sh",
"new_path": "kokoro/run_tests.sh",
"diff": "@@ -44,10 +44,14 @@ bazel test --test_output=errors //...\nexit_code=${?}\nif [[ ${exit_code} -eq 0 ]]; then\n+ # These names are used to exclude tests not supported in certain\n+ # configuration, e.g. save/restore not supported with hostnet.\ndeclare -a variations=(\"\" \"-kvm\" \"-hostnet\" \"-overlay\")\nfor v in \"${variations[@]}\"; do\n- # image_test is tagged manual\n- bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}${v} //runsc/test/image:image_test\n+ # Run runsc tests with docker that are tagged manual.\n+ bazel test --test_output=errors --test_env=RUNSC_RUNTIME=${runtime}${v} \\\n+ //runsc/test/image:image_test \\\n+ //runsc/test/integration:integration_test\nexit_code=${?}\nif [[ ${exit_code} -ne 0 ]]; then\nbreak\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/image_test.go",
"new_path": "runsc/test/image/image_test.go",
"diff": "@@ -39,8 +39,8 @@ import (\nfunc TestHelloWorld(t *testing.T) {\nd := testutil.MakeDocker(\"hello-test\")\n- if out, err := d.Run(\"hello-world\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"hello-world\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -82,8 +82,8 @@ func testHTTPServer(port int) error {\n}\nfunc TestHttpd(t *testing.T) {\n- if out, err := testutil.Pull(\"httpd\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"httpd\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"http-test\")\n@@ -93,8 +93,8 @@ func TestHttpd(t *testing.T) {\n}\n// Start the container.\n- if out, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/local/apache2/htdocs:ro\"), \"httpd\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/local/apache2/htdocs:ro\"), \"httpd\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -105,8 +105,8 @@ func TestHttpd(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\nif err := testHTTPServer(port); err != nil {\n@@ -115,8 +115,8 @@ func TestHttpd(t *testing.T) {\n}\nfunc TestNginx(t *testing.T) {\n- if out, err := testutil.Pull(\"nginx\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"nginx\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"net-test\")\n@@ -126,8 +126,8 @@ func TestNginx(t *testing.T) {\n}\n// Start the container.\n- if out, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/share/nginx/html:ro\"), \"nginx\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"-p\", \"80\", \"-v\", testutil.MountArg(dir, \"/usr/share/nginx/html:ro\"), \"nginx\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -138,8 +138,8 @@ func TestNginx(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\nif err := testHTTPServer(port); err != nil {\n@@ -148,14 +148,14 @@ func TestNginx(t *testing.T) {\n}\nfunc TestMysql(t *testing.T) {\n- if out, err := testutil.Pull(\"mysql\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"mysql\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"mysql-test\")\n// Start the container.\n- if out, err := d.Run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"-e\", \"MYSQL_ROOT_PASSWORD=foobar123\", \"mysql\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -178,8 +178,8 @@ func TestMysql(t *testing.T) {\n\"mysql\",\n\"mysql\", \"-hmysql\", \"-uroot\", \"-pfoobar123\", \"-v\", \"-e\", \"source /sql/mysql.sql\",\n}\n- if out, err := client.Run(args...); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := client.Run(args...); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer client.CleanUp()\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/python_test.go",
"new_path": "runsc/test/image/python_test.go",
"diff": "@@ -24,12 +24,12 @@ import (\n)\nfunc TestPythonHello(t *testing.T) {\n- if out, err := testutil.Pull(\"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"python-hello-test\")\n- if out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -40,8 +40,8 @@ func TestPythonHello(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 10*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Ensure that content is being served.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/image/tomcat_test.go",
"new_path": "runsc/test/image/tomcat_test.go",
"diff": "@@ -24,12 +24,12 @@ import (\n)\nfunc TestTomcat(t *testing.T) {\n- if out, err := testutil.Pull(\"tomcat:8.0\"); err != nil {\n- t.Fatalf(\"docker pull failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\nd := testutil.MakeDocker(\"tomcat-test\")\n- if out, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n- t.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n+ if _, err := d.Run(\"-p\", \"8080\", \"tomcat:8.0\"); err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n}\ndefer d.CleanUp()\n@@ -40,8 +40,8 @@ func TestTomcat(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 10*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ if err := testutil.WaitForHTTP(port, 10*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Ensure that content is being served.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "@@ -37,11 +37,9 @@ import (\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n-// This container is a docker image for the Flask microframework hello world application.\n-const container = \"python-hello-test\"\n-\n// httpRequestSucceeds sends a request to a given url and checks that the status is OK.\n-func httpRequestSucceeds(client http.Client, url string) error {\n+func httpRequestSucceeds(client http.Client, server string, port int) error {\n+ url := fmt.Sprintf(\"http://%s:%d\", server, port)\n// Ensure that content is being served.\nresp, err := client.Get(url)\nif err != nil {\n@@ -55,33 +53,50 @@ func httpRequestSucceeds(client http.Client, url string) error {\n// TestLifeCycle tests a basic Create/Start/Stop docker container life cycle.\nfunc TestLifeCycle(t *testing.T) {\n- d := testutil.MakeDocker(container)\n-\n- // Test docker create.\n- if out, err := d.Do(\"create\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-p\", \"8080\", \"google/python-hello\"); err != nil {\n- t.Fatalf(\"docker create failed: %v\\nout: %s\", err, out)\n+ if err := testutil.Pull(\"nginx\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n}\n-\n- // Test docker start.\n- if out, err := d.Do(\"start\", d.Name); err != nil {\n+ d := testutil.MakeDocker(\"lifecycle-test\")\n+ if err := d.Create(\"-p\", \"80\", \"nginx\"); err != nil {\n+ t.Fatalf(\"docker create failed: %v\", err)\n+ }\n+ if err := d.Start(); err != nil {\nd.CleanUp()\n- t.Fatalf(\"docker start failed: %v\\nout: %s\", err, out)\n+ t.Fatalf(\"docker start failed: %v\", err)\n}\n- // Test docker stop.\n- if out, err := d.Do(\"stop\", d.Name); err != nil {\n- d.CleanUp()\n- t.Fatalf(\"docker stop failed: %v\\nout: %s\", err, out)\n+ // Test that container is working\n+ port, err := d.FindPort(80)\n+ if err != nil {\n+ t.Fatalf(\"docker.FindPort(80) failed: %v\", err)\n+ }\n+ if err := testutil.WaitForHTTP(port, 5*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n+ }\n+ client := http.Client{Timeout: time.Duration(2 * time.Second)}\n+ if err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\n+ t.Errorf(\"http request failed: %v\", err)\n}\n- // Test removing the container.\n- if out, err := d.Do(\"rm\", d.Name); err != nil {\n- t.Fatalf(\"docker rm failed: %v\\nout: %s\", err, out)\n+ if err := d.Stop(); err != nil {\n+ d.CleanUp()\n+ t.Fatalf(\"docker stop failed: %v\", err)\n+ }\n+ if err := d.Remove(); err != nil {\n+ t.Fatalf(\"docker rm failed: %v\", err)\n}\n}\nfunc TestPauseResume(t *testing.T) {\n- d := testutil.MakeDocker(container)\n+ if !testutil.IsPauseResumeSupported() {\n+ t.Log(\"Pause/resume is not supported, skipping test.\")\n+ return\n+ }\n+\n+ if err := testutil.Pull(\"google/python-hello\"); err != nil {\n+ t.Fatalf(\"docker pull failed: %v\", err)\n+ }\n+ d := testutil.MakeDocker(\"pause-resume-test\")\nif out, err := d.Run(\"-p\", \"8080\", \"google/python-hello\"); err != nil {\nt.Fatalf(\"docker run failed: %v\\nout: %s\", err, out)\n}\n@@ -94,28 +109,22 @@ func TestPauseResume(t *testing.T) {\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n- }\n-\n- timeout := time.Duration(2 * time.Second)\n- client := http.Client{\n- Timeout: timeout,\n+ if err := testutil.WaitForHTTP(port, 20*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n- url := fmt.Sprintf(\"http://localhost:%d\", port)\n// Check that container is working.\n- if err := httpRequestSucceeds(client, url); err != nil {\n+ client := http.Client{Timeout: time.Duration(2 * time.Second)}\n+ if err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Errorf(\"http request failed: %v\", err)\n}\n- // Pause container.\n- if out, err := d.Do(\"pause\", d.Name); err != nil {\n- t.Fatalf(\"docker pause failed: %v\\nout: %s\", err, out)\n+ if err := d.Pause(); err != nil {\n+ t.Fatalf(\"docker pause failed: %v\", err)\n}\n// Check if container is paused.\n- switch _, err := client.Get(url); v := err.(type) {\n+ switch _, err := client.Get(fmt.Sprintf(\"http://localhost:%d\", port)); v := err.(type) {\ncase nil:\nt.Errorf(\"http req expected to fail but it succeeded\")\ncase net.Error:\n@@ -126,18 +135,17 @@ func TestPauseResume(t *testing.T) {\nt.Errorf(\"http req got unexpected error %v\", v)\n}\n- // Resume container.\n- if out, err := d.Do(\"unpause\", d.Name); err != nil {\n- t.Fatalf(\"docker unpause failed: %v\\nout: %s\", err, out)\n+ if err := d.Unpause(); err != nil {\n+ t.Fatalf(\"docker unpause failed: %v\", err)\n}\n// Wait until it's up and running.\n- if err := d.WaitForHTTP(port, 5*time.Second); err != nil {\n- t.Fatalf(\"docker.WaitForHTTP() timeout: %v\", err)\n+ if err := testutil.WaitForHTTP(port, 20*time.Second); err != nil {\n+ t.Fatalf(\"WaitForHTTP() timeout: %v\", err)\n}\n// Check if container is working again.\n- if err := httpRequestSucceeds(client, url); err != nil {\n+ if err := httpRequestSucceeds(client, \"localhost\", port); err != nil {\nt.Errorf(\"http request failed: %v\", err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/docker.go",
"new_path": "runsc/test/testutil/docker.go",
"diff": "@@ -19,7 +19,6 @@ import (\n\"io/ioutil\"\n\"log\"\n\"math/rand\"\n- \"net/http\"\n\"os\"\n\"os/exec\"\n\"path\"\n@@ -41,6 +40,12 @@ func runtime() string {\nreturn r\n}\n+// IsPauseResumeSupported returns true if Pause/Resume is supported by runtime.\n+func IsPauseResumeSupported() bool {\n+ // Native host network stack can't be saved.\n+ return !strings.Contains(runtime(), \"hostnet\")\n+}\n+\n// EnsureSupportedDockerVersion checks if correct docker is installed.\nfunc EnsureSupportedDockerVersion() {\ncmd := exec.Command(\"docker\", \"version\")\n@@ -100,7 +105,7 @@ func do(args ...string) (string, error) {\ncmd := exec.Command(\"docker\", args...)\nout, err := cmd.CombinedOutput()\nif err != nil {\n- return \"\", fmt.Errorf(\"error executing docker %s: %v\", args, err)\n+ return \"\", fmt.Errorf(\"error executing docker %s: %v\\nout: %s\", args, err, out)\n}\nreturn string(out), nil\n}\n@@ -108,8 +113,9 @@ func do(args ...string) (string, error) {\n// Pull pulls a docker image. This is used in tests to isolate the\n// time to pull the image off the network from the time to actually\n// start the container, to avoid timeouts over slow networks.\n-func Pull(image string) (string, error) {\n- return do(\"pull\", image)\n+func Pull(image string) error {\n+ _, err := do(\"pull\", image)\n+ return err\n}\n// Docker contains the name and the runtime of a docker container.\n@@ -125,6 +131,30 @@ func MakeDocker(namePrefix string) Docker {\nreturn Docker{Name: namePrefix + suffix, Runtime: runtime()}\n}\n+// Create calls 'docker create' with the arguments provided.\n+func (d *Docker) Create(args ...string) error {\n+ a := []string{\"create\", \"--runtime\", d.Runtime, \"--name\", d.Name}\n+ a = append(a, args...)\n+ _, err := do(a...)\n+ return err\n+}\n+\n+// Start calls 'docker start'.\n+func (d *Docker) Start() error {\n+ if _, err := do(\"start\", d.Name); err != nil {\n+ return fmt.Errorf(\"error starting container %q: %v\", d.Name, err)\n+ }\n+ return nil\n+}\n+\n+// Stop calls 'docker stop'.\n+func (d *Docker) Stop() error {\n+ if _, err := do(\"stop\", d.Name); err != nil {\n+ return fmt.Errorf(\"error stopping container %q: %v\", d.Name, err)\n+ }\n+ return nil\n+}\n+\n// Run calls 'docker run' with the arguments provided.\nfunc (d *Docker) Run(args ...string) (string, error) {\na := []string{\"run\", \"--runtime\", d.Runtime, \"--name\", d.Name, \"-d\"}\n@@ -132,17 +162,38 @@ func (d *Docker) Run(args ...string) (string, error) {\nreturn do(a...)\n}\n-// CleanUp kills and deletes the container.\n-func (d *Docker) CleanUp() error {\n- if _, err := do(\"kill\", d.Name); err != nil {\n- return fmt.Errorf(\"error killing container %q: %v\", d.Name, err)\n+// Pause calls 'docker pause'.\n+func (d *Docker) Pause() error {\n+ if _, err := do(\"pause\", d.Name); err != nil {\n+ return fmt.Errorf(\"error pausing container %q: %v\", d.Name, err)\n+ }\n+ return nil\n}\n+\n+// Unpause calls 'docker pause'.\n+func (d *Docker) Unpause() error {\n+ if _, err := do(\"unpause\", d.Name); err != nil {\n+ return fmt.Errorf(\"error unpausing container %q: %v\", d.Name, err)\n+ }\n+ return nil\n+}\n+\n+// Remove calls 'docker rm'.\n+func (d *Docker) Remove() error {\nif _, err := do(\"rm\", d.Name); err != nil {\nreturn fmt.Errorf(\"error deleting container %q: %v\", d.Name, err)\n}\nreturn nil\n}\n+// CleanUp kills and deletes the container.\n+func (d *Docker) CleanUp() error {\n+ if _, err := do(\"kill\", d.Name); err != nil {\n+ return fmt.Errorf(\"error killing container %q: %v\", d.Name, err)\n+ }\n+ return d.Remove()\n+}\n+\n// FindPort returns the host port that is mapped to 'sandboxPort'. This calls\n// docker to allocate a free port in the host and prevent conflicts.\nfunc (d *Docker) FindPort(sandboxPort int) (int, error) {\n@@ -177,16 +228,3 @@ func (d *Docker) WaitForOutput(pattern string, timeout time.Duration) error {\n}\nreturn fmt.Errorf(\"timeout waiting for output %q: %s\", re.String(), out)\n}\n-\n-// WaitForHTTP tries GET requests on a port until the call succeeds or a timeout.\n-func (d *Docker) WaitForHTTP(port int, timeout time.Duration) error {\n- for exp := time.Now().Add(timeout); time.Now().Before(exp); {\n- url := fmt.Sprintf(\"http://localhost:%d/\", port)\n- if _, err := http.Get(url); err == nil {\n- // Success!\n- return nil\n- }\n- time.Sleep(100 * time.Millisecond)\n- }\n- return fmt.Errorf(\"timeout waiting for HTTP server on port %d\", port)\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -21,6 +21,7 @@ import (\n\"fmt\"\n\"io\"\n\"io/ioutil\"\n+ \"net/http\"\n\"os\"\n\"path/filepath\"\n\"time\"\n@@ -182,3 +183,12 @@ func Poll(cb func() error, timeout time.Duration) error {\nb := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\nreturn backoff.Retry(cb, b)\n}\n+\n+// WaitForHTTP tries GET requests on a port until the call succeeds or timeout.\n+func WaitForHTTP(port int, timeout time.Duration) error {\n+ cb := func() error {\n+ _, err := http.Get(fmt.Sprintf(\"http://localhost:%d/\", port))\n+ return err\n+ }\n+ return Poll(cb, timeout)\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix build break in test
integration_test runs manually and breakage wasn't detected. Added test to
kokoro to ensure breakages are detected in the future.
PiperOrigin-RevId: 207772835
Change-Id: Iada81b579b558477d4db3516b38366ef6a2e933d |
259,992 | 08.08.2018 10:24:53 | 25,200 | 0d350aac7f70487bc28bae0d0f457155a4e19081 | Enable SACK in runsc
SACK is disabled by default and needs to be manually enabled. It not only
improves performance, but also fixes hangs downloading files from certain
websites. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/hostinet/stack.go",
"new_path": "pkg/sentry/socket/hostinet/stack.go",
"diff": "@@ -84,11 +84,13 @@ func (s *Stack) Configure() error {\nlog.Warningf(\"Failed to read TCP send buffer size, using default values\")\n}\n- s.tcpSACKEnabled = false\n+ // SACK is important for performance and even compatibility, assume it's\n+ // enabled if we can't find the actual value.\n+ s.tcpSACKEnabled = true\nif sack, err := ioutil.ReadFile(\"/proc/sys/net/ipv4/tcp_sack\"); err == nil {\ns.tcpSACKEnabled = strings.TrimSpace(string(sack)) != \"0\"\n} else {\n- log.Warningf(\"Failed to read if TCP SACK if enabled, setting to false\")\n+ log.Warningf(\"Failed to read if TCP SACK if enabled, setting to true\")\n}\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/controller.go",
"new_path": "runsc/boot/controller.go",
"diff": "@@ -285,7 +285,10 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {\nfs.SetRestoreEnvironment(*renv)\n// Prepare to load from the state file.\n- networkStack := newEmptyNetworkStack(cm.l.conf, k)\n+ networkStack, err := newEmptyNetworkStack(cm.l.conf, k)\n+ if err != nil {\n+ return fmt.Errorf(\"failed to create network: %v\", err)\n+ }\ninfo, err := o.FilePayload.Files[0].Stat()\nif err != nil {\nreturn err\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/loader.go",
"new_path": "runsc/boot/loader.go",
"diff": "@@ -174,7 +174,10 @@ func New(spec *specs.Spec, conf *Config, controllerFD int, ioFDs []int, console\n// this point. Netns is configured before Run() is called. Netstack is\n// configured using a control uRPC message. Host network is configured inside\n// Run().\n- networkStack := newEmptyNetworkStack(conf, k)\n+ networkStack, err := newEmptyNetworkStack(conf, k)\n+ if err != nil {\n+ return nil, fmt.Errorf(\"failed to create network: %v\", err)\n+ }\n// Initiate the Kernel object, which is required by the Context passed\n// to createVFS in order to mount (among other things) procfs.\n@@ -525,16 +528,20 @@ func (l *Loader) WaitExit() kernel.ExitStatus {\nreturn l.k.GlobalInit().ExitStatus()\n}\n-func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) inet.Stack {\n+func newEmptyNetworkStack(conf *Config, clock tcpip.Clock) (inet.Stack, error) {\nswitch conf.Network {\ncase NetworkHost:\n- return hostinet.NewStack()\n+ return hostinet.NewStack(), nil\ncase NetworkNone, NetworkSandbox:\n// NetworkNone sets up loopback using netstack.\nnetProtos := []string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}\nprotoNames := []string{tcp.ProtocolName, udp.ProtocolName, ping.ProtocolName4}\n- return &epsocket.Stack{stack.New(netProtos, protoNames, stack.Options{Clock: clock})}\n+ s := &epsocket.Stack{stack.New(netProtos, protoNames, stack.Options{Clock: clock})}\n+ if err := s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(true)); err != nil {\n+ return nil, fmt.Errorf(\"failed to enable SACK: %v\", err)\n+ }\n+ return s, nil\ndefault:\npanic(fmt.Sprintf(\"invalid network configuration: %v\", conf.Network))\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enable SACK in runsc
SACK is disabled by default and needs to be manually enabled. It not only
improves performance, but also fixes hangs downloading files from certain
websites.
PiperOrigin-RevId: 207906742
Change-Id: I4fb7277b67bfdf83ac8195f1b9c38265a0d51e8b |
259,881 | 08.08.2018 18:06:18 | 25,200 | 9144ddda09c0a370bfc7391b0d7382836e296b5d | Bump rules_go and gazelle to 0.14.0 | [
{
"change_type": "MODIFY",
"old_path": "WORKSPACE",
"new_path": "WORKSPACE",
"diff": "# Load go bazel rules and gazelle.\nhttp_archive(\nname = \"io_bazel_rules_go\",\n- url = \"https://github.com/bazelbuild/rules_go/releases/download/0.13.0/rules_go-0.13.0.tar.gz\",\n- sha256 = \"ba79c532ac400cefd1859cbc8a9829346aa69e3b99482cd5a54432092cbc3933\",\n+ url = \"https://github.com/bazelbuild/rules_go/releases/download/0.14.0/rules_go-0.14.0.tar.gz\",\n+ sha256 = \"5756a4ad75b3703eb68249d50e23f5d64eaf1593e886b9aa931aa6e938c4e301\",\n)\nhttp_archive(\nname = \"bazel_gazelle\",\n- url = \"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.13.0/bazel-gazelle-0.13.0.tar.gz\",\n- sha256 = \"bc653d3e058964a5a26dcad02b6c72d7d63e6bb88d94704990b908a1445b8758\",\n+ url = \"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.14.0/bazel-gazelle-0.14.0.tar.gz\",\n+ sha256 = \"c0a5739d12c6d05b6c1ad56f2200cb0b57c5a70e03ebd2f7b87ce88cabf09c7b\",\n)\nload(\"@io_bazel_rules_go//go:def.bzl\", \"go_rules_dependencies\", \"go_register_toolchains\")\ngo_rules_dependencies()\n"
}
] | Go | Apache License 2.0 | google/gvisor | Bump rules_go and gazelle to 0.14.0
PiperOrigin-RevId: 207977844
Change-Id: I980c1ad76339e9e4e8ea6d58c1caf5245befa18a |
259,858 | 08.08.2018 21:27:58 | 25,200 | dbbe9ec91541dba387f8044cbf73fd29f604f902 | Protect PCIDs with a mutex.
Because the Drop method may be called across vCPUs, it is necessary to protect
the PCID database with a mutex to prevent concurrent modification. The PCID is
assigned prior to entersyscall, so it's safe to block. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/pagetables/pcids_x86.go",
"new_path": "pkg/sentry/platform/ring0/pagetables/pcids_x86.go",
"diff": "package pagetables\n+import (\n+ \"sync\"\n+)\n+\n// limitPCID is the number of valid PCIDs.\nconst limitPCID = 4096\n@@ -24,6 +28,9 @@ const limitPCID = 4096\n// This is not protected by locks and is thus suitable for use only with a\n// single CPU at a time.\ntype PCIDs struct {\n+ // mu protects below.\n+ mu sync.Mutex\n+\n// cache are the assigned page tables.\ncache map[*PageTables]uint16\n@@ -56,7 +63,9 @@ func NewPCIDs(start, size uint16) *PCIDs {\n// This may overwrite any previous assignment provided. If this in the case,\n// true is returned to indicate that the PCID should be flushed.\nfunc (p *PCIDs) Assign(pt *PageTables) (uint16, bool) {\n+ p.mu.Lock()\nif pcid, ok := p.cache[pt]; ok {\n+ p.mu.Unlock()\nreturn pcid, false // No flush.\n}\n@@ -68,6 +77,7 @@ func (p *PCIDs) Assign(pt *PageTables) (uint16, bool) {\n// We need to flush because while this is in the available\n// pool, it may have been used previously.\n+ p.mu.Unlock()\nreturn pcid, true\n}\n@@ -79,17 +89,21 @@ func (p *PCIDs) Assign(pt *PageTables) (uint16, bool) {\n// A flush is definitely required in this case, these page\n// tables may still be active. (They will just be assigned some\n// other PCID if and when they hit the given CPU again.)\n+ p.mu.Unlock()\nreturn pcid, true\n}\n// No PCID.\n+ p.mu.Unlock()\nreturn 0, false\n}\n// Drop drops references to a set of page tables.\nfunc (p *PCIDs) Drop(pt *PageTables) {\n+ p.mu.Lock()\nif pcid, ok := p.cache[pt]; ok {\ndelete(p.cache, pt)\np.avail = append(p.avail, pcid)\n}\n+ p.mu.Unlock()\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Protect PCIDs with a mutex.
Because the Drop method may be called across vCPUs, it is necessary to protect
the PCID database with a mutex to prevent concurrent modification. The PCID is
assigned prior to entersyscall, so it's safe to block.
PiperOrigin-RevId: 207992864
Change-Id: I8b36d55106981f51e30dcf03e12886330bb79d67 |
259,992 | 08.08.2018 22:38:41 | 25,200 | 4e171f7590284c1f4cedf90c92204873961b2e97 | Basic support for ip link/addr and ifconfig
Closes | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/netlink_route.go",
"new_path": "pkg/abi/linux/netlink_route.go",
"diff": "@@ -184,3 +184,8 @@ const (\nIFA_MULTICAST = 7\nIFA_FLAGS = 8\n)\n+\n+// Device types, from uapi/linux/if_arp.h.\n+const (\n+ ARPHRD_LOOPBACK = 772\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/inet/inet.go",
"new_path": "pkg/sentry/inet/inet.go",
"diff": "@@ -67,6 +67,9 @@ type Interface struct {\n// Addr is the hardware device address.\nAddr []byte\n+\n+ // MTU is the maximum transmission unit.\n+ MTU uint32\n}\n// InterfaceAddr contains information about a network interface address.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -48,7 +48,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n- nstack \"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n+ \"gvisor.googlesource.com/gvisor/pkg/tcpip/stack\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n@@ -452,7 +452,7 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name, outLen int) (\n// sockets backed by a commonEndpoint.\nfunc GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int, skType unix.SockType, level, name, outLen int) (interface{}, *syserr.Error) {\nswitch level {\n- case syscall.SOL_SOCKET:\n+ case linux.SOL_SOCKET:\nswitch name {\ncase linux.SO_TYPE:\nif outLen < sizeOfInt32 {\n@@ -634,7 +634,7 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa\n// sockets backed by a commonEndpoint.\nfunc SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, name int, optVal []byte) *syserr.Error {\nswitch level {\n- case syscall.SOL_SOCKET:\n+ case linux.SOL_SOCKET:\nswitch name {\ncase linux.SO_SNDBUF:\nif len(optVal) < sizeOfInt32 {\n@@ -1191,7 +1191,9 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe\nif err != nil {\nreturn err\n}\n- usermem.ByteOrder.PutUint16(ifr.Data[:2], f)\n+ // Drop the flags that don't fit in the size that we need to return. This\n+ // matches Linux behavior.\n+ usermem.ByteOrder.PutUint16(ifr.Data[:2], uint16(f))\ncase syscall.SIOCGIFADDR:\n// Copy the IPv4 address out.\n@@ -1304,7 +1306,7 @@ func ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error {\n// interfaceStatusFlags returns status flags for an interface in the stack.\n// Flag values and meanings are described in greater detail in netdevice(7) in\n// the SIOCGIFFLAGS section.\n-func interfaceStatusFlags(stack inet.Stack, name string) (uint16, *syserr.Error) {\n+func interfaceStatusFlags(stack inet.Stack, name string) (uint32, *syserr.Error) {\n// epsocket should only ever be passed an epsocket.Stack.\nepstack, ok := stack.(*Stack)\nif !ok {\n@@ -1312,37 +1314,27 @@ func interfaceStatusFlags(stack inet.Stack, name string) (uint16, *syserr.Error)\n}\n// Find the NIC corresponding to this interface.\n- var (\n- nicid tcpip.NICID\n- info nstack.NICInfo\n- found bool\n- )\n- ns := epstack.Stack\n- for nicid, info = range ns.NICInfo() {\n+ for _, info := range epstack.Stack.NICInfo() {\nif info.Name == name {\n- found = true\n- break\n+ return nicStateFlagsToLinux(info.Flags), nil\n}\n}\n- if !found {\nreturn 0, syserr.ErrNoDevice\n}\n- // Set flags based on NIC state.\n- nicFlags, err := ns.NICFlags(nicid)\n- if err != nil {\n- return 0, syserr.TranslateNetstackError(err)\n+func nicStateFlagsToLinux(f stack.NICStateFlags) uint32 {\n+ var rv uint32\n+ if f.Up {\n+ rv |= linux.IFF_UP | linux.IFF_LOWER_UP\n}\n-\n- var retFlags uint16\n- if nicFlags.Up {\n- retFlags |= linux.IFF_UP\n+ if f.Running {\n+ rv |= linux.IFF_RUNNING\n}\n- if nicFlags.Running {\n- retFlags |= linux.IFF_RUNNING\n+ if f.Promiscuous {\n+ rv |= linux.IFF_PROMISC\n}\n- if nicFlags.Promiscuous {\n- retFlags |= linux.IFF_PROMISC\n+ if f.Loopback {\n+ rv |= linux.IFF_LOOPBACK\n}\n- return retFlags, nil\n+ return rv\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/stack.go",
"new_path": "pkg/sentry/socket/epsocket/stack.go",
"diff": "@@ -41,10 +41,16 @@ func (s *Stack) SupportsIPv6() bool {\nfunc (s *Stack) Interfaces() map[int32]inet.Interface {\nis := make(map[int32]inet.Interface)\nfor id, ni := range s.Stack.NICInfo() {\n+ var devType uint16\n+ if ni.Flags.Loopback {\n+ devType = linux.ARPHRD_LOOPBACK\n+ }\nis[int32(id)] = inet.Interface{\nName: ni.Name,\nAddr: []byte(ni.LinkAddress),\n- // TODO: Other fields.\n+ Flags: uint32(nicStateFlagsToLinux(ni.Flags)),\n+ DeviceType: devType,\n+ MTU: ni.MTU,\n}\n}\nreturn is\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netlink/route/protocol.go",
"new_path": "pkg/sentry/socket/netlink/route/protocol.go",
"diff": "package route\nimport (\n+ \"bytes\"\n+\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/inet\"\n@@ -97,9 +99,18 @@ func (p *Protocol) dumpLinks(ctx context.Context, hdr linux.NetlinkMessageHeader\n})\nm.PutAttrString(linux.IFLA_IFNAME, i.Name)\n+ m.PutAttr(linux.IFLA_MTU, i.MTU)\n+\n+ mac := make([]byte, 6)\n+ brd := mac\n+ if len(i.Addr) > 0 {\n+ mac = i.Addr\n+ brd = bytes.Repeat([]byte{0xff}, len(i.Addr))\n+ }\n+ m.PutAttr(linux.IFLA_ADDRESS, mac)\n+ m.PutAttr(linux.IFLA_BROADCAST, brd)\n- // TODO: There are many more attributes, such as\n- // MAC address.\n+ // TODO: There are many more attributes.\n}\nreturn nil\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/netlink/socket.go",
"new_path": "pkg/sentry/socket/netlink/socket.go",
"diff": "package netlink\nimport (\n+ \"math\"\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n@@ -39,8 +40,18 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n+const sizeOfInt32 int = 4\n+\n+const (\n+ // minBufferSize is the smallest size of a send buffer.\n+ minSendBufferSize = 4 << 10 // 4096 bytes.\n+\n// defaultSendBufferSize is the default size for the send buffer.\n-const defaultSendBufferSize = 16 * 1024\n+ defaultSendBufferSize = 16 * 1024\n+\n+ // maxBufferSize is the largest size a send buffer can grow to.\n+ maxSendBufferSize = 4 << 20 // 4MB\n+)\n// netlinkSocketDevice is the netlink socket virtual device.\nvar netlinkSocketDevice = device.NewAnonDevice()\n@@ -86,7 +97,7 @@ type Socket struct {\n// sendBufferSize is the send buffer \"size\". We don't actually have a\n// fixed buffer but only consume this many bytes.\n- sendBufferSize uint64\n+ sendBufferSize uint32\n}\nvar _ socket.Socket = (*Socket)(nil)\n@@ -273,13 +284,54 @@ func (s *Socket) Shutdown(t *kernel.Task, how int) *syserr.Error {\n// GetSockOpt implements socket.Socket.GetSockOpt.\nfunc (s *Socket) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) {\n- // TODO: no sockopts supported.\n+ switch level {\n+ case linux.SOL_SOCKET:\n+ switch name {\n+ case linux.SO_SNDBUF:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ return int32(s.sendBufferSize), nil\n+\n+ case linux.SO_RCVBUF:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+ // We don't have limit on receiving size.\n+ return math.MaxInt32, nil\n+ }\n+ }\n+ // TODO: other sockopts are not supported.\nreturn nil, syserr.ErrProtocolNotAvailable\n}\n// SetSockOpt implements socket.Socket.SetSockOpt.\nfunc (s *Socket) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error {\n- // TODO: no sockopts supported.\n+ switch level {\n+ case linux.SOL_SOCKET:\n+ switch name {\n+ case linux.SO_SNDBUF:\n+ if len(opt) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+ size := usermem.ByteOrder.Uint32(opt)\n+ if size < minSendBufferSize {\n+ size = minSendBufferSize\n+ } else if size > maxSendBufferSize {\n+ size = maxSendBufferSize\n+ }\n+ s.sendBufferSize = size\n+ return nil\n+ case linux.SO_RCVBUF:\n+ if len(opt) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+ // We don't have limit on receiving size. So just accept anything as\n+ // valid for compatibility.\n+ return nil\n+ }\n+ }\n+ // TODO: other sockopts are not supported.\nreturn syserr.ErrProtocolNotAvailable\n}\n@@ -489,7 +541,7 @@ func (s *Socket) sendMsg(ctx context.Context, src usermem.IOSequence, to []byte,\n// For simplicity, and consistency with Linux, we copy in the entire\n// message up front.\n- if uint64(src.NumBytes()) > s.sendBufferSize {\n+ if src.NumBytes() > int64(s.sendBufferSize) {\nreturn 0, syserr.ErrMessageTooLong\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/link/loopback/loopback.go",
"new_path": "pkg/tcpip/link/loopback/loopback.go",
"diff": "@@ -56,7 +56,7 @@ func (*endpoint) MTU() uint32 {\n// Capabilities implements stack.LinkEndpoint.Capabilities. Loopback advertises\n// itself as supporting checksum offload, but in reality it's just omitted.\nfunc (*endpoint) Capabilities() stack.LinkEndpointCapabilities {\n- return stack.CapabilityChecksumOffload | stack.CapabilitySaveRestore\n+ return stack.CapabilityChecksumOffload | stack.CapabilitySaveRestore | stack.CapabilityLoopback\n}\n// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. Given that the\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -67,6 +67,13 @@ func (n *NIC) setPromiscuousMode(enable bool) {\nn.mu.Unlock()\n}\n+func (n *NIC) isPromiscuousMode() bool {\n+ n.mu.RLock()\n+ rv := n.promiscuous\n+ n.mu.RUnlock()\n+ return rv\n+}\n+\n// setSpoofing enables or disables address spoofing.\nfunc (n *NIC) setSpoofing(enable bool) {\nn.mu.Lock()\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/registration.go",
"new_path": "pkg/tcpip/stack/registration.go",
"diff": "@@ -205,6 +205,7 @@ const (\nCapabilityResolutionRequired\nCapabilitySaveRestore\nCapabilityDisconnectOk\n+ CapabilityLoopback\n)\n// LinkEndpoint is the interface implemented by data link layer protocols (e.g.,\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -563,6 +563,12 @@ type NICInfo struct {\nName string\nLinkAddress tcpip.LinkAddress\nProtocolAddresses []tcpip.ProtocolAddress\n+\n+ // Flags indicate the state of the NIC.\n+ Flags NICStateFlags\n+\n+ // MTU is the maximum transmission unit.\n+ MTU uint32\n}\n// NICInfo returns a map of NICIDs to their associated information.\n@@ -572,10 +578,18 @@ func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo {\nnics := make(map[tcpip.NICID]NICInfo)\nfor id, nic := range s.nics {\n+ flags := NICStateFlags{\n+ Up: true, // Netstack interfaces are always up.\n+ Running: nic.linkEP.IsAttached(),\n+ Promiscuous: nic.isPromiscuousMode(),\n+ Loopback: nic.linkEP.Capabilities()&CapabilityLoopback != 0,\n+ }\nnics[id] = NICInfo{\nName: nic.name,\nLinkAddress: nic.linkEP.LinkAddress(),\nProtocolAddresses: nic.Addresses(),\n+ Flags: flags,\n+ MTU: nic.linkEP.MTU(),\n}\n}\nreturn nics\n@@ -591,27 +605,9 @@ type NICStateFlags struct {\n// Promiscuous indicates whether the interface is in promiscuous mode.\nPromiscuous bool\n-}\n-\n-// NICFlags returns flags about the state of the NIC. It returns an error if\n-// the NIC corresponding to id cannot be found.\n-func (s *Stack) NICFlags(id tcpip.NICID) (NICStateFlags, *tcpip.Error) {\n- s.mu.RLock()\n- defer s.mu.RUnlock()\n- nic := s.nics[id]\n- if nic == nil {\n- return NICStateFlags{}, tcpip.ErrUnknownNICID\n- }\n-\n- ret := NICStateFlags{\n- // Netstack interfaces are always up.\n- Up: true,\n-\n- Running: nic.linkEP.IsAttached(),\n- Promiscuous: nic.promiscuous,\n- }\n- return ret, nil\n+ // Loopback indicates whether the interface is a loopback.\n+ Loopback bool\n}\n// AddAddress adds a new network-layer address to the specified NIC.\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/boot/network.go",
"new_path": "runsc/boot/network.go",
"diff": "@@ -133,15 +133,16 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct\nreturn fmt.Errorf(\"failed to dup FD %v: %v\", oldFD, err)\n}\n+ mac := tcpip.LinkAddress(generateRndMac())\nlinkEP := fdbased.New(&fdbased.Options{\nFD: newFD,\nMTU: uint32(link.MTU),\nEthernetHeader: true,\nHandleLocal: true,\n- Address: tcpip.LinkAddress(generateRndMac()),\n+ Address: mac,\n})\n- log.Infof(\"Enabling interface %q with id %d on addresses %+v\", link.Name, nicID, link.Addresses)\n+ log.Infof(\"Enabling interface %q with id %d on addresses %+v (%v)\", link.Name, nicID, link.Addresses, mac)\nif err := n.createNICWithAddrs(nicID, link.Name, linkEP, link.Addresses); err != nil {\nreturn err\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Basic support for ip link/addr and ifconfig
Closes #94
PiperOrigin-RevId: 207997580
Change-Id: I19b426f1586b5ec12f8b0cd5884d5b401d334924 |
259,881 | 09.08.2018 16:49:23 | 25,200 | 2e06b23aa61216fcdbefcd6b11a24bca7a456b16 | Fix missing O_LARGEFILE from O_CREAT files
Cleanup some more syscall.O_* references while we're here. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/file.go",
"new_path": "pkg/abi/linux/file.go",
"diff": "@@ -27,6 +27,10 @@ const (\nO_RDONLY = 00000000\nO_WRONLY = 00000001\nO_RDWR = 00000002\n+ O_CREAT = 00000100\n+ O_EXCL = 00000200\n+ O_NOCTTY = 00000400\n+ O_TRUNC = 00001000\nO_APPEND = 00002000\nO_NONBLOCK = 00004000\nO_ASYNC = 00020000\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/fd_map.go",
"new_path": "pkg/sentry/kernel/fd_map.go",
"diff": "@@ -53,7 +53,8 @@ type FDFlags struct {\nCloseOnExec bool\n}\n-// ToLinuxFileFlags converts a kernel.FDFlags object to a Linux file flags representation.\n+// ToLinuxFileFlags converts a kernel.FDFlags object to a Linux file flags\n+// representation.\nfunc (f FDFlags) ToLinuxFileFlags() (mask uint) {\nif f.CloseOnExec {\nmask |= linux.O_CLOEXEC\n@@ -61,7 +62,8 @@ func (f FDFlags) ToLinuxFileFlags() (mask uint) {\nreturn\n}\n-// ToLinuxFDFlags converts a kernel.FDFlags object to a Linux descriptor flags representation.\n+// ToLinuxFDFlags converts a kernel.FDFlags object to a Linux descriptor flags\n+// representation.\nfunc (f FDFlags) ToLinuxFDFlags() (mask uint) {\nif f.CloseOnExec {\nmask |= linux.FD_CLOEXEC\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/flags.go",
"new_path": "pkg/sentry/syscalls/linux/flags.go",
"diff": "@@ -34,8 +34,8 @@ func flagsToPermissions(mask uint) (p fs.PermMask) {\nreturn\n}\n-// linuxToFlags converts linux file flags to a FileFlags object.\n-func linuxToFlags(mask uint) (flags fs.FileFlags) {\n+// linuxToFlags converts Linux file flags to a FileFlags object.\n+func linuxToFlags(mask uint) fs.FileFlags {\nreturn fs.FileFlags{\nDirect: mask&linux.O_DIRECT != 0,\nSync: mask&linux.O_SYNC != 0,\n@@ -48,13 +48,3 @@ func linuxToFlags(mask uint) (flags fs.FileFlags) {\nLargeFile: mask&linux.O_LARGEFILE != 0,\n}\n}\n-\n-// linuxToSettableFlags converts linux file flags to a SettableFileFlags object.\n-func linuxToSettableFlags(mask uint) fs.SettableFileFlags {\n- return fs.SettableFileFlags{\n- Direct: mask&linux.O_DIRECT != 0,\n- NonBlocking: mask&linux.O_NONBLOCK != 0,\n- Append: mask&linux.O_APPEND != 0,\n- Async: mask&linux.O_ASYNC != 0,\n- }\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_file.go",
"new_path": "pkg/sentry/syscalls/linux/sys_file.go",
"diff": "@@ -164,7 +164,7 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\nif dirPath {\nreturn syserror.ENOTDIR\n}\n- if fileFlags.Write && flags&syscall.O_TRUNC != 0 {\n+ if fileFlags.Write && flags&linux.O_TRUNC != 0 {\nif err := d.Inode.Truncate(t, d, 0); err != nil {\nreturn err\n}\n@@ -178,7 +178,7 @@ func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd u\ndefer file.DecRef()\n// Success.\n- fdFlags := kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0}\n+ fdFlags := kernel.FDFlags{CloseOnExec: flags&linux.O_CLOEXEC != 0}\nnewFD, err := t.FDMap().NewFDFrom(0, file, fdFlags, t.ThreadGroup().Limits())\nif err != nil {\nreturn err\n@@ -302,6 +302,10 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\nreturn syserror.ENOTDIR\n}\n+ fileFlags := linuxToFlags(flags)\n+ // Linux always adds the O_LARGEFILE flag when running in 64-bit mode.\n+ fileFlags.LargeFile = true\n+\n// Does this file exist already?\ntargetDirent, err := t.MountNamespace().FindInode(t, root, d, name, linux.MaxSymlinkTraversals)\nvar newFile *fs.File\n@@ -311,7 +315,7 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\ndefer targetDirent.DecRef()\n// Check if we wanted to create.\n- if flags&syscall.O_EXCL != 0 {\n+ if flags&linux.O_EXCL != 0 {\nreturn syserror.EEXIST\n}\n@@ -323,14 +327,14 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\n}\n// Should we truncate the file?\n- if flags&syscall.O_TRUNC != 0 {\n+ if flags&linux.O_TRUNC != 0 {\nif err := targetDirent.Inode.Truncate(t, targetDirent, 0); err != nil {\nreturn err\n}\n}\n// Create a new fs.File.\n- newFile, err = targetDirent.Inode.GetFile(t, targetDirent, linuxToFlags(flags))\n+ newFile, err = targetDirent.Inode.GetFile(t, targetDirent, fileFlags)\nif err != nil {\nreturn syserror.ConvertIntr(err, kernel.ERESTARTSYS)\n}\n@@ -346,7 +350,7 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\n// Attempt a creation.\nperms := fs.FilePermsFromMode(mode &^ linux.FileMode(t.FSContext().Umask()))\n- newFile, err = d.Create(t, root, name, linuxToFlags(flags), perms)\n+ newFile, err = d.Create(t, root, name, fileFlags, perms)\nif err != nil {\n// No luck, bail.\nreturn err\n@@ -356,7 +360,7 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\n}\n// Success.\n- fdFlags := kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0}\n+ fdFlags := kernel.FDFlags{CloseOnExec: flags&linux.O_CLOEXEC != 0}\nnewFD, err := t.FDMap().NewFDFrom(0, newFile, fdFlags, t.ThreadGroup().Limits())\nif err != nil {\nreturn err\n@@ -380,7 +384,7 @@ func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mod\nfunc Open(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\naddr := args[0].Pointer()\nflags := uint(args[1].Uint())\n- if flags&syscall.O_CREAT != 0 {\n+ if flags&linux.O_CREAT != 0 {\nmode := linux.FileMode(args[2].ModeT())\nn, err := createAt(t, linux.AT_FDCWD, addr, flags, mode)\nreturn n, nil, err\n@@ -394,7 +398,7 @@ func Openat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\ndirFD := kdefs.FD(args[0].Int())\naddr := args[1].Pointer()\nflags := uint(args[2].Uint())\n- if flags&syscall.O_CREAT != 0 {\n+ if flags&linux.O_CREAT != 0 {\nmode := linux.FileMode(args[3].ModeT())\nn, err := createAt(t, dirFD, addr, flags, mode)\nreturn n, nil, err\n@@ -407,7 +411,7 @@ func Openat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nfunc Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\naddr := args[0].Pointer()\nmode := linux.FileMode(args[1].ModeT())\n- n, err := createAt(t, linux.AT_FDCWD, addr, syscall.O_WRONLY|syscall.O_TRUNC, mode)\n+ n, err := createAt(t, linux.AT_FDCWD, addr, linux.O_WRONLY|linux.O_TRUNC, mode)\nreturn n, nil, err\n}\n@@ -747,7 +751,7 @@ func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC\n}\ndefer oldFile.DecRef()\n- err := t.FDMap().NewFDAt(newfd, oldFile, kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0}, t.ThreadGroup().Limits())\n+ err := t.FDMap().NewFDAt(newfd, oldFile, kernel.FDFlags{CloseOnExec: flags&linux.O_CLOEXEC != 0}, t.ThreadGroup().Limits())\nif err != nil {\nreturn 0, nil, err\n}\n@@ -802,7 +806,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nswitch cmd {\ncase linux.F_DUPFD, linux.F_DUPFD_CLOEXEC:\nfrom := kdefs.FD(args[2].Int())\n- fdFlags := kernel.FDFlags{CloseOnExec: cmd == syscall.F_DUPFD_CLOEXEC}\n+ fdFlags := kernel.FDFlags{CloseOnExec: cmd == linux.F_DUPFD_CLOEXEC}\nfd, err := t.FDMap().NewFDFrom(from, file, fdFlags, t.ThreadGroup().Limits())\nif err != nil {\nreturn 0, nil, err\n@@ -813,13 +817,13 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\ncase linux.F_SETFD:\nflags := args[2].Uint()\nt.FDMap().SetFlags(fd, kernel.FDFlags{\n- CloseOnExec: flags&syscall.FD_CLOEXEC != 0,\n+ CloseOnExec: flags&linux.FD_CLOEXEC != 0,\n})\ncase linux.F_GETFL:\nreturn uintptr(file.Flags().ToLinux()), nil, nil\ncase linux.F_SETFL:\nflags := uint(args[2].Uint())\n- file.SetFlags(linuxToSettableFlags(flags))\n+ file.SetFlags(linuxToFlags(flags).Settable())\ncase linux.F_SETLK, linux.F_SETLKW:\n// In Linux the file system can choose to provide lock operations for an inode.\n// Normally pipe and socket types lack lock operations. We diverge and use a heavy\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_pipe.go",
"new_path": "pkg/sentry/syscalls/linux/sys_pipe.go",
"diff": "@@ -17,6 +17,7 @@ package linux\nimport (\n\"syscall\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"\n@@ -26,7 +27,7 @@ import (\n// pipe2 implements the actual system call with flags.\nfunc pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {\n- if flags&^(syscall.O_NONBLOCK|syscall.O_CLOEXEC) != 0 {\n+ if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {\nreturn 0, syscall.EINVAL\n}\nr, w := pipe.NewConnectedPipe(t, pipe.DefaultPipeSize, usermem.PageSize)\n@@ -38,14 +39,14 @@ func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {\ndefer w.DecRef()\nrfd, err := t.FDMap().NewFDFrom(0, r, kernel.FDFlags{\n- CloseOnExec: flags&syscall.O_CLOEXEC != 0},\n+ CloseOnExec: flags&linux.O_CLOEXEC != 0},\nt.ThreadGroup().Limits())\nif err != nil {\nreturn 0, err\n}\nwfd, err := t.FDMap().NewFDFrom(0, w, kernel.FDFlags{\n- CloseOnExec: flags&syscall.O_CLOEXEC != 0},\n+ CloseOnExec: flags&linux.O_CLOEXEC != 0},\nt.ThreadGroup().Limits())\nif err != nil {\nt.FDMap().Remove(rfd)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix missing O_LARGEFILE from O_CREAT files
Cleanup some more syscall.O_* references while we're here.
PiperOrigin-RevId: 208133460
Change-Id: I48db71a38f817e4f4673977eafcc0e3874eb9a25 |
259,992 | 09.08.2018 17:04:18 | 25,200 | 0ac912f99e44e8e89985dd83ec946deadbfd8797 | Fix runsc integration_test when using --network=host
inethost doesn't support netlink and 'ifconfig' call to retrieve IP address
fails. Look up IP address in /etc/hosts instead. | [
{
"change_type": "MODIFY",
"old_path": "runsc/test/integration/integration_test.go",
"new_path": "runsc/test/integration/integration_test.go",
"diff": "@@ -162,8 +162,8 @@ func TestConnectToSelf(t *testing.T) {\n}\ndefer d.CleanUp()\n- // Finds IP address for eth0.\n- ip, err := d.Exec(\"/bin/sh\", \"-c\", \"ifconfig eth0 | grep -E -o \\\".*inet [^ ]+\\\" | cut -d: -f2\")\n+ // Finds IP address for host.\n+ ip, err := d.Exec(\"/bin/sh\", \"-c\", \"cat /etc/hosts | grep ${HOSTNAME} | awk '{print $1}'\")\nif err != nil {\nt.Fatal(\"docker exec failed:\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix runsc integration_test when using --network=host
inethost doesn't support netlink and 'ifconfig' call to retrieve IP address
fails. Look up IP address in /etc/hosts instead.
PiperOrigin-RevId: 208135641
Change-Id: I3c2ce15db6fc7c3306a45e4bfb9cc5d4423ffad3 |
260,008 | 10.08.2018 10:25:37 | 25,200 | d5b702b64f05a200ed94f0cd977d3f84dae01162 | Validate FS.base before establishing it in the task's register set. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/sys_tls.go",
"new_path": "pkg/sentry/syscalls/linux/sys_tls.go",
"diff": "@@ -22,6 +22,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n// ArchPrctl implements linux syscall arch_prctl(2).\n@@ -36,9 +37,13 @@ func ArchPrctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\n}\ncase linux.ARCH_SET_FS:\n+ fsbase := args[1].Uint64()\n+ if _, ok := t.MemoryManager().CheckIORange(usermem.Addr(fsbase), 0); !ok {\n+ return 0, nil, syscall.EPERM\n+ }\nregs := &t.Arch().StateData().Regs\nregs.Fs = 0\n- regs.Fs_base = args[1].Uint64()\n+ regs.Fs_base = fsbase\ndefault:\nreturn 0, nil, syscall.EINVAL\n"
}
] | Go | Apache License 2.0 | google/gvisor | Validate FS.base before establishing it in the task's register set.
PiperOrigin-RevId: 208229341
Change-Id: I5d84bc52bbafa073446ef497e56958d0d7955aa8 |
259,991 | 10.08.2018 15:39:02 | 25,200 | 3c60a192ca96838e895bd3607f8a85845245f81e | Added a reference to the checkpoint/restore readme.
In the main readme, checkpoint and restore is listed as an
advanced feature, and a link to its readme is provided. | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -360,6 +360,13 @@ configuration (`/etc/docker/daemon.json`):\nThen restart the Docker daemon.\n+### Checkpoint/Restore\n+\n+gVisor has the ability to checkpoint a process, save its current state in a\n+state file, and restore into a new container using the state file. For more\n+information about the checkpoint and restore commands, see the\n+[checkpoint/restore readme](https://gvisor.googlesource.com/gvisor/+/master/runsc/checkpoint_restore.md).\n+\n## FAQ & Known Issues\n### Will my container work with gVisor?\n"
}
] | Go | Apache License 2.0 | google/gvisor | Added a reference to the checkpoint/restore readme.
In the main readme, checkpoint and restore is listed as an
advanced feature, and a link to its readme is provided.
PiperOrigin-RevId: 208279833
Change-Id: Ib3db28a8df8ec93cf8d98d5dfd2ee2f75a61e664 |
259,991 | 10.08.2018 16:09:52 | 25,200 | ae6f092fe117a738df34e072ef5ba01a41c89222 | Implemented the splice(2) syscall.
Currently the implementation matches the behavior of moving data
between two file descriptors. However, it does not implement this
through zero-copy movement. Thus, this code is a starting point
to build the more complex implementation. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/BUILD",
"new_path": "pkg/abi/linux/BUILD",
"diff": "@@ -43,6 +43,7 @@ go_library(\n\"shm.go\",\n\"signal.go\",\n\"socket.go\",\n+ \"splice.go\",\n\"time.go\",\n\"tty.go\",\n\"uio.go\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/abi/linux/splice.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+// Flags for splice(2).\n+const (\n+ SPLICE_F_NONBLOCK = 2\n+)\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/BUILD",
"new_path": "pkg/sentry/syscalls/linux/BUILD",
"diff": "@@ -34,6 +34,7 @@ go_library(\n\"sys_shm.go\",\n\"sys_signal.go\",\n\"sys_socket.go\",\n+ \"sys_splice.go\",\n\"sys_stat.go\",\n\"sys_sync.go\",\n\"sys_sysinfo.go\",\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/syscalls/linux/linux64.go",
"new_path": "pkg/sentry/syscalls/linux/linux64.go",
"diff": "@@ -319,7 +319,7 @@ var AMD64 = &kernel.SyscallTable{\n272: Unshare,\n273: syscalls.Error(syscall.ENOSYS), // SetRobustList, obsolete\n274: syscalls.Error(syscall.ENOSYS), // GetRobustList, obsolete\n- // 275: Splice, TODO\n+ 275: Splice,\n// 276: Tee, TODO\n// 277: SyncFileRange, TODO\n// 278: Vmsplice, TODO\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/syscalls/linux/sys_splice.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+import (\n+ \"io\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/pipe\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n+)\n+\n+// Splice implements linux syscall splice(2).\n+func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ fdIn := kdefs.FD(args[0].Int())\n+ offIn := args[1].Pointer()\n+ fdOut := kdefs.FD(args[2].Int())\n+ offOut := args[3].Pointer()\n+ size := int64(args[4].SizeT())\n+ flags := uint(args[5].Uint())\n+\n+ fileIn := t.FDMap().GetFile(fdIn)\n+ if fileIn == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer fileIn.DecRef()\n+ fileOut := t.FDMap().GetFile(fdOut)\n+ if fileOut == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer fileOut.DecRef()\n+\n+ // Check for whether we have pipes.\n+ ipipe := fs.IsPipe(fileIn.Dirent.Inode.StableAttr)\n+ opipe := fs.IsPipe(fileOut.Dirent.Inode.StableAttr)\n+ if (ipipe && offIn != 0) || (opipe && offOut != 0) {\n+ return 0, nil, syserror.ESPIPE\n+ }\n+\n+ // Check if both file descriptors are pipes.\n+ if ipipe && opipe {\n+ var readPipe *pipe.Pipe\n+ switch p := fileIn.FileOperations.(type) {\n+ case *pipe.Reader:\n+ readPipe = p.ReaderWriter.Pipe\n+ case *pipe.ReaderWriter:\n+ readPipe = p.Pipe\n+ default:\n+ return 0, nil, syserror.EBADF\n+ }\n+ var writePipe *pipe.Pipe\n+ switch p := fileOut.FileOperations.(type) {\n+ case *pipe.Writer:\n+ writePipe = p.ReaderWriter.Pipe\n+ case *pipe.ReaderWriter:\n+ writePipe = p.Pipe\n+ default:\n+ return 0, nil, syserror.EBADF\n+ }\n+\n+ // Splicing with two ends of the same pipe is not allowed.\n+ if readPipe == writePipe {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ spliced, err := splicePipeToPipe(t, fileIn, fileOut, size, flags)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+ return uintptr(spliced), nil, nil\n+ }\n+\n+ // Check if the file descriptor that contains the data to move is a pipe.\n+ if ipipe {\n+ flagsOut := fileOut.Flags()\n+ offset := uint64(fileOut.Offset())\n+\n+ // If there is an offset for the file, ensure the file has the Pwrite flag.\n+ if offOut != 0 {\n+ if !flagsOut.Pwrite {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if _, err := t.CopyIn(offOut, &offset); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+\n+ if !flagsOut.Write {\n+ return 0, nil, syserror.EBADF\n+ }\n+\n+ if flagsOut.Append {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ switch fileIn.FileOperations.(type) {\n+ case *pipe.Reader, *pipe.ReaderWriter:\n+ // If the pipe in is a Reader or ReaderWriter, we can continue.\n+ default:\n+ return 0, nil, syserror.EBADF\n+ }\n+ spliced, err := spliceWrite(t, fileIn, fileOut, size, offset, flags)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+\n+ // Make sure value that offset points to is updated.\n+ if offOut == 0 {\n+ fileOut.Seek(t, fs.SeekSet, spliced+int64(offset))\n+ } else if _, err := t.CopyOut(offOut, spliced+int64(offset)); err != nil {\n+ return 0, nil, err\n+ }\n+ return uintptr(spliced), nil, nil\n+ }\n+\n+ // Check if the file descriptor that the data will be moved to is a pipe.\n+ if opipe {\n+ flagsIn := fileIn.Flags()\n+ offset := uint64(fileIn.Offset())\n+\n+ // If there is an offset for the file, ensure the file has the Pread flag.\n+ if offIn != 0 {\n+ if !flagsIn.Pread {\n+ return 0, nil, syserror.EINVAL\n+ }\n+ if _, err := t.CopyIn(offIn, &offset); err != nil {\n+ return 0, nil, err\n+ }\n+ }\n+\n+ if !flagsIn.Read {\n+ return 0, nil, syserror.EBADF\n+ }\n+\n+ switch fileOut.FileOperations.(type) {\n+ case *pipe.Writer, *pipe.ReaderWriter:\n+ // If the pipe out is a Writer or ReaderWriter, we can continue.\n+ default:\n+ return 0, nil, syserror.EBADF\n+ }\n+ spliced, err := spliceRead(t, fileIn, fileOut, size, offset, flags)\n+ if err != nil {\n+ return 0, nil, err\n+ }\n+\n+ // Make sure value that offset points to is updated.\n+ if offIn == 0 {\n+ fileOut.Seek(t, fs.SeekSet, spliced+int64(offset))\n+ } else if _, err := t.CopyOut(offIn, spliced+int64(offset)); err != nil {\n+ return 0, nil, err\n+ }\n+ return uintptr(spliced), nil, nil\n+ }\n+\n+ // Splice requires one of the file descriptors to be a pipe.\n+ return 0, nil, syserror.EINVAL\n+}\n+\n+// splicePipeToPipe moves data from one pipe to another pipe.\n+// TODO: Implement with zero copy movement/without copying between\n+// user and kernel address spaces.\n+func splicePipeToPipe(t *kernel.Task, inPipe *fs.File, outPipe *fs.File, size int64, flags uint) (int64, error) {\n+ w := &fs.FileWriter{t, outPipe}\n+ if flags == linux.SPLICE_F_NONBLOCK {\n+ r := &io.LimitedReader{R: &fs.FileReader{t, inPipe}, N: size}\n+ return io.Copy(w, r)\n+ }\n+ var n int64\n+ for read := int64(0); read < size; {\n+ var err error\n+ r := &io.LimitedReader{R: &fs.FileReader{t, inPipe}, N: size}\n+ n, err = io.Copy(w, r)\n+ if err != nil && err != syserror.ErrWouldBlock {\n+ return 0, err\n+ }\n+ read += n\n+ }\n+ return n, nil\n+}\n+\n+// spliceRead moves data from a file to a pipe.\n+// TODO: Implement with zero copy movement/without copying between\n+// user and kernel address spaces.\n+func spliceRead(t *kernel.Task, inFile *fs.File, outPipe *fs.File, size int64, offset uint64, flags uint) (int64, error) {\n+ w := &fs.FileWriter{t, outPipe}\n+ if flags == linux.SPLICE_F_NONBLOCK {\n+ r := io.NewSectionReader(&fs.FileReader{t, inFile}, int64(offset), size)\n+ return io.Copy(w, r)\n+ }\n+ var n int64\n+ for read := int64(0); read < size; {\n+ r := io.NewSectionReader(&fs.FileReader{t, inFile}, int64(offset), size)\n+ var err error\n+ n, err = io.Copy(w, r)\n+ if err != nil && err != syserror.ErrWouldBlock {\n+ return 0, err\n+ }\n+ read += n\n+ }\n+ return n, nil\n+}\n+\n+// offsetWriter implements io.Writer on a section of an underlying\n+// WriterAt starting from the offset and ending at the limit.\n+type offsetWriter struct {\n+ w io.WriterAt\n+ off int64\n+ limit int64\n+}\n+\n+// Write implements io.Writer.Write and writes the content of the offsetWriter\n+// starting at the offset and ending at the limit into the given buffer.\n+func (o *offsetWriter) Write(p []byte) (n int, err error) {\n+ if o.off >= o.limit {\n+ return 0, io.EOF\n+ }\n+ if max := o.limit - o.off; int64(len(p)) > max {\n+ p = p[0:max]\n+ }\n+ n, err = o.w.WriteAt(p, o.off)\n+ o.off += int64(n)\n+ return n, err\n+}\n+\n+// spliceWrite moves data from a pipe to a file.\n+// TODO: Implement with zero copy movement/without copying between\n+// user and kernel address spaces.\n+func spliceWrite(t *kernel.Task, inPipe *fs.File, outFile *fs.File, size int64, offset uint64, flags uint) (int64, error) {\n+ w := &offsetWriter{&fs.FileWriter{t, outFile}, int64(offset), int64(offset) + size}\n+ if flags == linux.SPLICE_F_NONBLOCK {\n+ r := &io.LimitedReader{R: &fs.FileReader{t, inPipe}, N: size}\n+ return io.Copy(w, r)\n+ }\n+ var n int64\n+ for read := int64(0); read < size; {\n+ var err error\n+ r := &io.LimitedReader{R: &fs.FileReader{t, inPipe}, N: size}\n+ n, err = io.Copy(w, r)\n+ if err != nil && err != syserror.ErrWouldBlock {\n+ return 0, err\n+ }\n+ read += n\n+ }\n+ return n, nil\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Implemented the splice(2) syscall.
Currently the implementation matches the behavior of moving data
between two file descriptors. However, it does not implement this
through zero-copy movement. Thus, this code is a starting point
to build the more complex implementation.
PiperOrigin-RevId: 208284483
Change-Id: Ibde79520a3d50bc26aead7ad4f128d2be31db14e |
259,858 | 13.08.2018 12:49:18 | 25,200 | 85235ac212713942cb4c0ab2947964711a342a64 | Add path sanity checks. | [
{
"change_type": "MODIFY",
"old_path": "pkg/p9/handlers.go",
"new_path": "pkg/p9/handlers.go",
"diff": "@@ -17,6 +17,7 @@ package p9\nimport (\n\"io\"\n\"os\"\n+ \"strings\"\n\"sync/atomic\"\n\"syscall\"\n@@ -83,8 +84,24 @@ func (t *Tflush) handle(cs *connState) message {\nreturn &Rflush{}\n}\n+// isSafeName returns true iff the name does not contain directory characters.\n+//\n+// We permit walks only on safe names and store the sequence of paths used for\n+// any given walk in each FID. (This is immutable.) We use this to mark\n+// relevant FIDs as moved when a successful rename occurs.\n+func isSafeName(name string) bool {\n+ return name != \"\" && !strings.Contains(name, \"/\") && name != \".\" && name != \"..\"\n+}\n+\n// handle implements handler.handle.\nfunc (t *Twalk) handle(cs *connState) message {\n+ // Check the names.\n+ for _, name := range t.Names {\n+ if !isSafeName(name) {\n+ return newErr(syscall.EINVAL)\n+ }\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.FID)\nif !ok {\n@@ -200,6 +217,11 @@ func (t *Tlopen) handle(cs *connState) message {\n}\nfunc (t *Tlcreate) do(cs *connState, uid UID) (*Rlcreate, error) {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return nil, syscall.EINVAL\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.FID)\nif !ok {\n@@ -216,7 +238,11 @@ func (t *Tlcreate) do(cs *connState, uid UID) (*Rlcreate, error) {\n// Replace the FID reference.\n//\n// The new file will be opened already.\n- cs.InsertFID(t.FID, &fidRef{file: nsf, opened: true, openFlags: t.OpenFlags})\n+ cs.InsertFID(t.FID, &fidRef{\n+ file: nsf,\n+ opened: true,\n+ openFlags: t.OpenFlags,\n+ })\nreturn &Rlcreate{Rlopen: Rlopen{QID: qid, IoUnit: ioUnit, File: osFile}}, nil\n}\n@@ -240,6 +266,11 @@ func (t *Tsymlink) handle(cs *connState) message {\n}\nfunc (t *Tsymlink) do(cs *connState, uid UID) (*Rsymlink, error) {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return nil, syscall.EINVAL\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.Directory)\nif !ok {\n@@ -258,6 +289,11 @@ func (t *Tsymlink) do(cs *connState, uid UID) (*Rsymlink, error) {\n// handle implements handler.handle.\nfunc (t *Tlink) handle(cs *connState) message {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return newErr(syscall.EINVAL)\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.Directory)\nif !ok {\n@@ -282,6 +318,11 @@ func (t *Tlink) handle(cs *connState) message {\n// handle implements handler.handle.\nfunc (t *Trenameat) handle(cs *connState) message {\n+ // Don't allow complex names.\n+ if !isSafeName(t.OldName) || !isSafeName(t.NewName) {\n+ return newErr(syscall.EINVAL)\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.OldDirectory)\nif !ok {\n@@ -306,6 +347,11 @@ func (t *Trenameat) handle(cs *connState) message {\n// handle implements handler.handle.\nfunc (t *Tunlinkat) handle(cs *connState) message {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return newErr(syscall.EINVAL)\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.Directory)\nif !ok {\n@@ -323,6 +369,11 @@ func (t *Tunlinkat) handle(cs *connState) message {\n// handle implements handler.handle.\nfunc (t *Trename) handle(cs *connState) message {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return newErr(syscall.EINVAL)\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.FID)\nif !ok {\n@@ -437,6 +488,11 @@ func (t *Tmknod) handle(cs *connState) message {\n}\nfunc (t *Tmknod) do(cs *connState, uid UID) (*Rmknod, error) {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return nil, syscall.EINVAL\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.Directory)\nif !ok {\n@@ -463,6 +519,11 @@ func (t *Tmkdir) handle(cs *connState) message {\n}\nfunc (t *Tmkdir) do(cs *connState, uid UID) (*Rmkdir, error) {\n+ // Don't allow complex names.\n+ if !isSafeName(t.Name) {\n+ return nil, syscall.EINVAL\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.Directory)\nif !ok {\n@@ -619,6 +680,13 @@ func (t *Tflushf) handle(cs *connState) message {\n// handle implements handler.handle.\nfunc (t *Twalkgetattr) handle(cs *connState) message {\n+ // Check the names.\n+ for _, name := range t.Names {\n+ if !isSafeName(name) {\n+ return newErr(syscall.EINVAL)\n+ }\n+ }\n+\n// Lookup the FID.\nref, ok := cs.LookupFID(t.FID)\nif !ok {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add path sanity checks.
PiperOrigin-RevId: 208527333
Change-Id: I55291bc6b8bc6b88fdd75baf899a71854c39c1a7 |
259,858 | 13.08.2018 13:29:54 | 25,200 | dde836a91858ceee25dbe023263752b39ae21274 | Prevent renames across walk fast path. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/dirent.go",
"new_path": "pkg/sentry/fs/dirent.go",
"diff": "@@ -533,14 +533,18 @@ func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnl\nreturn nil, syscall.ENOENT\n}\n- // Slow path: load the InodeOperations into memory. Since this is a hot path and the lookup may be expensive,\n- // if possible release the lock and re-acquire it.\n+ // Slow path: load the InodeOperations into memory. Since this is a hot path and the lookup may be\n+ // expensive, if possible release the lock and re-acquire it.\nif walkMayUnlock {\n+ // While this dirent is unlocked, the lookup below is not allowed to proceed in tandem with a\n+ // rename operation. The rename should be fully complete before we call Lookup on anything.\nd.mu.Unlock()\n+ renameMu.RLock()\n}\nc, err := d.Inode.Lookup(ctx, name)\nif walkMayUnlock {\nd.mu.Lock()\n+ renameMu.RUnlock()\n}\n// No dice.\nif err != nil {\n@@ -1047,34 +1051,12 @@ func (d *Dirent) flush() {\n}\n}\n-// Busy indicates whether this Dirent is a mount point or root dirent, or has\n-// active positive children.\n-//\n-// This is expensive, since it flushes the children cache.\n-//\n-// TODO: Fix this busy-ness check.\n+// Busy indicates whether this Dirent is a mount point or root dirent.\nfunc (d *Dirent) Busy() bool {\nd.mu.Lock()\ndefer d.mu.Unlock()\n- if d.mounted || d.parent == nil {\n- return true\n- }\n-\n- // Flush any cached references to children that are doomed.\n- d.flush()\n-\n- // Count positive children.\n- var nonNegative int\n- for _, w := range d.children {\n- if child := w.Get(); child != nil {\n- if !child.(*Dirent).IsNegative() {\n- nonNegative++\n- }\n- child.DecRef()\n- }\n- }\n- return nonNegative > 0\n+ return d.mounted || d.parent == nil\n}\n// mount mounts a new dirent with the given inode over d.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Prevent renames across walk fast path.
PiperOrigin-RevId: 208533436
Change-Id: Ifc1a4e2d6438a424650bee831c301b1ac0d670a3 |
259,854 | 14.08.2018 15:05:44 | 25,200 | e97717e29a1bb3e373b130086c4182c598a8121c | Enforce Unix socket address length limit | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -150,6 +150,9 @@ func GetAddress(sfamily int, addr []byte) (tcpip.FullAddress, *syserr.Error) {\nswitch family {\ncase linux.AF_UNIX:\npath := addr[2:]\n+ if len(path) > linux.UnixPathMax {\n+ return tcpip.FullAddress{}, syserr.ErrInvalidArgument\n+ }\n// Drop the terminating NUL (if one exists) and everything after it.\n// Skip the first byte, which is NUL for abstract paths.\nif len(path) > 1 {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Enforce Unix socket address length limit
PiperOrigin-RevId: 208720936
Change-Id: Ic943a88b6efeff49574306d4d4e1f113116ae32e |
259,891 | 14.08.2018 16:21:38 | 25,200 | d4939f6dc22e5607cf2ff8d2a9eb1178e47b0a22 | TTY: Fix data race where calls into tty.queue's waiter were not synchronized.
Now, there's a waiter for each end (master and slave) of the TTY, and each
waiter.Entry is only enqueued in one of the waiters. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/line_discipline.go",
"new_path": "pkg/sentry/fs/tty/line_discipline.go",
"diff": "@@ -23,6 +23,7 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/arch\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/pkg/waiter\"\n)\n@@ -90,6 +91,12 @@ type lineDiscipline struct {\n// column is the location in a row of the cursor. This is important for\n// handling certain special characters like backspace.\ncolumn int\n+\n+ // masterWaiter is used to wait on the master end of the TTY.\n+ masterWaiter waiter.Queue `state:\"zerovalue\"`\n+\n+ // slaveWaiter is used to wait on the slave end of the TTY.\n+ slaveWaiter waiter.Queue `state:\"zerovalue\"`\n}\nfunc newLineDiscipline(termios linux.KernelTermios) *lineDiscipline {\n@@ -127,7 +134,9 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc\n// buffer to its read buffer. Anything already in the read buffer is\n// now readable.\nif oldCanonEnabled && !l.termios.LEnabled(linux.ICANON) {\n- l.inQueue.pushWaitBuf(l)\n+ if n := l.inQueue.pushWaitBuf(l); n > 0 {\n+ l.slaveWaiter.Notify(waiter.EventIn)\n+ }\n}\nreturn 0, err\n@@ -152,13 +161,32 @@ func (l *lineDiscipline) inputQueueReadSize(ctx context.Context, io usermem.IO,\nfunc (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- return l.inQueue.read(ctx, dst, l)\n+ n, pushed, err := l.inQueue.read(ctx, dst, l)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if n > 0 {\n+ l.masterWaiter.Notify(waiter.EventOut)\n+ if pushed {\n+ l.slaveWaiter.Notify(waiter.EventIn)\n+ }\n+ return n, nil\n+ }\n+ return 0, syserror.ErrWouldBlock\n}\nfunc (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- return l.inQueue.write(ctx, src, l)\n+ n, err := l.inQueue.write(ctx, src, l)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if n > 0 {\n+ l.slaveWaiter.Notify(waiter.EventIn)\n+ return n, nil\n+ }\n+ return 0, syserror.ErrWouldBlock\n}\nfunc (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n@@ -168,13 +196,32 @@ func (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO,\nfunc (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- return l.outQueue.read(ctx, dst, l)\n+ n, pushed, err := l.outQueue.read(ctx, dst, l)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if n > 0 {\n+ l.slaveWaiter.Notify(waiter.EventOut)\n+ if pushed {\n+ l.masterWaiter.Notify(waiter.EventIn)\n+ }\n+ return n, nil\n+ }\n+ return 0, syserror.ErrWouldBlock\n}\nfunc (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {\nl.termiosMu.RLock()\ndefer l.termiosMu.RUnlock()\n- return l.outQueue.write(ctx, src, l)\n+ n, err := l.outQueue.write(ctx, src, l)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if n > 0 {\n+ l.masterWaiter.Notify(waiter.EventIn)\n+ return n, nil\n+ }\n+ return 0, syserror.ErrWouldBlock\n}\n// transformer is a helper interface to make it easier to stateify queue.\n@@ -326,7 +373,9 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)\nq.readBuf.WriteRune(c)\n// Anything written to the readBuf will have to be echoed.\nif l.termios.LEnabled(linux.ECHO) {\n- l.outQueue.writeBytes(cBytes, l)\n+ if l.outQueue.writeBytes(cBytes, l) > 0 {\n+ l.masterWaiter.Notify(waiter.EventIn)\n+ }\n}\n// If we finish a line, make it available for reading.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/master.go",
"new_path": "pkg/sentry/fs/tty/master.go",
"diff": "@@ -124,14 +124,12 @@ func (mf *masterFileOperations) Release() {\n// EventRegister implements waiter.Waitable.EventRegister.\nfunc (mf *masterFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\n- mf.t.ld.inQueue.EventRegister(e, mask)\n- mf.t.ld.outQueue.EventRegister(e, mask)\n+ mf.t.ld.masterWaiter.EventRegister(e, mask)\n}\n// EventUnregister implements waiter.Waitable.EventUnregister.\nfunc (mf *masterFileOperations) EventUnregister(e *waiter.Entry) {\n- mf.t.ld.inQueue.EventUnregister(e)\n- mf.t.ld.outQueue.EventUnregister(e)\n+ mf.t.ld.masterWaiter.EventUnregister(e)\n}\n// Readiness implements waiter.Waitable.Readiness.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/queue.go",
"new_path": "pkg/sentry/fs/tty/queue.go",
"diff": "@@ -38,8 +38,6 @@ type queue struct {\n// mu protects everything in queue.\nmu sync.Mutex `state:\"nosave\"`\n- waiter.Queue `state:\"zerovalue\"`\n-\n// readBuf is buffer of data ready to be read when readable is true.\n// This data has been processed.\nreadBuf bytes.Buffer `state:\".([]byte)\"`\n@@ -112,15 +110,17 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca\n}\n-// read reads from q to userspace.\n+// read reads from q to userspace. It returns the number of bytes read as well\n+// as whether the read caused more readable data to become available (whether\n+// data was pushed from the wait buffer to the read buffer).\n//\n// Preconditions:\n// * l.termiosMu must be held for reading.\n-func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, error) {\n+func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {\nq.mu.Lock()\ndefer q.mu.Unlock()\nif !q.readable {\n- return 0, syserror.ErrWouldBlock\n+ return 0, false, syserror.ErrWouldBlock\n}\n// Read out from the read buffer.\n@@ -133,7 +133,7 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl\n}\nn, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])\nif err != nil {\n- return 0, err\n+ return 0, false, err\n}\n// Discard bytes read out.\nq.readBuf.Next(n)\n@@ -144,16 +144,9 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl\n}\n// Move data from the queue's wait buffer to its read buffer.\n- q.pushWaitBufLocked(l)\n+ nPushed := q.pushWaitBufLocked(l)\n- // If state changed, notify any waiters. If nothing was available to\n- // read, let the caller know we could block.\n- if n > 0 {\n- q.Notify(waiter.EventOut)\n- } else {\n- return 0, syserror.ErrWouldBlock\n- }\n- return int64(n), nil\n+ return int64(n), nPushed > 0, nil\n}\n// write writes to q from userspace.\n@@ -169,14 +162,20 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip\nreturn 0, err\n}\nb = b[:n]\n- return q.writeBytes(b, l)\n+\n+ // If state changed, notify any waiters. If we were unable to write\n+ // anything, let the caller know we could block.\n+ if c := q.writeBytes(b, l); c > 0 {\n+ return c, nil\n+ }\n+ return 0, syserror.ErrWouldBlock\n}\n// writeBytes writes to q from b.\n//\n// Preconditions:\n// * l.termiosMu must be held for reading.\n-func (q *queue) writeBytes(b []byte, l *lineDiscipline) (int64, error) {\n+func (q *queue) writeBytes(b []byte, l *lineDiscipline) int64 {\nq.mu.Lock()\ndefer q.mu.Unlock()\n// Write as much as possible to the read buffer.\n@@ -185,36 +184,26 @@ func (q *queue) writeBytes(b []byte, l *lineDiscipline) (int64, error) {\n// Write remaining data to the wait buffer.\nnWaiting, _ := q.waitBuf.Write(b[n:])\n- // If state changed, notify any waiters. If we were unable to write\n- // anything, let the caller know we could block.\n- if n > 0 {\n- q.Notify(waiter.EventIn)\n- } else if nWaiting == 0 {\n- return 0, syserror.ErrWouldBlock\n- }\n- return int64(n + nWaiting), nil\n+ return int64(n + nWaiting)\n}\n// pushWaitBuf fills the queue's read buffer with data from the wait buffer.\n//\n// Preconditions:\n// * l.termiosMu must be held for reading.\n-func (q *queue) pushWaitBuf(l *lineDiscipline) {\n+func (q *queue) pushWaitBuf(l *lineDiscipline) int {\nq.mu.Lock()\ndefer q.mu.Unlock()\n- q.pushWaitBufLocked(l)\n+ return q.pushWaitBufLocked(l)\n}\n// Preconditions:\n// * l.termiosMu must be held for reading.\n// * q.mu must be locked.\n-func (q *queue) pushWaitBufLocked(l *lineDiscipline) {\n+func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {\n// Remove bytes from the wait buffer and move them to the read buffer.\nn := q.transform(l, q, q.waitBuf.Bytes())\nq.waitBuf.Next(n)\n- // If state changed, notify any waiters.\n- if n > 0 {\n- q.Notify(waiter.EventIn)\n- }\n+ return n\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/slave.go",
"new_path": "pkg/sentry/fs/tty/slave.go",
"diff": "@@ -109,14 +109,12 @@ func (sf *slaveFileOperations) Release() {\n// EventRegister implements waiter.Waitable.EventRegister.\nfunc (sf *slaveFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\n- sf.si.t.ld.outQueue.EventRegister(e, mask)\n- sf.si.t.ld.inQueue.EventRegister(e, mask)\n+ sf.si.t.ld.slaveWaiter.EventRegister(e, mask)\n}\n// EventUnregister implements waiter.Waitable.EventUnregister.\nfunc (sf *slaveFileOperations) EventUnregister(e *waiter.Entry) {\n- sf.si.t.ld.outQueue.EventUnregister(e)\n- sf.si.t.ld.inQueue.EventUnregister(e)\n+ sf.si.t.ld.slaveWaiter.EventUnregister(e)\n}\n// Readiness implements waiter.Waitable.Readiness.\n"
}
] | Go | Apache License 2.0 | google/gvisor | TTY: Fix data race where calls into tty.queue's waiter were not synchronized.
Now, there's a waiter for each end (master and slave) of the TTY, and each
waiter.Entry is only enqueued in one of the waiters.
PiperOrigin-RevId: 208734483
Change-Id: I06996148f123075f8dd48cde5a553e2be74c6dce |
259,854 | 14.08.2018 19:02:36 | 25,200 | a620bea045b018b717fbba3193975e6d97c09bf9 | Reduce map lookups in syserr | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/socket/epsocket/epsocket.go",
"new_path": "pkg/sentry/socket/epsocket/epsocket.go",
"diff": "@@ -473,7 +473,7 @@ func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int,\nif err == nil {\nreturn int32(0), nil\n}\n- return int32(syserr.ToLinux(syserr.TranslateNetstackError(err)).Number()), nil\n+ return int32(syserr.TranslateNetstackError(err).ToLinux().Number()), nil\ncase linux.SO_PEERCRED:\nif family != linux.AF_UNIX || outLen < syscall.SizeofUcred {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/syserr/BUILD",
"new_path": "pkg/syserr/BUILD",
"diff": "@@ -6,7 +6,6 @@ go_library(\nname = \"syserr\",\nsrcs = [\n\"host_linux.go\",\n- \"linuxabi.go\",\n\"netstack.go\",\n\"syserr.go\",\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/syserr/host_linux.go",
"new_path": "pkg/syserr/host_linux.go",
"diff": "package syserr\nimport (\n+ \"fmt\"\n\"syscall\"\n)\n-var linuxHostTranslations = map[syscall.Errno]*Error{\n- syscall.EPERM: ErrNotPermitted,\n- syscall.ENOENT: ErrNoFileOrDir,\n- syscall.ESRCH: ErrNoProcess,\n- syscall.EINTR: ErrInterrupted,\n- syscall.EIO: ErrIO,\n- syscall.ENXIO: ErrDeviceOrAddress,\n- syscall.E2BIG: ErrTooManyArgs,\n- syscall.ENOEXEC: ErrEcec,\n- syscall.EBADF: ErrBadFD,\n- syscall.ECHILD: ErrNoChild,\n- syscall.EAGAIN: ErrTryAgain,\n- syscall.ENOMEM: ErrNoMemory,\n- syscall.EACCES: ErrPermissionDenied,\n- syscall.EFAULT: ErrBadAddress,\n- syscall.ENOTBLK: ErrNotBlockDevice,\n- syscall.EBUSY: ErrBusy,\n- syscall.EEXIST: ErrExists,\n- syscall.EXDEV: ErrCrossDeviceLink,\n- syscall.ENODEV: ErrNoDevice,\n- syscall.ENOTDIR: ErrNotDir,\n- syscall.EISDIR: ErrIsDir,\n- syscall.EINVAL: ErrInvalidArgument,\n- syscall.ENFILE: ErrFileTableOverflow,\n- syscall.EMFILE: ErrTooManyOpenFiles,\n- syscall.ENOTTY: ErrNotTTY,\n- syscall.ETXTBSY: ErrTestFileBusy,\n- syscall.EFBIG: ErrFileTooBig,\n- syscall.ENOSPC: ErrNoSpace,\n- syscall.ESPIPE: ErrIllegalSeek,\n- syscall.EROFS: ErrReadOnlyFS,\n- syscall.EMLINK: ErrTooManyLinks,\n- syscall.EPIPE: ErrBrokenPipe,\n- syscall.EDOM: ErrDomain,\n- syscall.ERANGE: ErrRange,\n- syscall.EDEADLOCK: ErrDeadlock,\n- syscall.ENAMETOOLONG: ErrNameTooLong,\n- syscall.ENOLCK: ErrNoLocksAvailable,\n- syscall.ENOSYS: ErrInvalidSyscall,\n- syscall.ENOTEMPTY: ErrDirNotEmpty,\n- syscall.ELOOP: ErrLinkLoop,\n- syscall.ENOMSG: ErrNoMessage,\n- syscall.EIDRM: ErrIdentifierRemoved,\n- syscall.ECHRNG: ErrChannelOutOfRange,\n- syscall.EL2NSYNC: ErrLevelTwoNotSynced,\n- syscall.EL3HLT: ErrLevelThreeHalted,\n- syscall.EL3RST: ErrLevelThreeReset,\n- syscall.ELNRNG: ErrLinkNumberOutOfRange,\n- syscall.EUNATCH: ErrProtocolDriverNotAttached,\n- syscall.ENOCSI: ErrNoCSIAvailable,\n- syscall.EL2HLT: ErrLevelTwoHalted,\n- syscall.EBADE: ErrInvalidExchange,\n- syscall.EBADR: ErrInvalidRequestDescriptor,\n- syscall.EXFULL: ErrExchangeFull,\n- syscall.ENOANO: ErrNoAnode,\n- syscall.EBADRQC: ErrInvalidRequestCode,\n- syscall.EBADSLT: ErrInvalidSlot,\n- syscall.EBFONT: ErrBadFontFile,\n- syscall.ENOSTR: ErrNotStream,\n- syscall.ENODATA: ErrNoDataAvailable,\n- syscall.ETIME: ErrTimerExpired,\n- syscall.ENOSR: ErrStreamsResourceDepleted,\n- syscall.ENONET: ErrMachineNotOnNetwork,\n- syscall.ENOPKG: ErrPackageNotInstalled,\n- syscall.EREMOTE: ErrIsRemote,\n- syscall.ENOLINK: ErrNoLink,\n- syscall.EADV: ErrAdvertise,\n- syscall.ESRMNT: ErrSRMount,\n- syscall.ECOMM: ErrSendCommunication,\n- syscall.EPROTO: ErrProtocol,\n- syscall.EMULTIHOP: ErrMultihopAttempted,\n- syscall.EDOTDOT: ErrRFS,\n- syscall.EBADMSG: ErrInvalidDataMessage,\n- syscall.EOVERFLOW: ErrOverflow,\n- syscall.ENOTUNIQ: ErrNetworkNameNotUnique,\n- syscall.EBADFD: ErrFDInBadState,\n- syscall.EREMCHG: ErrRemoteAddressChanged,\n- syscall.ELIBACC: ErrSharedLibraryInaccessible,\n- syscall.ELIBBAD: ErrCorruptedSharedLibrary,\n- syscall.ELIBSCN: ErrLibSectionCorrupted,\n- syscall.ELIBMAX: ErrTooManySharedLibraries,\n- syscall.ELIBEXEC: ErrSharedLibraryExeced,\n- syscall.EILSEQ: ErrIllegalByteSequence,\n- syscall.ERESTART: ErrShouldRestart,\n- syscall.ESTRPIPE: ErrStreamPipe,\n- syscall.EUSERS: ErrTooManyUsers,\n- syscall.ENOTSOCK: ErrNotASocket,\n- syscall.EDESTADDRREQ: ErrDestinationAddressRequired,\n- syscall.EMSGSIZE: ErrMessageTooLong,\n- syscall.EPROTOTYPE: ErrWrongProtocolForSocket,\n- syscall.ENOPROTOOPT: ErrProtocolNotAvailable,\n- syscall.EPROTONOSUPPORT: ErrProtocolNotSupported,\n- syscall.ESOCKTNOSUPPORT: ErrSocketNotSupported,\n- syscall.EOPNOTSUPP: ErrEndpointOperation,\n- syscall.EPFNOSUPPORT: ErrProtocolFamilyNotSupported,\n- syscall.EAFNOSUPPORT: ErrAddressFamilyNotSupported,\n- syscall.EADDRINUSE: ErrAddressInUse,\n- syscall.EADDRNOTAVAIL: ErrAddressNotAvailable,\n- syscall.ENETDOWN: ErrNetworkDown,\n- syscall.ENETUNREACH: ErrNetworkUnreachable,\n- syscall.ENETRESET: ErrNetworkReset,\n- syscall.ECONNABORTED: ErrConnectionAborted,\n- syscall.ECONNRESET: ErrConnectionReset,\n- syscall.ENOBUFS: ErrNoBufferSpace,\n- syscall.EISCONN: ErrAlreadyConnected,\n- syscall.ENOTCONN: ErrNotConnected,\n- syscall.ESHUTDOWN: ErrShutdown,\n- syscall.ETOOMANYREFS: ErrTooManyRefs,\n- syscall.ETIMEDOUT: ErrTimedOut,\n- syscall.ECONNREFUSED: ErrConnectionRefused,\n- syscall.EHOSTDOWN: ErrHostDown,\n- syscall.EHOSTUNREACH: ErrNoRoute,\n- syscall.EALREADY: ErrAlreadyInProgress,\n- syscall.EINPROGRESS: ErrInProgress,\n- syscall.ESTALE: ErrStaleFileHandle,\n- syscall.EUCLEAN: ErrStructureNeedsCleaning,\n- syscall.ENOTNAM: ErrIsNamedFile,\n- syscall.EREMOTEIO: ErrRemoteIO,\n- syscall.EDQUOT: ErrQuotaExceeded,\n- syscall.ENOMEDIUM: ErrNoMedium,\n- syscall.EMEDIUMTYPE: ErrWrongMediumType,\n- syscall.ECANCELED: ErrCanceled,\n- syscall.ENOKEY: ErrNoKey,\n- syscall.EKEYEXPIRED: ErrKeyExpired,\n- syscall.EKEYREVOKED: ErrKeyRevoked,\n- syscall.EKEYREJECTED: ErrKeyRejected,\n- syscall.EOWNERDEAD: ErrOwnerDied,\n- syscall.ENOTRECOVERABLE: ErrNotRecoverable,\n+const maxErrno = 134\n+\n+type linuxHostTranslation struct {\n+ err *Error\n+ ok bool\n}\n+var linuxHostTranslations [maxErrno]linuxHostTranslation\n+\n// FromHost translates a syscall.Errno to a corresponding Error value.\nfunc FromHost(err syscall.Errno) *Error {\n- e, ok := linuxHostTranslations[err]\n- if !ok {\n- panic(\"Unknown host errno \" + err.Error())\n+ if err < 0 || int(err) >= len(linuxHostTranslations) || !linuxHostTranslations[err].ok {\n+ panic(fmt.Sprintf(\"unknown host errno %q (%d)\", err.Error(), err))\n+ }\n+ return linuxHostTranslations[err].err\n+}\n+\n+func addLinuxHostTranslation(host syscall.Errno, trans *Error) {\n+ if linuxHostTranslations[host].ok {\n+ panic(fmt.Sprintf(\"duplicate translation for host errno %q (%d)\", host.Error(), host))\n}\n- return e\n+ linuxHostTranslations[host] = linuxHostTranslation{err: trans, ok: true}\n}\n"
},
{
"change_type": "DELETE",
"old_path": "pkg/syserr/linuxabi.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package syserr\n-\n-import (\n- \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n-)\n-\n-var linuxABITranslations = map[*Error]*linux.Errno{}\n-\n-// ToLinux translates an Error to a corresponding *linux.Errno value.\n-func ToLinux(err *Error) *linux.Errno {\n- le, ok := linuxABITranslations[err]\n- if !ok {\n- panic(\"No Linux ABI translation available for \" + err.String())\n- }\n- return le\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/syserr/syserr.go",
"new_path": "pkg/syserr/syserr.go",
"diff": "@@ -27,21 +27,35 @@ import (\n// Error represents an internal error.\ntype Error struct {\n- string\n+ // message is the human readable form of this Error.\n+ message string\n+\n+ // noTranslation indicates that this Error cannot be translated to a\n+ // linux.Errno.\n+ noTranslation bool\n+\n+ // errno is the linux.Errno this Error should be translated to. nil means\n+ // that this Error should be translated to a nil linux.Errno.\n+ errno *linux.Errno\n}\n// New creates a new Error and adds a translation for it.\n//\n// New must only be called at init.\nfunc New(message string, linuxTranslation *linux.Errno) *Error {\n- err := &Error{message}\n- linuxABITranslations[err] = linuxTranslation\n+ err := &Error{message: message, errno: linuxTranslation}\n- // TODO: Remove this.\nif linuxTranslation == nil {\n- linuxBackwardsTranslations[err] = nil\n- } else {\n- e := error(syscall.Errno(linuxTranslation.Number()))\n+ return err\n+ }\n+\n+ // TODO: Remove this.\n+ errno := linuxTranslation.Number()\n+ if errno <= 0 || errno >= len(linuxBackwardsTranslations) {\n+ panic(fmt.Sprint(\"invalid errno: \", errno))\n+ }\n+\n+ e := error(syscall.Errno(errno))\n// syserror.ErrWouldBlock gets translated to syserror.EWOULDBLOCK and\n// enables proper blocking semantics. This should temporary address the\n// class of blocking bugs that keep popping up with the current state of\n@@ -49,8 +63,7 @@ func New(message string, linuxTranslation *linux.Errno) *Error {\nif e == syserror.EWOULDBLOCK {\ne = syserror.ErrWouldBlock\n}\n- linuxBackwardsTranslations[err] = e\n- }\n+ linuxBackwardsTranslations[errno] = linuxBackwardsTranslation{err: e, ok: true}\nreturn err\n}\n@@ -58,7 +71,13 @@ func New(message string, linuxTranslation *linux.Errno) *Error {\n// NewWithoutTranslation creates a new Error. If translation is attempted on\n// the error, translation will fail.\nfunc NewWithoutTranslation(message string) *Error {\n- return &Error{message}\n+ return &Error{message: message, noTranslation: true}\n+}\n+\n+func newWithHost(message string, linuxTranslation *linux.Errno, hostErrno syscall.Errno) *Error {\n+ e := New(message, linuxTranslation)\n+ addLinuxHostTranslation(hostErrno, e)\n+ return e\n}\n// String implements fmt.Stringer.String.\n@@ -66,11 +85,16 @@ func (e *Error) String() string {\nif e == nil {\nreturn \"<nil>\"\n}\n- return e.string\n+ return e.message\n+}\n+\n+type linuxBackwardsTranslation struct {\n+ err error\n+ ok bool\n}\n// TODO: Remove this.\n-var linuxBackwardsTranslations = map[*Error]error{}\n+var linuxBackwardsTranslations [maxErrno]linuxBackwardsTranslation\n// ToError translates an Error to a corresponding error value.\n//\n@@ -79,11 +103,26 @@ func (e *Error) ToError() error {\nif e == nil {\nreturn nil\n}\n- err, ok := linuxBackwardsTranslations[e]\n- if !ok {\n- panic(fmt.Sprintf(\"unknown error: %q\", e.string))\n+ if e.noTranslation {\n+ panic(fmt.Sprintf(\"error %q does not support translation\", e.message))\n}\n- return err\n+ if e.errno == nil {\n+ return nil\n+ }\n+ errno := e.errno.Number()\n+ if errno <= 0 || errno >= len(linuxBackwardsTranslations) || !linuxBackwardsTranslations[errno].ok {\n+ panic(fmt.Sprintf(\"unknown error %q (%d)\", e.message, errno))\n+ }\n+ return linuxBackwardsTranslations[errno].err\n+}\n+\n+// ToLinux converts the Error to a Linux ABI error that can be returned to the\n+// application.\n+func (e *Error) ToLinux() *linux.Errno {\n+ if e.noTranslation {\n+ panic(fmt.Sprintf(\"No Linux ABI translation available for %q\", e.message))\n+ }\n+ return e.errno\n}\n// TODO: Remove or replace most of these errors.\n@@ -91,134 +130,137 @@ func (e *Error) ToError() error {\n// Some of the errors should be replaced with package specific errors and\n// others should be removed entirely.\nvar (\n- ErrNotPermitted = New(\"operation not permitted\", linux.EPERM)\n- ErrNoFileOrDir = New(\"no such file or directory\", linux.ENOENT)\n- ErrNoProcess = New(\"no such process\", linux.ESRCH)\n- ErrInterrupted = New(\"interrupted system call\", linux.EINTR)\n- ErrIO = New(\"I/O error\", linux.EIO)\n- ErrDeviceOrAddress = New(\"no such device or address\", linux.ENXIO)\n- ErrTooManyArgs = New(\"argument list too long\", linux.E2BIG)\n- ErrEcec = New(\"exec format error\", linux.ENOEXEC)\n- ErrBadFD = New(\"bad file number\", linux.EBADF)\n- ErrNoChild = New(\"no child processes\", linux.ECHILD)\n- ErrTryAgain = New(\"try again\", linux.EAGAIN)\n- ErrNoMemory = New(\"out of memory\", linux.ENOMEM)\n- ErrPermissionDenied = New(\"permission denied\", linux.EACCES)\n- ErrBadAddress = New(\"bad address\", linux.EFAULT)\n- ErrNotBlockDevice = New(\"block device required\", linux.ENOTBLK)\n- ErrBusy = New(\"device or resource busy\", linux.EBUSY)\n- ErrExists = New(\"file exists\", linux.EEXIST)\n- ErrCrossDeviceLink = New(\"cross-device link\", linux.EXDEV)\n- ErrNoDevice = New(\"no such device\", linux.ENODEV)\n- ErrNotDir = New(\"not a directory\", linux.ENOTDIR)\n- ErrIsDir = New(\"is a directory\", linux.EISDIR)\n- ErrInvalidArgument = New(\"invalid argument\", linux.EINVAL)\n- ErrFileTableOverflow = New(\"file table overflow\", linux.ENFILE)\n- ErrTooManyOpenFiles = New(\"too many open files\", linux.EMFILE)\n- ErrNotTTY = New(\"not a typewriter\", linux.ENOTTY)\n- ErrTestFileBusy = New(\"text file busy\", linux.ETXTBSY)\n- ErrFileTooBig = New(\"file too large\", linux.EFBIG)\n- ErrNoSpace = New(\"no space left on device\", linux.ENOSPC)\n- ErrIllegalSeek = New(\"illegal seek\", linux.ESPIPE)\n- ErrReadOnlyFS = New(\"read-only file system\", linux.EROFS)\n- ErrTooManyLinks = New(\"too many links\", linux.EMLINK)\n- ErrBrokenPipe = New(\"broken pipe\", linux.EPIPE)\n- ErrDomain = New(\"math argument out of domain of func\", linux.EDOM)\n- ErrRange = New(\"math result not representable\", linux.ERANGE)\n- ErrDeadlock = New(\"resource deadlock would occur\", linux.EDEADLOCK)\n- ErrNameTooLong = New(\"file name too long\", linux.ENAMETOOLONG)\n- ErrNoLocksAvailable = New(\"no record locks available\", linux.ENOLCK)\n- ErrInvalidSyscall = New(\"invalid system call number\", linux.ENOSYS)\n- ErrDirNotEmpty = New(\"directory not empty\", linux.ENOTEMPTY)\n- ErrLinkLoop = New(\"too many symbolic links encountered\", linux.ELOOP)\n+ ErrNotPermitted = newWithHost(\"operation not permitted\", linux.EPERM, syscall.EPERM)\n+ ErrNoFileOrDir = newWithHost(\"no such file or directory\", linux.ENOENT, syscall.ENOENT)\n+ ErrNoProcess = newWithHost(\"no such process\", linux.ESRCH, syscall.ESRCH)\n+ ErrInterrupted = newWithHost(\"interrupted system call\", linux.EINTR, syscall.EINTR)\n+ ErrIO = newWithHost(\"I/O error\", linux.EIO, syscall.EIO)\n+ ErrDeviceOrAddress = newWithHost(\"no such device or address\", linux.ENXIO, syscall.ENXIO)\n+ ErrTooManyArgs = newWithHost(\"argument list too long\", linux.E2BIG, syscall.E2BIG)\n+ ErrEcec = newWithHost(\"exec format error\", linux.ENOEXEC, syscall.ENOEXEC)\n+ ErrBadFD = newWithHost(\"bad file number\", linux.EBADF, syscall.EBADF)\n+ ErrNoChild = newWithHost(\"no child processes\", linux.ECHILD, syscall.ECHILD)\n+ ErrTryAgain = newWithHost(\"try again\", linux.EAGAIN, syscall.EAGAIN)\n+ ErrNoMemory = newWithHost(\"out of memory\", linux.ENOMEM, syscall.ENOMEM)\n+ ErrPermissionDenied = newWithHost(\"permission denied\", linux.EACCES, syscall.EACCES)\n+ ErrBadAddress = newWithHost(\"bad address\", linux.EFAULT, syscall.EFAULT)\n+ ErrNotBlockDevice = newWithHost(\"block device required\", linux.ENOTBLK, syscall.ENOTBLK)\n+ ErrBusy = newWithHost(\"device or resource busy\", linux.EBUSY, syscall.EBUSY)\n+ ErrExists = newWithHost(\"file exists\", linux.EEXIST, syscall.EEXIST)\n+ ErrCrossDeviceLink = newWithHost(\"cross-device link\", linux.EXDEV, syscall.EXDEV)\n+ ErrNoDevice = newWithHost(\"no such device\", linux.ENODEV, syscall.ENODEV)\n+ ErrNotDir = newWithHost(\"not a directory\", linux.ENOTDIR, syscall.ENOTDIR)\n+ ErrIsDir = newWithHost(\"is a directory\", linux.EISDIR, syscall.EISDIR)\n+ ErrInvalidArgument = newWithHost(\"invalid argument\", linux.EINVAL, syscall.EINVAL)\n+ ErrFileTableOverflow = newWithHost(\"file table overflow\", linux.ENFILE, syscall.ENFILE)\n+ ErrTooManyOpenFiles = newWithHost(\"too many open files\", linux.EMFILE, syscall.EMFILE)\n+ ErrNotTTY = newWithHost(\"not a typewriter\", linux.ENOTTY, syscall.ENOTTY)\n+ ErrTestFileBusy = newWithHost(\"text file busy\", linux.ETXTBSY, syscall.ETXTBSY)\n+ ErrFileTooBig = newWithHost(\"file too large\", linux.EFBIG, syscall.EFBIG)\n+ ErrNoSpace = newWithHost(\"no space left on device\", linux.ENOSPC, syscall.ENOSPC)\n+ ErrIllegalSeek = newWithHost(\"illegal seek\", linux.ESPIPE, syscall.ESPIPE)\n+ ErrReadOnlyFS = newWithHost(\"read-only file system\", linux.EROFS, syscall.EROFS)\n+ ErrTooManyLinks = newWithHost(\"too many links\", linux.EMLINK, syscall.EMLINK)\n+ ErrBrokenPipe = newWithHost(\"broken pipe\", linux.EPIPE, syscall.EPIPE)\n+ ErrDomain = newWithHost(\"math argument out of domain of func\", linux.EDOM, syscall.EDOM)\n+ ErrRange = newWithHost(\"math result not representable\", linux.ERANGE, syscall.ERANGE)\n+ ErrDeadlock = newWithHost(\"resource deadlock would occur\", linux.EDEADLOCK, syscall.EDEADLOCK)\n+ ErrNameTooLong = newWithHost(\"file name too long\", linux.ENAMETOOLONG, syscall.ENAMETOOLONG)\n+ ErrNoLocksAvailable = newWithHost(\"no record locks available\", linux.ENOLCK, syscall.ENOLCK)\n+ ErrInvalidSyscall = newWithHost(\"invalid system call number\", linux.ENOSYS, syscall.ENOSYS)\n+ ErrDirNotEmpty = newWithHost(\"directory not empty\", linux.ENOTEMPTY, syscall.ENOTEMPTY)\n+ ErrLinkLoop = newWithHost(\"too many symbolic links encountered\", linux.ELOOP, syscall.ELOOP)\n+ ErrNoMessage = newWithHost(\"no message of desired type\", linux.ENOMSG, syscall.ENOMSG)\n+ ErrIdentifierRemoved = newWithHost(\"identifier removed\", linux.EIDRM, syscall.EIDRM)\n+ ErrChannelOutOfRange = newWithHost(\"channel number out of range\", linux.ECHRNG, syscall.ECHRNG)\n+ ErrLevelTwoNotSynced = newWithHost(\"level 2 not synchronized\", linux.EL2NSYNC, syscall.EL2NSYNC)\n+ ErrLevelThreeHalted = newWithHost(\"level 3 halted\", linux.EL3HLT, syscall.EL3HLT)\n+ ErrLevelThreeReset = newWithHost(\"level 3 reset\", linux.EL3RST, syscall.EL3RST)\n+ ErrLinkNumberOutOfRange = newWithHost(\"link number out of range\", linux.ELNRNG, syscall.ELNRNG)\n+ ErrProtocolDriverNotAttached = newWithHost(\"protocol driver not attached\", linux.EUNATCH, syscall.EUNATCH)\n+ ErrNoCSIAvailable = newWithHost(\"no CSI structure available\", linux.ENOCSI, syscall.ENOCSI)\n+ ErrLevelTwoHalted = newWithHost(\"level 2 halted\", linux.EL2HLT, syscall.EL2HLT)\n+ ErrInvalidExchange = newWithHost(\"invalid exchange\", linux.EBADE, syscall.EBADE)\n+ ErrInvalidRequestDescriptor = newWithHost(\"invalid request descriptor\", linux.EBADR, syscall.EBADR)\n+ ErrExchangeFull = newWithHost(\"exchange full\", linux.EXFULL, syscall.EXFULL)\n+ ErrNoAnode = newWithHost(\"no anode\", linux.ENOANO, syscall.ENOANO)\n+ ErrInvalidRequestCode = newWithHost(\"invalid request code\", linux.EBADRQC, syscall.EBADRQC)\n+ ErrInvalidSlot = newWithHost(\"invalid slot\", linux.EBADSLT, syscall.EBADSLT)\n+ ErrBadFontFile = newWithHost(\"bad font file format\", linux.EBFONT, syscall.EBFONT)\n+ ErrNotStream = newWithHost(\"device not a stream\", linux.ENOSTR, syscall.ENOSTR)\n+ ErrNoDataAvailable = newWithHost(\"no data available\", linux.ENODATA, syscall.ENODATA)\n+ ErrTimerExpired = newWithHost(\"timer expired\", linux.ETIME, syscall.ETIME)\n+ ErrStreamsResourceDepleted = newWithHost(\"out of streams resources\", linux.ENOSR, syscall.ENOSR)\n+ ErrMachineNotOnNetwork = newWithHost(\"machine is not on the network\", linux.ENONET, syscall.ENONET)\n+ ErrPackageNotInstalled = newWithHost(\"package not installed\", linux.ENOPKG, syscall.ENOPKG)\n+ ErrIsRemote = newWithHost(\"object is remote\", linux.EREMOTE, syscall.EREMOTE)\n+ ErrNoLink = newWithHost(\"link has been severed\", linux.ENOLINK, syscall.ENOLINK)\n+ ErrAdvertise = newWithHost(\"advertise error\", linux.EADV, syscall.EADV)\n+ ErrSRMount = newWithHost(\"srmount error\", linux.ESRMNT, syscall.ESRMNT)\n+ ErrSendCommunication = newWithHost(\"communication error on send\", linux.ECOMM, syscall.ECOMM)\n+ ErrProtocol = newWithHost(\"protocol error\", linux.EPROTO, syscall.EPROTO)\n+ ErrMultihopAttempted = newWithHost(\"multihop attempted\", linux.EMULTIHOP, syscall.EMULTIHOP)\n+ ErrRFS = newWithHost(\"RFS specific error\", linux.EDOTDOT, syscall.EDOTDOT)\n+ ErrInvalidDataMessage = newWithHost(\"not a data message\", linux.EBADMSG, syscall.EBADMSG)\n+ ErrOverflow = newWithHost(\"value too large for defined data type\", linux.EOVERFLOW, syscall.EOVERFLOW)\n+ ErrNetworkNameNotUnique = newWithHost(\"name not unique on network\", linux.ENOTUNIQ, syscall.ENOTUNIQ)\n+ ErrFDInBadState = newWithHost(\"file descriptor in bad state\", linux.EBADFD, syscall.EBADFD)\n+ ErrRemoteAddressChanged = newWithHost(\"remote address changed\", linux.EREMCHG, syscall.EREMCHG)\n+ ErrSharedLibraryInaccessible = newWithHost(\"can not access a needed shared library\", linux.ELIBACC, syscall.ELIBACC)\n+ ErrCorruptedSharedLibrary = newWithHost(\"accessing a corrupted shared library\", linux.ELIBBAD, syscall.ELIBBAD)\n+ ErrLibSectionCorrupted = newWithHost(\".lib section in a.out corrupted\", linux.ELIBSCN, syscall.ELIBSCN)\n+ ErrTooManySharedLibraries = newWithHost(\"attempting to link in too many shared libraries\", linux.ELIBMAX, syscall.ELIBMAX)\n+ ErrSharedLibraryExeced = newWithHost(\"cannot exec a shared library directly\", linux.ELIBEXEC, syscall.ELIBEXEC)\n+ ErrIllegalByteSequence = newWithHost(\"illegal byte sequence\", linux.EILSEQ, syscall.EILSEQ)\n+ ErrShouldRestart = newWithHost(\"interrupted system call should be restarted\", linux.ERESTART, syscall.ERESTART)\n+ ErrStreamPipe = newWithHost(\"streams pipe error\", linux.ESTRPIPE, syscall.ESTRPIPE)\n+ ErrTooManyUsers = newWithHost(\"too many users\", linux.EUSERS, syscall.EUSERS)\n+ ErrNotASocket = newWithHost(\"socket operation on non-socket\", linux.ENOTSOCK, syscall.ENOTSOCK)\n+ ErrDestinationAddressRequired = newWithHost(\"destination address required\", linux.EDESTADDRREQ, syscall.EDESTADDRREQ)\n+ ErrMessageTooLong = newWithHost(\"message too long\", linux.EMSGSIZE, syscall.EMSGSIZE)\n+ ErrWrongProtocolForSocket = newWithHost(\"protocol wrong type for socket\", linux.EPROTOTYPE, syscall.EPROTOTYPE)\n+ ErrProtocolNotAvailable = newWithHost(\"protocol not available\", linux.ENOPROTOOPT, syscall.ENOPROTOOPT)\n+ ErrProtocolNotSupported = newWithHost(\"protocol not supported\", linux.EPROTONOSUPPORT, syscall.EPROTONOSUPPORT)\n+ ErrSocketNotSupported = newWithHost(\"socket type not supported\", linux.ESOCKTNOSUPPORT, syscall.ESOCKTNOSUPPORT)\n+ ErrEndpointOperation = newWithHost(\"operation not supported on transport endpoint\", linux.EOPNOTSUPP, syscall.EOPNOTSUPP)\n+ ErrProtocolFamilyNotSupported = newWithHost(\"protocol family not supported\", linux.EPFNOSUPPORT, syscall.EPFNOSUPPORT)\n+ ErrAddressFamilyNotSupported = newWithHost(\"address family not supported by protocol\", linux.EAFNOSUPPORT, syscall.EAFNOSUPPORT)\n+ ErrAddressInUse = newWithHost(\"address already in use\", linux.EADDRINUSE, syscall.EADDRINUSE)\n+ ErrAddressNotAvailable = newWithHost(\"cannot assign requested address\", linux.EADDRNOTAVAIL, syscall.EADDRNOTAVAIL)\n+ ErrNetworkDown = newWithHost(\"network is down\", linux.ENETDOWN, syscall.ENETDOWN)\n+ ErrNetworkUnreachable = newWithHost(\"network is unreachable\", linux.ENETUNREACH, syscall.ENETUNREACH)\n+ ErrNetworkReset = newWithHost(\"network dropped connection because of reset\", linux.ENETRESET, syscall.ENETRESET)\n+ ErrConnectionAborted = newWithHost(\"software caused connection abort\", linux.ECONNABORTED, syscall.ECONNABORTED)\n+ ErrConnectionReset = newWithHost(\"connection reset by peer\", linux.ECONNRESET, syscall.ECONNRESET)\n+ ErrNoBufferSpace = newWithHost(\"no buffer space available\", linux.ENOBUFS, syscall.ENOBUFS)\n+ ErrAlreadyConnected = newWithHost(\"transport endpoint is already connected\", linux.EISCONN, syscall.EISCONN)\n+ ErrNotConnected = newWithHost(\"transport endpoint is not connected\", linux.ENOTCONN, syscall.ENOTCONN)\n+ ErrShutdown = newWithHost(\"cannot send after transport endpoint shutdown\", linux.ESHUTDOWN, syscall.ESHUTDOWN)\n+ ErrTooManyRefs = newWithHost(\"too many references: cannot splice\", linux.ETOOMANYREFS, syscall.ETOOMANYREFS)\n+ ErrTimedOut = newWithHost(\"connection timed out\", linux.ETIMEDOUT, syscall.ETIMEDOUT)\n+ ErrConnectionRefused = newWithHost(\"connection refused\", linux.ECONNREFUSED, syscall.ECONNREFUSED)\n+ ErrHostDown = newWithHost(\"host is down\", linux.EHOSTDOWN, syscall.EHOSTDOWN)\n+ ErrNoRoute = newWithHost(\"no route to host\", linux.EHOSTUNREACH, syscall.EHOSTUNREACH)\n+ ErrAlreadyInProgress = newWithHost(\"operation already in progress\", linux.EALREADY, syscall.EALREADY)\n+ ErrInProgress = newWithHost(\"operation now in progress\", linux.EINPROGRESS, syscall.EINPROGRESS)\n+ ErrStaleFileHandle = newWithHost(\"stale file handle\", linux.ESTALE, syscall.ESTALE)\n+ ErrStructureNeedsCleaning = newWithHost(\"structure needs cleaning\", linux.EUCLEAN, syscall.EUCLEAN)\n+ ErrIsNamedFile = newWithHost(\"is a named type file\", linux.ENOTNAM, syscall.ENOTNAM)\n+ ErrRemoteIO = newWithHost(\"remote I/O error\", linux.EREMOTEIO, syscall.EREMOTEIO)\n+ ErrQuotaExceeded = newWithHost(\"quota exceeded\", linux.EDQUOT, syscall.EDQUOT)\n+ ErrNoMedium = newWithHost(\"no medium found\", linux.ENOMEDIUM, syscall.ENOMEDIUM)\n+ ErrWrongMediumType = newWithHost(\"wrong medium type\", linux.EMEDIUMTYPE, syscall.EMEDIUMTYPE)\n+ ErrCanceled = newWithHost(\"operation canceled\", linux.ECANCELED, syscall.ECANCELED)\n+ ErrNoKey = newWithHost(\"required key not available\", linux.ENOKEY, syscall.ENOKEY)\n+ ErrKeyExpired = newWithHost(\"key has expired\", linux.EKEYEXPIRED, syscall.EKEYEXPIRED)\n+ ErrKeyRevoked = newWithHost(\"key has been revoked\", linux.EKEYREVOKED, syscall.EKEYREVOKED)\n+ ErrKeyRejected = newWithHost(\"key was rejected by service\", linux.EKEYREJECTED, syscall.EKEYREJECTED)\n+ ErrOwnerDied = newWithHost(\"owner died\", linux.EOWNERDEAD, syscall.EOWNERDEAD)\n+ ErrNotRecoverable = newWithHost(\"state not recoverable\", linux.ENOTRECOVERABLE, syscall.ENOTRECOVERABLE)\n+\n+ // ErrWouldBlock translates to EWOULDBLOCK which is the same as EAGAIN\n+ // on Linux.\nErrWouldBlock = New(\"operation would block\", linux.EWOULDBLOCK)\n- ErrNoMessage = New(\"no message of desired type\", linux.ENOMSG)\n- ErrIdentifierRemoved = New(\"identifier removed\", linux.EIDRM)\n- ErrChannelOutOfRange = New(\"channel number out of range\", linux.ECHRNG)\n- ErrLevelTwoNotSynced = New(\"level 2 not synchronized\", linux.EL2NSYNC)\n- ErrLevelThreeHalted = New(\"level 3 halted\", linux.EL3HLT)\n- ErrLevelThreeReset = New(\"level 3 reset\", linux.EL3RST)\n- ErrLinkNumberOutOfRange = New(\"link number out of range\", linux.ELNRNG)\n- ErrProtocolDriverNotAttached = New(\"protocol driver not attached\", linux.EUNATCH)\n- ErrNoCSIAvailable = New(\"no CSI structure available\", linux.ENOCSI)\n- ErrLevelTwoHalted = New(\"level 2 halted\", linux.EL2HLT)\n- ErrInvalidExchange = New(\"invalid exchange\", linux.EBADE)\n- ErrInvalidRequestDescriptor = New(\"invalid request descriptor\", linux.EBADR)\n- ErrExchangeFull = New(\"exchange full\", linux.EXFULL)\n- ErrNoAnode = New(\"no anode\", linux.ENOANO)\n- ErrInvalidRequestCode = New(\"invalid request code\", linux.EBADRQC)\n- ErrInvalidSlot = New(\"invalid slot\", linux.EBADSLT)\n- ErrBadFontFile = New(\"bad font file format\", linux.EBFONT)\n- ErrNotStream = New(\"device not a stream\", linux.ENOSTR)\n- ErrNoDataAvailable = New(\"no data available\", linux.ENODATA)\n- ErrTimerExpired = New(\"timer expired\", linux.ETIME)\n- ErrStreamsResourceDepleted = New(\"out of streams resources\", linux.ENOSR)\n- ErrMachineNotOnNetwork = New(\"machine is not on the network\", linux.ENONET)\n- ErrPackageNotInstalled = New(\"package not installed\", linux.ENOPKG)\n- ErrIsRemote = New(\"object is remote\", linux.EREMOTE)\n- ErrNoLink = New(\"link has been severed\", linux.ENOLINK)\n- ErrAdvertise = New(\"advertise error\", linux.EADV)\n- ErrSRMount = New(\"srmount error\", linux.ESRMNT)\n- ErrSendCommunication = New(\"communication error on send\", linux.ECOMM)\n- ErrProtocol = New(\"protocol error\", linux.EPROTO)\n- ErrMultihopAttempted = New(\"multihop attempted\", linux.EMULTIHOP)\n- ErrRFS = New(\"RFS specific error\", linux.EDOTDOT)\n- ErrInvalidDataMessage = New(\"not a data message\", linux.EBADMSG)\n- ErrOverflow = New(\"value too large for defined data type\", linux.EOVERFLOW)\n- ErrNetworkNameNotUnique = New(\"name not unique on network\", linux.ENOTUNIQ)\n- ErrFDInBadState = New(\"file descriptor in bad state\", linux.EBADFD)\n- ErrRemoteAddressChanged = New(\"remote address changed\", linux.EREMCHG)\n- ErrSharedLibraryInaccessible = New(\"can not access a needed shared library\", linux.ELIBACC)\n- ErrCorruptedSharedLibrary = New(\"accessing a corrupted shared library\", linux.ELIBBAD)\n- ErrLibSectionCorrupted = New(\".lib section in a.out corrupted\", linux.ELIBSCN)\n- ErrTooManySharedLibraries = New(\"attempting to link in too many shared libraries\", linux.ELIBMAX)\n- ErrSharedLibraryExeced = New(\"cannot exec a shared library directly\", linux.ELIBEXEC)\n- ErrIllegalByteSequence = New(\"illegal byte sequence\", linux.EILSEQ)\n- ErrShouldRestart = New(\"interrupted system call should be restarted\", linux.ERESTART)\n- ErrStreamPipe = New(\"streams pipe error\", linux.ESTRPIPE)\n- ErrTooManyUsers = New(\"too many users\", linux.EUSERS)\n- ErrNotASocket = New(\"socket operation on non-socket\", linux.ENOTSOCK)\n- ErrDestinationAddressRequired = New(\"destination address required\", linux.EDESTADDRREQ)\n- ErrMessageTooLong = New(\"message too long\", linux.EMSGSIZE)\n- ErrWrongProtocolForSocket = New(\"protocol wrong type for socket\", linux.EPROTOTYPE)\n- ErrProtocolNotAvailable = New(\"protocol not available\", linux.ENOPROTOOPT)\n- ErrProtocolNotSupported = New(\"protocol not supported\", linux.EPROTONOSUPPORT)\n- ErrSocketNotSupported = New(\"socket type not supported\", linux.ESOCKTNOSUPPORT)\n- ErrEndpointOperation = New(\"operation not supported on transport endpoint\", linux.EOPNOTSUPP)\n- ErrProtocolFamilyNotSupported = New(\"protocol family not supported\", linux.EPFNOSUPPORT)\n- ErrAddressFamilyNotSupported = New(\"address family not supported by protocol\", linux.EAFNOSUPPORT)\n- ErrAddressInUse = New(\"address already in use\", linux.EADDRINUSE)\n- ErrAddressNotAvailable = New(\"cannot assign requested address\", linux.EADDRNOTAVAIL)\n- ErrNetworkDown = New(\"network is down\", linux.ENETDOWN)\n- ErrNetworkUnreachable = New(\"network is unreachable\", linux.ENETUNREACH)\n- ErrNetworkReset = New(\"network dropped connection because of reset\", linux.ENETRESET)\n- ErrConnectionAborted = New(\"software caused connection abort\", linux.ECONNABORTED)\n- ErrConnectionReset = New(\"connection reset by peer\", linux.ECONNRESET)\n- ErrNoBufferSpace = New(\"no buffer space available\", linux.ENOBUFS)\n- ErrAlreadyConnected = New(\"transport endpoint is already connected\", linux.EISCONN)\n- ErrNotConnected = New(\"transport endpoint is not connected\", linux.ENOTCONN)\n- ErrShutdown = New(\"cannot send after transport endpoint shutdown\", linux.ESHUTDOWN)\n- ErrTooManyRefs = New(\"too many references: cannot splice\", linux.ETOOMANYREFS)\n- ErrTimedOut = New(\"connection timed out\", linux.ETIMEDOUT)\n- ErrConnectionRefused = New(\"connection refused\", linux.ECONNREFUSED)\n- ErrHostDown = New(\"host is down\", linux.EHOSTDOWN)\n- ErrNoRoute = New(\"no route to host\", linux.EHOSTUNREACH)\n- ErrAlreadyInProgress = New(\"operation already in progress\", linux.EALREADY)\n- ErrInProgress = New(\"operation now in progress\", linux.EINPROGRESS)\n- ErrStaleFileHandle = New(\"stale file handle\", linux.ESTALE)\n- ErrStructureNeedsCleaning = New(\"structure needs cleaning\", linux.EUCLEAN)\n- ErrIsNamedFile = New(\"is a named type file\", linux.ENOTNAM)\n- ErrRemoteIO = New(\"remote I/O error\", linux.EREMOTEIO)\n- ErrQuotaExceeded = New(\"quota exceeded\", linux.EDQUOT)\n- ErrNoMedium = New(\"no medium found\", linux.ENOMEDIUM)\n- ErrWrongMediumType = New(\"wrong medium type\", linux.EMEDIUMTYPE)\n- ErrCanceled = New(\"operation Canceled\", linux.ECANCELED)\n- ErrNoKey = New(\"required key not available\", linux.ENOKEY)\n- ErrKeyExpired = New(\"key has expired\", linux.EKEYEXPIRED)\n- ErrKeyRevoked = New(\"key has been revoked\", linux.EKEYREVOKED)\n- ErrKeyRejected = New(\"key was rejected by service\", linux.EKEYREJECTED)\n- ErrOwnerDied = New(\"owner died\", linux.EOWNERDEAD)\n- ErrNotRecoverable = New(\"state not recoverable\", linux.ENOTRECOVERABLE)\n)\n// FromError converts a generic error to an *Error.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Reduce map lookups in syserr
PiperOrigin-RevId: 208755352
Change-Id: Ia24630f452a4a42940ab73a8113a2fd5ea2cfca2 |
259,854 | 15.08.2018 17:00:54 | 25,200 | eacbe6a678ec08751543868ef19f9197c167fe60 | Remove obsolete comment about panicking | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/buffer/view.go",
"new_path": "pkg/tcpip/buffer/view.go",
"diff": "@@ -121,7 +121,6 @@ func (vv *VectorisedView) Clone(buffer []View) VectorisedView {\n}\n// First returns the first view of the vectorised view.\n-// It panics if the vectorised view is empty.\nfunc (vv *VectorisedView) First() View {\nif len(vv.views) == 0 {\nreturn nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Remove obsolete comment about panicking
PiperOrigin-RevId: 208908702
Change-Id: I6be9c765c257a9ddb1a965a03942ab3fc3a34a43 |
259,992 | 16.08.2018 10:54:21 | 25,200 | da087e66cc0eb1616437e5b729576801671d3696 | Combine functions to search for file under one common function
Bazel adds the build type in front of directories making it hard to
refer to binaries in code. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -206,27 +206,6 @@ func run(spec *specs.Spec, conf *boot.Config) error {\nreturn nil\n}\n-// findUDSApp finds the uds_test_app binary to be used in the UnixDomainSocket test.\n-func findUDSApp() (string, error) {\n- // TODO: Use bazel FindBinary function.\n-\n- // uds_test_app is in a directory like:\n- // './linux_amd64_pure_stripped/uds_test_app.go'.\n- //\n- // Since I don't want to construct 'linux_amd64_pure_stripped' based on the\n- // build type, do a quick search for: './*/uds_test_app'\n- // Note: This glob will only succeed when file is one directory deep.\n- matches, err := filepath.Glob(\"./*/uds_test_app\")\n- if err != nil {\n- return \"\", fmt.Errorf(\"error globbing: %v\", err)\n- }\n- if i := len(matches); i != 1 {\n- return \"\", fmt.Errorf(\"error identifying uds_test_app from matches: got %d matches\", i)\n- }\n-\n- return matches[0], nil\n-}\n-\ntype configOption int\nconst (\n@@ -760,16 +739,9 @@ func TestUnixDomainSockets(t *testing.T) {\n// Get file path for corresponding output file in sandbox.\noutputFileSandbox := filepath.Join(goferRoot, output)\n- // Need to get working directory, even though not intuitive.\n- wd, _ := os.Getwd()\n- localPath, err := findUDSApp()\n+ app, err := testutil.FindFile(\"runsc/container/uds_test_app\")\nif err != nil {\n- t.Fatalf(\"error finding localPath: %v\", err)\n- }\n- app := filepath.Join(wd, localPath)\n-\n- if _, err = os.Stat(app); err != nil {\n- t.Fatalf(\"error finding the uds_test_app: %v\", err)\n+ t.Fatal(\"error finding uds_test_app:\", err)\n}\nsocketPath := filepath.Join(dir, socket)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -37,39 +37,53 @@ var RaceEnabled = false\n// ConfigureExePath configures the executable for runsc in the test environment.\nfunc ConfigureExePath() error {\n-\n- // runsc is in a directory like: 'runsc/linux_amd64_pure_stripped/runsc'.\n- // Since I don't want to construct 'linux_amd64_pure_stripped' based on the\n- // build type, do a quick search for: 'runsc/*/runsc'\n- exePath := \"\"\n- lv1 := \"./runsc\"\n- lv1fis, err := ioutil.ReadDir(lv1)\n+ path, err := FindFile(\"runsc/runsc\")\nif err != nil {\nreturn err\n}\n- for _, fi := range lv1fis {\n- if !fi.IsDir() {\n- continue\n- }\n- lv2fis, err := ioutil.ReadDir(filepath.Join(lv1, fi.Name()))\n- if err != nil {\n- return err\n+ specutils.ExePath = path\n+ return nil\n}\n- for _, candidate := range lv2fis {\n- if !candidate.IsDir() && candidate.Name() == \"runsc\" {\n- exePath, err = filepath.Abs(filepath.Join(lv1, fi.Name(), candidate.Name()))\n+\n+// FindFile searchs for a file inside the test run environment. It returns the\n+// full path to the file. It fails if none or more than one file is found.\n+func FindFile(path string) (string, error) {\n+ wd, err := os.Getwd()\nif err != nil {\n- return err\n+ return \"\", err\n}\n+\n+ // The test root is demarcated by a path element called \"__main__\". Search for\n+ // it backwards from the in the working directory.\n+ root := wd\n+ for {\n+ dir, name := filepath.Split(root)\n+ if name == \"__main__\" {\nbreak\n}\n+ if len(dir) == 0 {\n+ return \"\", fmt.Errorf(\"directory __main__ not found in %q\", wd)\n}\n+ // Remove ending slash to loop around.\n+ root = dir[:len(dir)-1]\n}\n- if exePath == \"\" {\n- return fmt.Errorf(\"path to runsc not found\")\n+\n+ // bazel adds the build type to the directory structure. Since I don't want\n+ // to guess what build type it's, just place '*' to match anything.\n+ //\n+ // The pattern goes like: /test-path/__main__/directories/*/file.\n+ pattern := filepath.Join(root, filepath.Dir(path), \"*\", filepath.Base(path))\n+ matches, err := filepath.Glob(pattern)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error globbing %q: %v\", pattern, err)\n}\n- specutils.ExePath = exePath\n- return nil\n+ if len(matches) == 0 {\n+ return \"\", fmt.Errorf(\"file %q not found\", path)\n+ }\n+ if len(matches) != 1 {\n+ return \"\", fmt.Errorf(\"more than one match found for %q: %s\", path, matches)\n+ }\n+ return matches[0], nil\n}\n// TestConfig return the default configuration to use in tests.\n"
}
] | Go | Apache License 2.0 | google/gvisor | Combine functions to search for file under one common function
Bazel adds the build type in front of directories making it hard to
refer to binaries in code.
PiperOrigin-RevId: 209010854
Change-Id: I6c9da1ac3bbe79766868a3b14222dd42d03b4ec5 |
260,028 | 16.08.2018 16:27:14 | 25,200 | aeec7a4c007ac53401e05bf72894a3b998eead95 | fs: Support possible and online knobs for cpu
Some linux commands depend on /sys/devices/system/cpu/possible, such
as 'lscpu'.
Add 2 knobs for cpu:
/sys/devices/system/cpu/possible
/sys/devices/system/cpu/online
Both the values are '0 - Kernel.ApplicationCores()-1'. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/sys/BUILD",
"new_path": "pkg/sentry/fs/sys/BUILD",
"diff": "@@ -6,6 +6,7 @@ go_library(\nname = \"sys\",\nsrcs = [\n\"device.go\",\n+ \"devices.go\",\n\"fs.go\",\n\"sys.go\",\n],\n@@ -16,6 +17,8 @@ go_library(\n\"//pkg/sentry/device\",\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/fs/ramfs\",\n+ \"//pkg/sentry/kernel\",\n\"//pkg/sentry/usermem\",\n+ \"//pkg/syserror\",\n],\n)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "pkg/sentry/fs/sys/devices.go",
"diff": "+// Copyright 2018 Google Inc.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package sys\n+\n+import (\n+ \"fmt\"\n+ \"io\"\n+\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/kernel\"\n+ \"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n+ \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n+)\n+\n+// +stateify savable\n+type cpunum struct {\n+ ramfs.Entry\n+}\n+\n+func (c *cpunum) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {\n+ if offset < 0 {\n+ return 0, syserror.EINVAL\n+ }\n+\n+ k := kernel.KernelFromContext(ctx)\n+ if k == nil {\n+ return 0, io.EOF\n+ }\n+\n+ str := []byte(fmt.Sprintf(\"0-%d\\n\", k.ApplicationCores()-1))\n+ if offset >= int64(len(str)) {\n+ return 0, io.EOF\n+ }\n+\n+ n, err := dst.CopyOut(ctx, str[offset:])\n+ return int64(n), err\n+}\n+\n+func newPossible(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ c := &cpunum{}\n+ c.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444))\n+ return newFile(c, msrc)\n+}\n+\n+func newCPU(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ return newDir(ctx, msrc, map[string]*fs.Inode{\n+ \"possible\": newPossible(ctx, msrc),\n+ \"online\": newPossible(ctx, msrc),\n+ })\n+}\n+\n+func newSystemDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ return newDir(ctx, msrc, map[string]*fs.Inode{\n+ \"cpu\": newCPU(ctx, msrc),\n+ })\n+}\n+\n+func newDevicesDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n+ return newDir(ctx, msrc, map[string]*fs.Inode{\n+ \"system\": newSystemDir(ctx, msrc),\n+ })\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/sys/sys.go",
"new_path": "pkg/sentry/fs/sys/sys.go",
"diff": "@@ -22,13 +22,25 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/usermem\"\n)\n+// sys is a root sys node.\n+//\n// +stateify savable\n-type dir struct {\n+type sys struct {\nramfs.Dir\n}\n+func newFile(node fs.InodeOperations, msrc *fs.MountSource) *fs.Inode {\n+ sattr := fs.StableAttr{\n+ DeviceID: sysfsDevice.DeviceID(),\n+ InodeID: sysfsDevice.NextIno(),\n+ BlockSize: usermem.PageSize,\n+ Type: fs.SpecialFile,\n+ }\n+ return fs.NewInode(node, msrc, sattr)\n+}\n+\nfunc newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.Inode) *fs.Inode {\n- d := &dir{}\n+ d := &sys{}\nd.InitDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555))\nreturn fs.NewInode(d, msrc, fs.StableAttr{\nDeviceID: sysfsDevice.DeviceID(),\n@@ -48,7 +60,7 @@ func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode {\n\"bus\": newDir(ctx, msrc, nil),\n\"class\": newDir(ctx, msrc, nil),\n\"dev\": newDir(ctx, msrc, nil),\n- \"devices\": newDir(ctx, msrc, nil),\n+ \"devices\": newDevicesDir(ctx, msrc),\n\"firmware\": newDir(ctx, msrc, nil),\n\"fs\": newDir(ctx, msrc, nil),\n\"kernel\": newDir(ctx, msrc, nil),\n"
}
] | Go | Apache License 2.0 | google/gvisor | fs: Support possible and online knobs for cpu
Some linux commands depend on /sys/devices/system/cpu/possible, such
as 'lscpu'.
Add 2 knobs for cpu:
/sys/devices/system/cpu/possible
/sys/devices/system/cpu/online
Both the values are '0 - Kernel.ApplicationCores()-1'.
Change-Id: Iabd8a4e559cbb630ed249686b92c22b4e7120663
PiperOrigin-RevId: 209070163 |
259,992 | 17.08.2018 13:05:59 | 25,200 | 11800311a537bf1286f71ab419fa251a1e81e54f | Add nonExclusiveFS dimension to more tests
The ones using 'kvm' actually mean that they don't want overlay. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -215,6 +215,7 @@ const (\n)\nvar all = []configOption{overlay, kvm, nonExclusiveFS}\n+var noOverlay = []configOption{kvm, nonExclusiveFS}\n// configs generates different configurations to run tests.\nfunc configs(opts ...configOption) []*boot.Config {\n@@ -557,7 +558,7 @@ func TestExec(t *testing.T) {\n// be the next consecutive number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\ndir, err := ioutil.TempDir(\"\", \"checkpoint-test\")\n@@ -716,7 +717,7 @@ func TestUnixDomainSockets(t *testing.T) {\n)\n// Skip overlay because test requires writing to host file.\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\ndir, err := ioutil.TempDir(\"\", \"uds-test\")\n@@ -852,7 +853,7 @@ func TestUnixDomainSockets(t *testing.T) {\n// It will then unpause and confirm that both processes are running. Then it will\n// wait until one sleep completes and check to make sure the other is running.\nfunc TestPauseResume(t *testing.T) {\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n@@ -1208,7 +1209,7 @@ func TestConsoleSocket(t *testing.T) {\n// TestRunNonRoot checks that sandbox can be configured when running as\n// non-privileged user.\nfunc TestRunNonRoot(t *testing.T) {\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\nspec := testutil.NewSpecWithArgs(\"/bin/true\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add nonExclusiveFS dimension to more tests
The ones using 'kvm' actually mean that they don't want overlay.
PiperOrigin-RevId: 209194318
Change-Id: I941a443cb6d783e2c80cf66eb8d8630bcacdb574 |
259,992 | 20.08.2018 11:25:42 | 25,200 | 0fc7b306959e83ebf14792206c9a626490b02c2d | Standardize mounts in tests
Tests get a readonly rootfs mapped to / (which was the case before)
and writable TEST_TMPDIR. This makes it easier to setup containers to
write to files and to share state between test and containers. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -558,17 +558,19 @@ func TestExec(t *testing.T) {\n// be the next consecutive number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n- for _, conf := range configs(noOverlay...) {\n+ //\n+ // TODO: Skip nonExclusiveFS because $TEST_TMPDIR mount is\n+ // mistakenly marked as RO after revalidation.\n+ for _, conf := range configs(kvm) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- dir, err := ioutil.TempDir(\"\", \"checkpoint-test\")\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"checkpoint-test\")\nif err != nil {\nt.Fatalf(\"ioutil.TempDir failed: %v\", err)\n}\nif err := os.Chmod(dir, 0777); err != nil {\nt.Fatalf(\"error chmoding file: %q, %v\", dir, err)\n}\n- defer os.RemoveAll(dir)\noutputPath := filepath.Join(dir, \"output\")\noutputFile, err := createWriteableOutputFile(outputPath)\n@@ -577,14 +579,8 @@ func TestCheckpointRestore(t *testing.T) {\n}\ndefer outputFile.Close()\n- script := \"for ((i=0; ;i++)); do echo $i >> /tmp2/output; sleep 1; done\"\n+ script := fmt.Sprintf(\"for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done\", outputPath)\nspec := testutil.NewSpecWithArgs(\"bash\", \"-c\", script)\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Type: \"bind\",\n- Destination: \"/tmp2\",\n- Source: dir,\n- })\n-\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -712,52 +708,38 @@ func TestCheckpointRestore(t *testing.T) {\nfunc TestUnixDomainSockets(t *testing.T) {\nconst (\noutput = \"uds_output\"\n- goferRoot = \"/tmp2\"\nsocket = \"uds_socket\"\n)\n// Skip overlay because test requires writing to host file.\n- for _, conf := range configs(noOverlay...) {\n+ //\n+ // TODO: Skip nonExclusiveFS because $TEST_TMPDIR mount is\n+ // mistakenly marked as RO after revalidation.\n+ for _, conf := range configs(kvm) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- dir, err := ioutil.TempDir(\"\", \"uds-test\")\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"uds-test\")\nif err != nil {\nt.Fatalf(\"ioutil.TempDir failed: %v\", err)\n}\n- if err := os.Chmod(dir, 0777); err != nil {\n- t.Fatalf(\"error chmoding file: %q, %v\", dir, err)\n- }\ndefer os.RemoveAll(dir)\noutputPath := filepath.Join(dir, output)\n-\n- outputFile, err := createWriteableOutputFile(outputPath)\n+ outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)\nif err != nil {\nt.Fatalf(\"error creating output file: %v\", err)\n}\ndefer outputFile.Close()\n- // Get file path for corresponding output file in sandbox.\n- outputFileSandbox := filepath.Join(goferRoot, output)\n-\napp, err := testutil.FindFile(\"runsc/container/uds_test_app\")\nif err != nil {\nt.Fatal(\"error finding uds_test_app:\", err)\n}\nsocketPath := filepath.Join(dir, socket)\n- socketPathSandbox := filepath.Join(goferRoot, socket)\ndefer os.Remove(socketPath)\n- spec := testutil.NewSpecWithArgs(app, \"--file\", outputFileSandbox,\n- \"--socket\", socketPathSandbox)\n-\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Type: \"bind\",\n- Destination: goferRoot,\n- Source: dir,\n- })\n-\n+ spec := testutil.NewSpecWithArgs(app, \"--file\", outputPath, \"--socket\", socketPath)\nspec.Process.User = specs.User{\nUID: uint32(os.Getuid()),\nGID: uint32(os.Getgid()),\n@@ -811,7 +793,7 @@ func TestUnixDomainSockets(t *testing.T) {\nif err := os.Remove(outputPath); err != nil {\nt.Fatalf(\"error removing file\")\n}\n- outputFile2, err := createWriteableOutputFile(outputPath)\n+ outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)\nif err != nil {\nt.Fatalf(\"error creating output file: %v\", err)\n}\n@@ -858,20 +840,11 @@ func TestPauseResume(t *testing.T) {\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"20\")\n- dir, err := ioutil.TempDir(\"\", \"pause-test\")\n- if err != nil {\n- t.Fatalf(\"ioutil.TempDir failed: %v\", err)\n- }\n- lock, err := ioutil.TempFile(dir, \"lock\")\n+ lock, err := ioutil.TempFile(testutil.TmpDir(), \"lock\")\nif err != nil {\nt.Fatalf(\"error creating output file: %v\", err)\n}\ndefer lock.Close()\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Type: \"bind\",\n- Destination: \"/tmp2\",\n- Source: dir,\n- })\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n@@ -908,7 +881,7 @@ func TestPauseResume(t *testing.T) {\n},\n}\n- script := fmt.Sprintf(\"while [[ -f /tmp2/%s ]]; do sleep 0.1; done\", filepath.Base(lock.Name()))\n+ script := fmt.Sprintf(\"while [[ -f %q ]]; do sleep 0.1; done\", lock.Name())\nexecArgs := control.ExecArgs{\nFilename: \"/bin/bash\",\nArgv: []string{\"bash\", \"-c\", script},\n@@ -1040,14 +1013,6 @@ func TestCapabilities(t *testing.T) {\nt.Logf(\"Running test with conf: %+v\", conf)\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n-\n- // We generate files in the host temporary directory.\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Destination: os.TempDir(),\n- Source: os.TempDir(),\n- Type: \"bind\",\n- })\n-\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\nt.Fatalf(\"error setting up container: %v\", err)\n@@ -1218,7 +1183,7 @@ func TestRunNonRoot(t *testing.T) {\n// User that container runs as can't list '$TMP/blocked' and would fail to\n// mount it.\n- dir, err := ioutil.TempDir(\"\", \"blocked\")\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"blocked\")\nif err != nil {\nt.Fatalf(\"ioutil.TempDir() failed: %v\", err)\n}\n@@ -1230,15 +1195,8 @@ func TestRunNonRoot(t *testing.T) {\nt.Fatalf(\"os.MkDir(%q) failed: %v\", dir, err)\n}\n- // We generate files in the host temporary directory.\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Destination: dir,\n- Source: dir,\n- Type: \"bind\",\n- })\n-\nif err := run(spec, conf); err != nil {\n- t.Fatalf(\"error running sadbox: %v\", err)\n+ t.Fatalf(\"error running sandbox: %v\", err)\n}\n}\n}\n@@ -1249,17 +1207,20 @@ func TestMountNewDir(t *testing.T) {\nfor _, conf := range configs(overlay) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- srcDir := path.Join(os.TempDir(), \"src\", \"newdir\", \"anotherdir\")\n+ root, err := ioutil.TempDir(testutil.TmpDir(), \"root\")\n+ if err != nil {\n+ t.Fatal(\"ioutil.TempDir() failed:\", err)\n+ }\n+ if err := os.Chmod(root, 0755); err != nil {\n+ t.Fatalf(\"os.Chmod(%q) failed: %v\", root, err)\n+ }\n+\n+ srcDir := path.Join(root, \"src\", \"dir\", \"anotherdir\")\nif err := os.MkdirAll(srcDir, 0755); err != nil {\nt.Fatalf(\"os.MkDir(%q) failed: %v\", srcDir, err)\n}\n- // Attempt to remove dir to ensure it doesn't exist.\n- mountDir := path.Join(os.TempDir(), \"newdir\")\n- if err := os.RemoveAll(mountDir); err != nil {\n- t.Fatalf(\"os.RemoveAll(%q) failed: %v\", mountDir, err)\n- }\n- mountDir = path.Join(mountDir, \"anotherdir\")\n+ mountDir := path.Join(root, \"dir\", \"anotherdir\")\nspec := testutil.NewSpecWithArgs(\"/bin/ls\", mountDir)\nspec.Mounts = append(spec.Mounts, specs.Mount{\n@@ -1269,7 +1230,7 @@ func TestMountNewDir(t *testing.T) {\n})\nif err := run(spec, conf); err != nil {\n- t.Fatalf(\"error running sadbox: %v\", err)\n+ t.Fatalf(\"error running sandbox: %v\", err)\n}\n}\n}\n@@ -1310,13 +1271,13 @@ func TestReadonlyMount(t *testing.T) {\nfor _, conf := range configs(overlay) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- spec := testutil.NewSpecWithArgs(\"/bin/touch\", \"/foo/file\")\n- dir, err := ioutil.TempDir(\"\", \"ro-mount\")\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"ro-mount\")\n+ spec := testutil.NewSpecWithArgs(\"/bin/touch\", path.Join(dir, \"file\"))\nif err != nil {\nt.Fatalf(\"ioutil.TempDir() failed: %v\", err)\n}\nspec.Mounts = append(spec.Mounts, specs.Mount{\n- Destination: \"/foo\",\n+ Destination: dir,\nSource: dir,\nType: \"bind\",\nOptions: []string{\"ro\"},\n@@ -1613,17 +1574,14 @@ func TestContainerVolumeContentsShared(t *testing.T) {\n// the filesystem.\nspec := testutil.NewSpecWithArgs(\"sleep\", \"1000\")\n- // Mount host temp dir inside the sandbox at '/tmp2'.\n- hostTmpDir, err := ioutil.TempDir(\"\", \"root-fs-test\")\n- sandboxTmpDir := \"/tmp2\"\n+ // TODO: $TEST_TMPDIR mount is mistakenly marked as RO after\n+ // revalidation. Remove when it's fixed.\n+ spec.Root.Readonly = false\n+\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"root-fs-test\")\nif err != nil {\nt.Fatalf(\"TempDir failed: %v\", err)\n}\n- spec.Mounts = append(spec.Mounts, specs.Mount{\n- Type: \"bind\",\n- Destination: sandboxTmpDir,\n- Source: hostTmpDir,\n- })\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n@@ -1643,105 +1601,103 @@ func TestContainerVolumeContentsShared(t *testing.T) {\n}\n// File that will be used to check consistency inside/outside sandbox.\n- hostFilename := filepath.Join(hostTmpDir, \"file\")\n- sandboxFilename := filepath.Join(sandboxTmpDir, \"file\")\n+ filename := filepath.Join(dir, \"file\")\n// File does not exist yet. Reading from the sandbox should fail.\nexecArgsTestFile := control.ExecArgs{\nFilename: \"/usr/bin/test\",\n- Argv: []string{\"test\", \"-f\", sandboxFilename},\n+ Argv: []string{\"test\", \"-f\", filename},\n}\nif ws, err := c.Execute(&execArgsTestFile); err != nil {\n- t.Fatalf(\"unexpected error testing file %q: %v\", sandboxFilename, err)\n+ t.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() == 0 {\nt.Errorf(\"test %q exited with code %v, wanted not zero\", ws.ExitStatus(), err)\n}\n// Create the file from outside of the sandbox.\n- if err := ioutil.WriteFile(hostFilename, []byte(\"foobar\"), 0777); err != nil {\n- t.Fatalf(\"error writing to file %q: %v\", hostFilename, err)\n+ if err := ioutil.WriteFile(filename, []byte(\"foobar\"), 0777); err != nil {\n+ t.Fatalf(\"error writing to file %q: %v\", filename, err)\n}\n// Now we should be able to test the file from within the sandbox.\nif ws, err := c.Execute(&execArgsTestFile); err != nil {\n- t.Fatalf(\"unexpected error testing file %q: %v\", sandboxFilename, err)\n+ t.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\n- t.Errorf(\"test %q exited with code %v, wanted zero\", sandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"test %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n}\n// Rename the file from outside of the sandbox.\n- newHostFilename := filepath.Join(hostTmpDir, \"newfile\")\n- newSandboxFilename := filepath.Join(sandboxTmpDir, \"newfile\")\n- if err := os.Rename(hostFilename, newHostFilename); err != nil {\n- t.Fatalf(\"os.Rename(%q, %q) failed: %v\", hostFilename, newHostFilename, err)\n+ newFilename := filepath.Join(dir, \"newfile\")\n+ if err := os.Rename(filename, newFilename); err != nil {\n+ t.Fatalf(\"os.Rename(%q, %q) failed: %v\", filename, newFilename, err)\n}\n// File should no longer exist at the old path within the sandbox.\nif ws, err := c.Execute(&execArgsTestFile); err != nil {\n- t.Fatalf(\"unexpected error testing file %q: %v\", sandboxFilename, err)\n+ t.Fatalf(\"unexpected error testing file %q: %v\", filename, err)\n} else if ws.ExitStatus() == 0 {\n- t.Errorf(\"test %q exited with code %v, wanted not zero\", sandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"test %q exited with code %v, wanted not zero\", filename, ws.ExitStatus())\n}\n// We should be able to test the new filename from within the sandbox.\nexecArgsTestNewFile := control.ExecArgs{\nFilename: \"/usr/bin/test\",\n- Argv: []string{\"test\", \"-f\", newSandboxFilename},\n+ Argv: []string{\"test\", \"-f\", newFilename},\n}\nif ws, err := c.Execute(&execArgsTestNewFile); err != nil {\n- t.Fatalf(\"unexpected error testing file %q: %v\", newSandboxFilename, err)\n+ t.Fatalf(\"unexpected error testing file %q: %v\", newFilename, err)\n} else if ws.ExitStatus() != 0 {\n- t.Errorf(\"test %q exited with code %v, wanted zero\", newSandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"test %q exited with code %v, wanted zero\", newFilename, ws.ExitStatus())\n}\n// Delete the renamed file from outside of the sandbox.\n- if err := os.Remove(newHostFilename); err != nil {\n- t.Fatalf(\"error removing file %q: %v\", hostFilename, err)\n+ if err := os.Remove(newFilename); err != nil {\n+ t.Fatalf(\"error removing file %q: %v\", filename, err)\n}\n// Renamed file should no longer exist at the old path within the sandbox.\nif ws, err := c.Execute(&execArgsTestNewFile); err != nil {\n- t.Fatalf(\"unexpected error testing file %q: %v\", newSandboxFilename, err)\n+ t.Fatalf(\"unexpected error testing file %q: %v\", newFilename, err)\n} else if ws.ExitStatus() == 0 {\n- t.Errorf(\"test %q exited with code %v, wanted not zero\", newSandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"test %q exited with code %v, wanted not zero\", newFilename, ws.ExitStatus())\n}\n// Now create the file from WITHIN the sandbox.\nexecArgsTouch := control.ExecArgs{\nFilename: \"/usr/bin/touch\",\n- Argv: []string{\"touch\", sandboxFilename},\n+ Argv: []string{\"touch\", filename},\nKUID: auth.KUID(os.Getuid()),\nKGID: auth.KGID(os.Getgid()),\n}\nif ws, err := c.Execute(&execArgsTouch); err != nil {\n- t.Fatalf(\"unexpected error touching file %q: %v\", sandboxFilename, err)\n+ t.Fatalf(\"unexpected error touching file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\n- t.Errorf(\"touch %q exited with code %v, wanted zero\", sandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"touch %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n}\n// File should exist outside the sandbox.\n- if _, err := os.Stat(hostFilename); err != nil {\n- t.Errorf(\"stat %q got error %v, wanted nil\", hostFilename, err)\n+ if _, err := os.Stat(filename); err != nil {\n+ t.Errorf(\"stat %q got error %v, wanted nil\", filename, err)\n}\n// File should exist outside the sandbox.\n- if _, err := os.Stat(hostFilename); err != nil {\n- t.Errorf(\"stat %q got error %v, wanted nil\", hostFilename, err)\n+ if _, err := os.Stat(filename); err != nil {\n+ t.Errorf(\"stat %q got error %v, wanted nil\", filename, err)\n}\n// Delete the file from within the sandbox.\nexecArgsRemove := control.ExecArgs{\nFilename: \"/bin/rm\",\n- Argv: []string{\"rm\", sandboxFilename},\n+ Argv: []string{\"rm\", filename},\n}\nif ws, err := c.Execute(&execArgsRemove); err != nil {\n- t.Fatalf(\"unexpected error removing file %q: %v\", sandboxFilename, err)\n+ t.Fatalf(\"unexpected error removing file %q: %v\", filename, err)\n} else if ws.ExitStatus() != 0 {\n- t.Errorf(\"remove %q exited with code %v, wanted zero\", sandboxFilename, ws.ExitStatus())\n+ t.Errorf(\"remove %q exited with code %v, wanted zero\", filename, ws.ExitStatus())\n}\n// File should not exist outside the sandbox.\n- if _, err := os.Stat(hostFilename); !os.IsNotExist(err) {\n- t.Errorf(\"stat %q got error %v, wanted ErrNotExist\", hostFilename, err)\n+ if _, err := os.Stat(filename); !os.IsNotExist(err) {\n+ t.Errorf(\"stat %q got error %v, wanted ErrNotExist\", filename, err)\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -377,7 +377,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund\n// outside.\naddr := boot.ControlSocketAddr(s.ID)\nfd, err := server.CreateSocket(addr)\n- log.Infof(\"creating sandbox process with addr: %s\", addr)\n+ log.Infof(\"Creating sandbox process with addr: %s\", addr[1:]) // skip \"\\00\".\nif err != nil {\nreturn fmt.Errorf(\"error creating control server socket for sandbox %q: %v\", s.ID, err)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/test/testutil/testutil.go",
"new_path": "runsc/test/testutil/testutil.go",
"diff": "@@ -35,6 +35,16 @@ import (\n// RaceEnabled is set to true if it was built with '--race' option.\nvar RaceEnabled = false\n+// TmpDir returns the absolute path to a writable directory that can be used as\n+// scratch by the test.\n+func TmpDir() string {\n+ dir := os.Getenv(\"TEST_TMPDIR\")\n+ if dir == \"\" {\n+ dir = \"/tmp\"\n+ }\n+ return dir\n+}\n+\n// ConfigureExePath configures the executable for runsc in the test environment.\nfunc ConfigureExePath() error {\npath, err := FindFile(\"runsc/runsc\")\n@@ -102,7 +112,7 @@ func TestConfig() *boot.Config {\n// NewSpecWithArgs creates a simple spec with the given args suitable for use\n// in tests.\nfunc NewSpecWithArgs(args ...string) *specs.Spec {\n- spec := &specs.Spec{\n+ return &specs.Spec{\n// The host filesystem root is the container root.\nRoot: &specs.Root{\nPath: \"/\",\n@@ -114,13 +124,23 @@ func NewSpecWithArgs(args ...string) *specs.Spec {\n\"PATH=\" + os.Getenv(\"PATH\"),\n},\n},\n+ Mounts: []specs.Mount{\n+ // Root is readonly, but many tests want to write to tmpdir.\n+ // This creates a writable mount inside the root. Also, when tmpdir points\n+ // to \"/tmp\", it makes the the actual /tmp to be mounted and not a tmpfs\n+ // inside the sentry.\n+ specs.Mount{\n+ Type: \"bind\",\n+ Destination: TmpDir(),\n+ Source: TmpDir(),\n+ },\n+ },\n}\n- return spec\n}\n// SetupRootDir creates a root directory for containers.\nfunc SetupRootDir() (string, error) {\n- rootDir, err := ioutil.TempDir(\"\", \"containers\")\n+ rootDir, err := ioutil.TempDir(TmpDir(), \"containers\")\nif err != nil {\nreturn \"\", fmt.Errorf(\"error creating root dir: %v\", err)\n}\n@@ -141,7 +161,7 @@ func SetupContainer(spec *specs.Spec, conf *boot.Config) (rootDir, bundleDir str\n// SetupContainerInRoot creates a bundle for the container, generates a test\n// config, and writes the spec to config.json in the bundle dir.\nfunc SetupContainerInRoot(rootDir string, spec *specs.Spec, conf *boot.Config) (bundleDir string, err error) {\n- bundleDir, err = ioutil.TempDir(\"\", \"bundle\")\n+ bundleDir, err = ioutil.TempDir(TmpDir(), \"bundle\")\nif err != nil {\nreturn \"\", fmt.Errorf(\"error creating bundle dir: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Standardize mounts in tests
Tests get a readonly rootfs mapped to / (which was the case before)
and writable TEST_TMPDIR. This makes it easier to setup containers to
write to files and to share state between test and containers.
PiperOrigin-RevId: 209453224
Change-Id: I4d988e45dc0909a0450a3bb882fe280cf9c24334 |
259,854 | 21.08.2018 11:15:15 | 25,200 | 9c407382b031f16160f83383ef8b0d419457829a | Fix races in kernel.(*Task).Value() | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/task.go",
"new_path": "pkg/sentry/kernel/task.go",
"diff": "@@ -568,12 +568,18 @@ func (t *Task) Value(key interface{}) interface{} {\ncase CtxPIDNamespace:\nreturn t.tg.pidns\ncase CtxUTSNamespace:\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\nreturn t.utsns\ncase CtxIPCNamespace:\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\nreturn t.ipcns\ncase CtxTask:\nreturn t\ncase auth.CtxCredentials:\n+ t.mu.Lock()\n+ defer t.mu.Unlock()\nreturn t.creds\ncase context.CtxThreadGroupID:\nreturn int32(t.ThreadGroup().ID())\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix races in kernel.(*Task).Value()
PiperOrigin-RevId: 209627180
Change-Id: Idc84afd38003427e411df6e75abfabd9174174e1 |
259,992 | 21.08.2018 13:13:34 | 25,200 | d6d165cb0b8147461388287ffd4cfee221940123 | Initial change for multi-gofer support | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/debug.go",
"new_path": "runsc/cmd/debug.go",
"diff": "@@ -92,7 +92,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n}\nlog.Infof(\"Found sandbox %q, PID: %d\", c.Sandbox.ID, c.Sandbox.Pid)\n- if !c.Sandbox.IsRunning() {\n+ if !c.IsRunning() {\nFatalf(\"sandbox %q is not running\", c.Sandbox.ID)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -23,10 +23,10 @@ go_library(\ndeps = [\n\"//pkg/log\",\n\"//pkg/sentry/control\",\n- \"//pkg/syserror\",\n\"//runsc/boot\",\n\"//runsc/sandbox\",\n\"//runsc/specutils\",\n+ \"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container.go",
"new_path": "runsc/container/container.go",
"diff": "package container\nimport (\n+ \"context\"\n\"encoding/json\"\n\"fmt\"\n\"io/ioutil\"\n@@ -27,10 +28,10 @@ import (\n\"syscall\"\n\"time\"\n+ \"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/control\"\n- \"gvisor.googlesource.com/gvisor/pkg/syserror\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/sandbox\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -89,6 +90,10 @@ type Container struct {\n// Status is the current container Status.\nStatus Status `json:\"status\"`\n+ // GoferPid is the pid of the gofer running along side the sandbox. May\n+ // be 0 if the gofer has been killed or it's not being used.\n+ GoferPid int `json:\"goferPid\"`\n+\n// Sandbox is the sandbox this container is running in. It will be nil\n// if the container is not in state Running or Created.\nSandbox *sandbox.Sandbox `json:\"sandbox\"`\n@@ -130,12 +135,11 @@ func Load(rootDir, id string) (*Container, error) {\n// This is inherently racey.\nif c.Status == Running || c.Status == Created {\n// Check if the sandbox process is still running.\n- if c.Sandbox.IsRunning() {\n+ if c.IsRunning() {\n// TODO: Send a message into the sandbox to\n// see if this particular container is still running.\n} else {\n- // Sandbox no longer exists, so this container\n- // definitely does not exist.\n+ // Sandbox no longer exists, so this container definitely does not exist.\nc.Status = Stopped\nc.Sandbox = nil\n}\n@@ -221,12 +225,13 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo\nlog.Debugf(\"Creating new sandbox for container %q\", id)\n// Start a new sandbox for this container. Any errors after this point\n// must destroy the container.\n- s, err := sandbox.Create(id, spec, conf, bundleDir, consoleSocket)\n+ s, goferPid, err := sandbox.Create(id, spec, conf, bundleDir, consoleSocket)\nif err != nil {\nc.Destroy()\nreturn nil, err\n}\nc.Sandbox = s\n+ c.GoferPid = goferPid\n} else {\n// This is sort of confusing. For a sandbox with a root\n// container and a child container in it, runsc sees:\n@@ -398,7 +403,18 @@ func (c *Container) WaitPID(pid int32) (syscall.WaitStatus, error) {\nif c.Status == Stopped {\nreturn 0, fmt.Errorf(\"container is stopped\")\n}\n- return c.Sandbox.WaitPID(pid, c.ID)\n+ ws, err := c.Sandbox.WaitPID(pid, c.ID)\n+ if err != nil {\n+ return 0, err\n+ }\n+ if c.Sandbox.IsRootContainer(c.ID) {\n+ // If waiting for the root, give some time for the sandbox process to exit\n+ // to prevent races with resources that might still be in use.\n+ if err := c.waitForStopped(); err != nil {\n+ return 0, err\n+ }\n+ }\n+ return ws, nil\n}\n// Signal sends the signal to the container.\n@@ -502,6 +518,14 @@ func (c *Container) Destroy() error {\nlog.Warningf(\"Failed to destroy sandbox %q: %v\", c.Sandbox.ID, err)\n}\n}\n+ if c.GoferPid != 0 {\n+ log.Debugf(\"Killing gofer for container %q, PID: %d\", c.ID, c.GoferPid)\n+ if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {\n+ log.Warningf(\"error sending signal %d to pid %d: %v\", syscall.SIGKILL, c.GoferPid, err)\n+ } else {\n+ c.GoferPid = 0\n+ }\n+ }\nc.Sandbox = nil\nc.Status = Stopped\n@@ -509,29 +533,38 @@ func (c *Container) Destroy() error {\nreturn nil\n}\n-// DestroyAndWait frees all resources associated with the container\n-// and waits for destroy to finish before returning.\n-func (c *Container) DestroyAndWait() error {\n- sandboxPid := c.Sandbox.Pid\n- goferPid := c.Sandbox.GoferPid\n-\n- if err := c.Destroy(); err != nil {\n- return fmt.Errorf(\"error destroying container %v: %v\", c, err)\n- }\n-\n- if sandboxPid != 0 {\n- if err := waitForDeath(sandboxPid, 5*time.Second); err != nil {\n- return fmt.Errorf(\"error waiting for sandbox death: %v\", err)\n+// IsRunning returns true if the sandbox or gofer process is running.\n+func (c *Container) IsRunning() bool {\n+ if c.Status == Stopped {\n+ return false\n}\n+ if c.Sandbox != nil && c.Sandbox.IsRunning() {\n+ return true\n}\n+ if c.GoferPid != 0 {\n+ // Send a signal 0 to the gofer process.\n+ if err := syscall.Kill(c.GoferPid, 0); err == nil {\n+ log.Warningf(\"Found orphan gofer process, pid: %d\", c.GoferPid)\n+ // Attempt to kill gofer if it's orphan.\n+ syscall.Kill(c.GoferPid, syscall.SIGKILL)\n- if goferPid != 0 {\n- if err := waitForDeath(goferPid, 5*time.Second); err != nil {\n- return fmt.Errorf(\"error waiting for gofer death: %v\", err)\n+ // Don't wait for gofer to die. Return 'running' and hope gofer is dead\n+ // next time around.\n+ return true\n+ }\n}\n+ return false\n}\n- return nil\n+// DestroyAndWait frees all resources associated with the container\n+// and waits for destroy to finish before returning.\n+//\n+// TODO: This only works for single container.\n+func (c *Container) DestroyAndWait() error {\n+ if err := c.Destroy(); err != nil {\n+ return fmt.Errorf(\"error destroying container %v: %v\", c, err)\n+ }\n+ return c.waitForStopped()\n}\n// save saves the container metadata to a file.\n@@ -551,29 +584,15 @@ func (c *Container) save() error {\nreturn nil\n}\n-// waitForDeath ensures that process is dead before proceeding.\n-//\n-// This is racy because the kernel can potentially reuse the pid in the time\n-// between the process' death and the first check after the process has ended.\n-func waitForDeath(pid int, timeout time.Duration) error {\n- backoff := 1 * time.Millisecond\n- for start := time.Now(); time.Now().Sub(start) < timeout; {\n-\n- if err := syscall.Kill(pid, 0); err != nil {\n- if err == syserror.ESRCH {\n- // pid does not exist so process must have died\n- return nil\n- }\n- return fmt.Errorf(\"error killing pid (%d): %v\", pid, err)\n- }\n- // pid is still alive.\n-\n- // Process continues to run, backoff and retry.\n- time.Sleep(backoff)\n- backoff *= 2\n- if backoff > 1*time.Second {\n- backoff = 1 * time.Second\n+func (c *Container) waitForStopped() error {\n+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n+ defer cancel()\n+ b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)\n+ op := func() error {\n+ if !c.IsRunning() {\n+ return fmt.Errorf(\"container is still running\")\n}\n+ return nil\n}\n- return fmt.Errorf(\"timed out waiting for process (%d)\", pid)\n+ return backoff.Retry(op, b)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -350,7 +350,7 @@ func TestLifecycle(t *testing.T) {\n// ourselves.\np, _ := os.FindProcess(s.Sandbox.Pid)\np.Wait()\n- g, _ := os.FindProcess(s.Sandbox.GoferPid)\n+ g, _ := os.FindProcess(s.GoferPid)\ng.Wait()\n// Load the container from disk and check the status.\n@@ -1701,3 +1701,48 @@ func TestContainerVolumeContentsShared(t *testing.T) {\nt.Errorf(\"stat %q got error %v, wanted ErrNotExist\", filename, err)\n}\n}\n+\n+func TestGoferExits(t *testing.T) {\n+ spec := testutil.NewSpecWithArgs(\"/bin/sleep\", \"10000\")\n+ conf := testutil.TestConfig()\n+ rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer os.RemoveAll(rootDir)\n+ defer os.RemoveAll(bundleDir)\n+\n+ // Create and start the container.\n+ c, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer c.Destroy()\n+ if err := c.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n+ }\n+\n+ sandboxProc, err := os.FindProcess(c.Sandbox.Pid)\n+ if err != nil {\n+ t.Fatalf(\"error finding sandbox process: %v\", err)\n+ }\n+ gofer, err := os.FindProcess(c.GoferPid)\n+ if err != nil {\n+ t.Fatalf(\"error finding sandbox process: %v\", err)\n+ }\n+\n+ // Kill sandbox and expect gofer to exit on its own.\n+ if err := sandboxProc.Kill(); err != nil {\n+ t.Fatalf(\"error killing sandbox process: %v\", err)\n+ }\n+ if _, err := sandboxProc.Wait(); err != nil {\n+ t.Fatalf(\"error waiting for sandbox process: %v\", err)\n+ }\n+\n+ if _, err := gofer.Wait(); err != nil {\n+ t.Fatalf(\"error waiting for gofer process: %v\", err)\n+ }\n+ if c.IsRunning() {\n+ t.Errorf(\"container shouldn't be running, container: %+v\", c)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/BUILD",
"new_path": "runsc/sandbox/BUILD",
"diff": "package(licenses = [\"notice\"]) # Apache 2.0\n-load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\", \"go_test\")\n+load(\"@io_bazel_rules_go//go:def.bzl\", \"go_library\")\ngo_library(\nname = \"sandbox\",\n@@ -29,17 +29,3 @@ go_library(\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n-\n-go_test(\n- name = \"sandbox_test\",\n- size = \"small\",\n- srcs = [\"sandbox_test.go\"],\n- data = [\n- \"//runsc\",\n- ],\n- embed = [\":sandbox\"],\n- deps = [\n- \"//pkg/log\",\n- \"//runsc/test/testutil\",\n- ],\n-)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -40,46 +40,45 @@ import (\n// It is used to start/stop sandbox process (and associated processes like\n// gofers), as well as for running and manipulating containers inside a running\n// sandbox.\n+//\n+// Note: Sandbox must be immutable because a copy of it is saved for each\n+// container and changes would not be synchronized to all of them.\ntype Sandbox struct {\n- // ID is the id of the sandbox. By convention, this is the same ID as\n- // the first container run in the sandbox.\n+ // ID is the id of the sandbox (immutable). By convention, this is the same\n+ // ID as the first container run in the sandbox.\nID string `json:\"id\"`\n- // Pid is the pid of the running sandbox. May be 0 is the sandbox is\n- // not running.\n+ // Pid is the pid of the running sandbox (immutable). May be 0 is the sandbox\n+ // is not running.\nPid int `json:\"pid\"`\n-\n- // GoferPid is the pid of the gofer running along side the sandbox. May\n- // be 0 if the gofer has been killed or it's not being used.\n- GoferPid int `json:\"goferPid\"`\n}\n// Create creates the sandbox process.\n-func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket string) (*Sandbox, error) {\n+func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket string) (*Sandbox, int, error) {\ns := &Sandbox{ID: id}\nbinPath, err := specutils.BinPath()\nif err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n// Create the gofer process.\n- ioFiles, err := s.createGoferProcess(spec, conf, bundleDir, binPath)\n+ goferPid, ioFiles, err := s.createGoferProcess(spec, conf, bundleDir, binPath)\nif err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n// Create the sandbox process.\nif err := s.createSandboxProcess(spec, conf, bundleDir, consoleSocket, binPath, ioFiles); err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n// Wait for the control server to come up (or timeout).\nif err := s.waitForCreated(10 * time.Second); err != nil {\n- return nil, err\n+ return nil, 0, err\n}\n- return s, nil\n+ return s, goferPid, nil\n}\n// StartRoot starts running the root container process inside the sandbox.\n@@ -288,10 +287,10 @@ func (s *Sandbox) connError(err error) error {\nreturn fmt.Errorf(\"error connecting to control server at pid %d: %v\", s.Pid, err)\n}\n-func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir, binPath string) ([]*os.File, error) {\n+func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir, binPath string) (int, []*os.File, error) {\nif conf.FileAccess == boot.FileAccessDirect {\n// Don't start a gofer. The sandbox will access host FS directly.\n- return nil, nil\n+ return 0, nil, nil\n}\n// Start with the general config flags.\n@@ -315,7 +314,7 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\nfor nextFD = 3; nextFD-3 < mountCount; nextFD++ {\nsandEnd, goferEnd, err := createSocketPair()\nif err != nil {\n- return nil, err\n+ return 0, nil, err\n}\ndefer goferEnd.Close()\nsandEnds = append(sandEnds, sandEnd)\n@@ -327,7 +326,7 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\naddr := fsgofer.ControlSocketAddr(s.ID)\nserverFD, err := server.CreateSocket(addr)\nif err != nil {\n- return nil, fmt.Errorf(\"error creating control server socket for sandbox %q: %v\", s.ID, err)\n+ return 0, nil, fmt.Errorf(\"error creating control server socket for sandbox %q: %v\", s.ID, err)\n}\n// Add the control server fd.\n@@ -349,11 +348,10 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\n// Start the gofer in the given namespace.\nlog.Debugf(\"Starting gofer: %s %v\", binPath, args)\nif err := startInNS(cmd, nss); err != nil {\n- return nil, err\n+ return 0, nil, err\n}\n- s.GoferPid = cmd.Process.Pid\nlog.Infof(\"Gofer started, pid: %d\", cmd.Process.Pid)\n- return sandEnds, nil\n+ return cmd.Process.Pid, sandEnds, nil\n}\n// createSocketPair creates a pair of files wrapping a socket pair.\n@@ -562,24 +560,9 @@ func (s *Sandbox) WaitPID(pid int32, cid string) (syscall.WaitStatus, error) {\nPID: pid,\nCID: cid,\n}\n-\nif err := conn.Call(boot.ContainerWaitPID, args, &ws); err != nil {\nreturn ws, fmt.Errorf(\"error waiting on PID %d in sandbox %q: %v\", pid, s.ID, err)\n}\n-\n- if s.IsRootContainer(cid) {\n- // If waiting for the root, give some time for the sandbox process to exit\n- // to prevent races with resources that might still be in use.\n- timeout := time.Now().Add(time.Second)\n- log.Debugf(\"Waiting for the sandbox process to exit\")\n- for s.IsRunning() {\n- if time.Now().After(timeout) {\n- log.Debugf(\"Timeout waiting for sandbox process to exit\")\n- break\n- }\n- time.Sleep(100 * time.Millisecond)\n- }\n- }\nreturn ws, nil\n}\n@@ -602,15 +585,8 @@ func (s *Sandbox) Destroy() error {\nif s.Pid != 0 {\n// TODO: Too harsh?\nlog.Debugf(\"Killing sandbox %q\", s.ID)\n- killProcess(s.Pid, unix.SIGKILL)\n- s.Pid = 0\n+ signalProcess(s.Pid, unix.SIGKILL)\n}\n- if s.GoferPid != 0 {\n- log.Debugf(\"Killing gofer for sandbox %q\", s.ID)\n- killProcess(s.GoferPid, unix.SIGKILL)\n- s.GoferPid = 0\n- }\n-\nreturn nil\n}\n@@ -689,19 +665,8 @@ func (s *Sandbox) Resume(cid string) error {\nfunc (s *Sandbox) IsRunning() bool {\nif s.Pid != 0 {\n// Send a signal 0 to the sandbox process.\n- if err := killProcess(s.Pid, 0); err == nil {\n- return true\n- }\n- }\n- if s.GoferPid != 0 {\n- // Send a signal 0 to the gofer process.\n- if err := killProcess(s.GoferPid, 0); err == nil {\n- log.Warningf(\"Found orphan gofer process, pid: %d\", s.GoferPid)\n- // Attempt to kill gofer if it's orphan.\n- killProcess(s.GoferPid, unix.SIGKILL)\n-\n- // Don't wait for gofer to die. Return 'running' and hope gofer is dead\n- // next time around.\n+ if err := signalProcess(s.Pid, 0); err == nil {\n+ // Succeeded, process is running.\nreturn true\n}\n}\n@@ -724,10 +689,10 @@ func (s *Sandbox) Stacks() (string, error) {\nreturn stacks, nil\n}\n-// killProcess sends a signal to the host process (i.e. a sandbox or gofer\n+// signalProcess sends a signal to the host process (i.e. a sandbox or gofer\n// process). Sandbox.Signal should be used to send a signal to a process\n// running inside the sandbox.\n-func killProcess(pid int, sig syscall.Signal) error {\n+func signalProcess(pid int, sig syscall.Signal) error {\nif err := syscall.Kill(pid, sig); err != nil {\nreturn fmt.Errorf(\"error sending signal %d to pid %d: %v\", sig, pid, err)\n}\n"
},
{
"change_type": "DELETE",
"old_path": "runsc/sandbox/sandbox_test.go",
"new_path": null,
"diff": "-// Copyright 2018 Google Inc.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package sandbox\n-\n-import (\n- \"os\"\n- \"testing\"\n-\n- \"gvisor.googlesource.com/gvisor/pkg/log\"\n- \"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n-)\n-\n-func init() {\n- log.SetLevel(log.Debug)\n- if err := testutil.ConfigureExePath(); err != nil {\n- panic(err.Error())\n- }\n-}\n-\n-func TestGoferExits(t *testing.T) {\n- spec := testutil.NewSpecWithArgs(\"/bin/sleep\", \"10000\")\n- conf := testutil.TestConfig()\n- rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\n- if err != nil {\n- t.Fatalf(\"error setting up container: %v\", err)\n- }\n- defer os.RemoveAll(rootDir)\n- defer os.RemoveAll(bundleDir)\n-\n- // Create, start and wait for the container.\n- s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\")\n- if err != nil {\n- t.Fatalf(\"error creating container: %v\", err)\n- }\n- defer s.Destroy()\n- if err := s.StartRoot(spec, conf); err != nil {\n- t.Fatalf(\"error starting container: %v\", err)\n- }\n-\n- sandboxProc, err := os.FindProcess(s.Pid)\n- if err != nil {\n- t.Fatalf(\"error finding sandbox process: %v\", err)\n- }\n- gofer, err := os.FindProcess(s.GoferPid)\n- if err != nil {\n- t.Fatalf(\"error finding sandbox process: %v\", err)\n- }\n-\n- // Kill sandbox and expect gofer to exit on its own.\n- if err := sandboxProc.Kill(); err != nil {\n- t.Fatalf(\"error killing sandbox process: %v\", err)\n- }\n- if _, err := sandboxProc.Wait(); err != nil {\n- t.Fatalf(\"error waiting for sandbox process: %v\", err)\n- }\n-\n- if _, err := gofer.Wait(); err != nil {\n- t.Fatalf(\"error waiting for gofer process: %v\", err)\n- }\n- if s.IsRunning() {\n- t.Errorf(\"Sandbox shouldn't be running, sandbox: %+v\", s)\n- }\n-}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Initial change for multi-gofer support
PiperOrigin-RevId: 209647293
Change-Id: I980fca1257ea3fcce796388a049c353b0303a8a5 |
259,992 | 21.08.2018 14:01:14 | 25,200 | a854678bc36065379ca0b988410b4a8318747a3d | Move container_test to the container package | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -39,6 +39,7 @@ go_test(\n\":uds_test_app\",\n\"//runsc\",\n],\n+ embed = [\":container\"],\ntags = [\n\"requires-kvm\",\n],\n@@ -49,7 +50,6 @@ go_test(\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/unet\",\n\"//runsc/boot\",\n- \"//runsc/container\",\n\"//runsc/specutils\",\n\"//runsc/test/testutil\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "// See the License for the specific language governing permissions and\n// limitations under the License.\n-package container_test\n+package container\nimport (\n\"bytes\"\n@@ -38,7 +38,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.googlesource.com/gvisor/pkg/unet\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n- \"gvisor.googlesource.com/gvisor/runsc/container\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n\"gvisor.googlesource.com/gvisor/runsc/test/testutil\"\n)\n@@ -51,7 +50,7 @@ func init() {\n}\n// waitForProcessList waits for the given process list to show up in the container.\n-func waitForProcessList(s *container.Container, expected []*control.Process) error {\n+func waitForProcessList(s *Container, expected []*control.Process) error {\nvar got []*control.Process\nfor start := time.Now(); time.Now().Sub(start) < 10*time.Second; {\nvar err error\n@@ -90,7 +89,7 @@ func procListsEqual(got, want []*control.Process) bool {\n// getAndCheckProcLists is similar to waitForProcessList, but does not wait and retry the\n// test for equality. This is because we already confirmed that exec occurred.\n-func getAndCheckProcLists(cont *container.Container, want []*control.Process) error {\n+func getAndCheckProcLists(cont *Container, want []*control.Process) error {\ngot, err := cont.Processes()\nif err != nil {\nreturn fmt.Errorf(\"error getting process data from container: %v\", err)\n@@ -188,7 +187,7 @@ func run(spec *specs.Spec, conf *boot.Config) error {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nreturn fmt.Errorf(\"error creating container: %v\", err)\n}\n@@ -276,21 +275,21 @@ func TestLifecycle(t *testing.T) {\n}\n// Create the container.\nid := testutil.UniqueContainerID()\n- if _, err := container.Create(id, spec, conf, bundleDir, \"\", \"\"); err != nil {\n+ if _, err := Create(id, spec, conf, bundleDir, \"\", \"\"); err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n// Load the container from disk and check the status.\n- s, err := container.Load(rootDir, id)\n+ s, err := Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, container.Created; got != want {\n+ if got, want := s.Status, Created; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n// List should return the container id.\n- ids, err := container.List(rootDir)\n+ ids, err := List(rootDir)\nif err != nil {\nt.Fatalf(\"error listing containers: %v\", err)\n}\n@@ -303,11 +302,11 @@ func TestLifecycle(t *testing.T) {\nt.Fatalf(\"error starting container: %v\", err)\n}\n// Load the container from disk and check the status.\n- s, err = container.Load(rootDir, id)\n+ s, err = Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, container.Running; got != want {\n+ if got, want := s.Status, Running; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -354,11 +353,11 @@ func TestLifecycle(t *testing.T) {\ng.Wait()\n// Load the container from disk and check the status.\n- s, err = container.Load(rootDir, id)\n+ s, err = Load(rootDir, id)\nif err != nil {\nt.Fatalf(\"error loading container: %v\", err)\n}\n- if got, want := s.Status, container.Stopped; got != want {\n+ if got, want := s.Status, Stopped; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -368,7 +367,7 @@ func TestLifecycle(t *testing.T) {\n}\n// List should not return the container id.\n- ids, err = container.List(rootDir)\n+ ids, err = List(rootDir)\nif err != nil {\nt.Fatalf(\"error listing containers: %v\", err)\n}\n@@ -377,7 +376,7 @@ func TestLifecycle(t *testing.T) {\n}\n// Loading the container by id should fail.\n- if _, err = container.Load(rootDir, id); err == nil {\n+ if _, err = Load(rootDir, id); err == nil {\nt.Errorf(\"expected loading destroyed container to fail, but it did not\")\n}\n}\n@@ -404,7 +403,7 @@ func TestExePath(t *testing.T) {\nt.Fatalf(\"exec: %s, error setting up container: %v\", test.path, err)\n}\n- ws, err := container.Run(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nos.RemoveAll(rootDir)\nos.RemoveAll(bundleDir)\n@@ -437,7 +436,7 @@ func TestAppExitStatus(t *testing.T) {\ndefer os.RemoveAll(rootDir)\ndefer os.RemoveAll(bundleDir)\n- ws, err := container.Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, \"\", \"\")\n+ ws, err := Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error running container: %v\", err)\n}\n@@ -456,7 +455,7 @@ func TestAppExitStatus(t *testing.T) {\ndefer os.RemoveAll(rootDir2)\ndefer os.RemoveAll(bundleDir2)\n- ws, err = container.Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir2, \"\", \"\")\n+ ws, err = Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir2, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error running container: %v\", err)\n}\n@@ -481,7 +480,7 @@ func TestExec(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -589,7 +588,7 @@ func TestCheckpointRestore(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -635,7 +634,7 @@ func TestCheckpointRestore(t *testing.T) {\ndefer outputFile2.Close()\n// Restore into a new container.\n- cont2, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont2, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -674,7 +673,7 @@ func TestCheckpointRestore(t *testing.T) {\ndefer outputFile3.Close()\n// Restore into a new container.\n- cont3, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont3, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -753,7 +752,7 @@ func TestUnixDomainSockets(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -800,7 +799,7 @@ func TestUnixDomainSockets(t *testing.T) {\ndefer outputFile2.Close()\n// Restore into a new container.\n- contRestore, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ contRestore, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -854,7 +853,7 @@ func TestPauseResume(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -902,7 +901,7 @@ func TestPauseResume(t *testing.T) {\nif err := cont.Pause(); err != nil {\nt.Errorf(\"error pausing container: %v\", err)\n}\n- if got, want := cont.Status, container.Paused; got != want {\n+ if got, want := cont.Status, Paused; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -922,7 +921,7 @@ func TestPauseResume(t *testing.T) {\nif err := cont.Resume(); err != nil {\nt.Errorf(\"error pausing container: %v\", err)\n}\n- if got, want := cont.Status, container.Running; got != want {\n+ if got, want := cont.Status, Running; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -957,7 +956,7 @@ func TestPauseResumeStatus(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -970,7 +969,7 @@ func TestPauseResumeStatus(t *testing.T) {\nif err := cont.Pause(); err != nil {\nt.Errorf(\"error pausing container: %v\", err)\n}\n- if got, want := cont.Status, container.Paused; got != want {\n+ if got, want := cont.Status, Paused; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -978,7 +977,7 @@ func TestPauseResumeStatus(t *testing.T) {\nif err := cont.Pause(); err == nil {\nt.Errorf(\"error pausing container that was already paused: %v\", err)\n}\n- if got, want := cont.Status, container.Paused; got != want {\n+ if got, want := cont.Status, Paused; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -986,7 +985,7 @@ func TestPauseResumeStatus(t *testing.T) {\nif err := cont.Resume(); err != nil {\nt.Errorf(\"error resuming container: %v\", err)\n}\n- if got, want := cont.Status, container.Running; got != want {\n+ if got, want := cont.Status, Running; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n@@ -994,7 +993,7 @@ func TestPauseResumeStatus(t *testing.T) {\nif err := cont.Resume(); err == nil {\nt.Errorf(\"error resuming container already running: %v\", err)\n}\n- if got, want := cont.Status, container.Running; got != want {\n+ if got, want := cont.Status, Running; got != want {\nt.Errorf(\"container status got %v, want %v\", got, want)\n}\n}\n@@ -1021,7 +1020,7 @@ func TestCapabilities(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1123,7 +1122,7 @@ func TestConsoleSocket(t *testing.T) {\n// Create the container and pass the socket name.\nid := testutil.UniqueContainerID()\n- s, err := container.Create(id, spec, conf, bundleDir, socketRelPath, \"\")\n+ s, err := Create(id, spec, conf, bundleDir, socketRelPath, \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1249,7 +1248,7 @@ func TestReadonlyRoot(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1292,7 +1291,7 @@ func TestReadonlyMount(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create, start and wait for the container.\n- s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ s, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1334,7 +1333,7 @@ func TestAbbreviatedIDs(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- cont, err := container.Create(cid, spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(cid, spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1351,7 +1350,7 @@ func TestAbbreviatedIDs(t *testing.T) {\ncids[2]: cids[2],\n}\nfor shortid, longid := range unambiguous {\n- if _, err := container.Load(rootDir, shortid); err != nil {\n+ if _, err := Load(rootDir, shortid); err != nil {\nt.Errorf(\"%q should resolve to %q: %v\", shortid, longid, err)\n}\n}\n@@ -1362,7 +1361,7 @@ func TestAbbreviatedIDs(t *testing.T) {\n\"ba\",\n}\nfor _, shortid := range ambiguous {\n- if s, err := container.Load(rootDir, shortid); err == nil {\n+ if s, err := Load(rootDir, shortid); err == nil {\nt.Errorf(\"%q should be ambiguous, but resolved to %q\", shortid, s.ID)\n}\n}\n@@ -1398,7 +1397,7 @@ func TestMultiContainerSanity(t *testing.T) {\ndefer os.RemoveAll(rootDir)\n// Setup the containers.\n- containers := make([]*container.Container, 0, len(containerIDs))\n+ containers := make([]*Container, 0, len(containerIDs))\nfor i, annotations := range containerAnnotations {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\nspec.Annotations = annotations\n@@ -1407,7 +1406,7 @@ func TestMultiContainerSanity(t *testing.T) {\nt.Fatalf(\"error setting up container: %v\", err)\n}\ndefer os.RemoveAll(bundleDir)\n- cont, err := container.Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1475,7 +1474,7 @@ func TestMultiContainerWait(t *testing.T) {\ndefer os.RemoveAll(rootDir)\n// Setup the containers.\n- containers := make([]*container.Container, 0, len(containerIDs))\n+ containers := make([]*Container, 0, len(containerIDs))\nfor i, annotations := range containerAnnotations {\nspec := testutil.NewSpecWithArgs(args[i][0], args[i][1])\nspec.Annotations = annotations\n@@ -1485,7 +1484,7 @@ func TestMultiContainerWait(t *testing.T) {\nt.Fatalf(\"error setting up container: %v\", err)\n}\ndefer os.RemoveAll(bundleDir)\n- cont, err := container.Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\n+ cont, err := Create(containerIDs[i], spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1591,7 +1590,7 @@ func TestContainerVolumeContentsShared(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- c, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n@@ -1713,7 +1712,7 @@ func TestGoferExits(t *testing.T) {\ndefer os.RemoveAll(bundleDir)\n// Create and start the container.\n- c, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\n+ c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, \"\", \"\")\nif err != nil {\nt.Fatalf(\"error creating container: %v\", err)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Move container_test to the container package
PiperOrigin-RevId: 209655274
Change-Id: Id381114bdb3197c73e14f74b3f6cf1afd87d60cb |
259,992 | 21.08.2018 14:34:00 | 25,200 | 19ef2ad1fe82093548edbb00de536d4bcf328f2b | nonExclusiveFS is causing timeout with --race
Not sure why, just removed for now to unblock the tests. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -213,8 +213,10 @@ const (\nnonExclusiveFS\n)\n-var all = []configOption{overlay, kvm, nonExclusiveFS}\n-var noOverlay = []configOption{kvm, nonExclusiveFS}\n+// TODO: nonExclusiveFS was removed because it causes timeout\n+// with --race. Put it back when bug is fixed.\n+var all = []configOption{overlay, kvm}\n+var noOverlay = []configOption{kvm}\n// configs generates different configurations to run tests.\nfunc configs(opts ...configOption) []*boot.Config {\n@@ -557,10 +559,7 @@ func TestExec(t *testing.T) {\n// be the next consecutive number after the last number from the checkpointed container.\nfunc TestCheckpointRestore(t *testing.T) {\n// Skip overlay because test requires writing to host file.\n- //\n- // TODO: Skip nonExclusiveFS because $TEST_TMPDIR mount is\n- // mistakenly marked as RO after revalidation.\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"checkpoint-test\")\n@@ -711,10 +710,7 @@ func TestUnixDomainSockets(t *testing.T) {\n)\n// Skip overlay because test requires writing to host file.\n- //\n- // TODO: Skip nonExclusiveFS because $TEST_TMPDIR mount is\n- // mistakenly marked as RO after revalidation.\n- for _, conf := range configs(kvm) {\n+ for _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"uds-test\")\n"
}
] | Go | Apache License 2.0 | google/gvisor | nonExclusiveFS is causing timeout with --race
Not sure why, just removed for now to unblock the tests.
PiperOrigin-RevId: 209661403
Change-Id: I72785c071687d54e22bda9073d36b447d52a7018 |
259,891 | 21.08.2018 16:19:59 | 25,200 | ae68e9e7513083411875110bd31bd89ac3a58cb7 | Temporarily skip multi-container tests in container_test until deflaked. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -1366,6 +1366,7 @@ func TestAbbreviatedIDs(t *testing.T) {\n// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n// containers in the same sandbox.\nfunc TestMultiContainerSanity(t *testing.T) {\n+ t.Skip(\"Test is flakey.\") // TODO: Remove.\nfor _, conf := range configs(all...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n@@ -1438,6 +1439,7 @@ func TestMultiContainerSanity(t *testing.T) {\n}\nfunc TestMultiContainerWait(t *testing.T) {\n+ t.Skip(\"Test is flakey.\") // TODO: Remove.\ncontainerIDs := []string{\ntestutil.UniqueContainerID(),\ntestutil.UniqueContainerID(),\n"
}
] | Go | Apache License 2.0 | google/gvisor | Temporarily skip multi-container tests in container_test until deflaked.
PiperOrigin-RevId: 209679235
Change-Id: I527e779eeb113d0c162f5e27a2841b9486f0e39f |
259,948 | 21.08.2018 16:51:08 | 25,200 | 8bb50dab790d575a83a935cf3361099cdb1a6aac | sentry: do not release gofer inode file state loading lock upon error.
When an inode file state failed to load asynchronuously, we want to report
the error instead of potentially panicing in another async loading goroutine
incorrectly unblocked. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/inode_state.go",
"new_path": "pkg/sentry/fs/gofer/inode_state.go",
"diff": "@@ -108,9 +108,13 @@ func (i *inodeFileState) loadLoading(_ struct{}) {\n// afterLoad is invoked by stateify.\nfunc (i *inodeFileState) afterLoad() {\n- load := func() error {\n+ load := func() (err error) {\n// See comment on i.loading().\n- defer i.loading.Unlock()\n+ defer func() {\n+ if err == nil {\n+ i.loading.Unlock()\n+ }\n+ }()\n// Manually restore the p9.File.\nname, ok := i.s.inodeMappings[i.sattr.InodeID]\n@@ -121,7 +125,6 @@ func (i *inodeFileState) afterLoad() {\n}\n// TODO: Context is not plumbed to save/restore.\nctx := &dummyClockContext{context.Background()}\n- var err error\n_, i.file, err = i.s.attach.walk(ctx, splitAbsolutePath(name))\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: do not release gofer inode file state loading lock upon error.
When an inode file state failed to load asynchronuously, we want to report
the error instead of potentially panicing in another async loading goroutine
incorrectly unblocked.
PiperOrigin-RevId: 209683977
Change-Id: I591cde97710bbe3cdc53717ee58f1d28bbda9261 |
259,992 | 21.08.2018 23:06:11 | 25,200 | e2ab7ec39e500627126fe8be8e37400711410cde | Fix TestUnixDomainSockets failure when path is too large
UDS has a lower size limit than regular files. When running under bazel
this limit is exceeded. Test was changed to always mount /tmp and use
it for the test. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/container_test.go",
"new_path": "runsc/container/container_test.go",
"diff": "@@ -704,22 +704,20 @@ func TestCheckpointRestore(t *testing.T) {\n// TestUnixDomainSockets checks that Checkpoint/Restore works in cases\n// with filesystem Unix Domain Socket use.\nfunc TestUnixDomainSockets(t *testing.T) {\n- const (\n- output = \"uds_output\"\n- socket = \"uds_socket\"\n- )\n-\n// Skip overlay because test requires writing to host file.\nfor _, conf := range configs(noOverlay...) {\nt.Logf(\"Running test with conf: %+v\", conf)\n- dir, err := ioutil.TempDir(testutil.TmpDir(), \"uds-test\")\n+ // UDS path is limited to 108 chars for compatibility with older systems.\n+ // Use '/tmp' (instead of testutil.TmpDir) to to ensure the size limit is\n+ // not exceeded. Assumes '/tmp' exists in the system.\n+ dir, err := ioutil.TempDir(\"/tmp\", \"uds-test\")\nif err != nil {\nt.Fatalf(\"ioutil.TempDir failed: %v\", err)\n}\ndefer os.RemoveAll(dir)\n- outputPath := filepath.Join(dir, output)\n+ outputPath := filepath.Join(dir, \"uds_output\")\noutputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)\nif err != nil {\nt.Fatalf(\"error creating output file: %v\", err)\n@@ -731,7 +729,7 @@ func TestUnixDomainSockets(t *testing.T) {\nt.Fatal(\"error finding uds_test_app:\", err)\n}\n- socketPath := filepath.Join(dir, socket)\n+ socketPath := filepath.Join(dir, \"uds_socket\")\ndefer os.Remove(socketPath)\nspec := testutil.NewSpecWithArgs(app, \"--file\", outputPath, \"--socket\", socketPath)\n@@ -739,6 +737,13 @@ func TestUnixDomainSockets(t *testing.T) {\nUID: uint32(os.Getuid()),\nGID: uint32(os.Getgid()),\n}\n+ spec.Mounts = []specs.Mount{\n+ specs.Mount{\n+ Type: \"bind\",\n+ Destination: \"/tmp\",\n+ Source: \"/tmp\",\n+ },\n+ }\nrootDir, bundleDir, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Fix TestUnixDomainSockets failure when path is too large
UDS has a lower size limit than regular files. When running under bazel
this limit is exceeded. Test was changed to always mount /tmp and use
it for the test.
PiperOrigin-RevId: 209717830
Change-Id: I1dbe19fe2051ffdddbaa32b188a9167f446ed193 |
260,013 | 22.08.2018 12:35:40 | 25,200 | 545ea7ab3fa3e976120b74da3271dc7724c05f5e | Always add AT_BASE even if there is no interpreter.
Linux will ALWAYS add AT_BASE even for a static binary, expect it
will be set to 0 [1].
1. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/loader/elf.go",
"new_path": "pkg/sentry/loader/elf.go",
"diff": "@@ -655,6 +655,9 @@ func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace\n// Start in the interpreter.\n// N.B. AT_ENTRY above contains the *original* entry point.\nbin.entry = interp.entry\n+ } else {\n+ // Always add AT_BASE even if there is no interpreter.\n+ bin.auxv = append(bin.auxv, arch.AuxEntry{linux.AT_BASE, 0})\n}\nreturn bin, ac, nil\n"
}
] | Go | Apache License 2.0 | google/gvisor | Always add AT_BASE even if there is no interpreter.
Linux will ALWAYS add AT_BASE even for a static binary, expect it
will be set to 0 [1].
1. https://github.com/torvalds/linux/blob/master/fs/binfmt_elf.c#L253
PiperOrigin-RevId: 209811129
Change-Id: I92cc66532f23d40f24414a921c030bd3481e12a0 |
259,948 | 22.08.2018 13:18:21 | 25,200 | 6b9133ba96863e3653fa6f3949710203bb077c50 | sentry: mark S/R stating errors as save rejections / fs corruptions. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/gofer/inode_state.go",
"new_path": "pkg/sentry/fs/gofer/inode_state.go",
"diff": "@@ -67,7 +67,7 @@ func (i *inodeFileState) beforeSave() {\nif i.sattr.Type == fs.RegularFile {\nuattr, err := i.unstableAttr(&dummyClockContext{context.Background()})\nif err != nil {\n- panic(fmt.Sprintf(\"failed to get unstable atttribute of %s: %v\", i.s.inodeMappings[i.sattr.InodeID], err))\n+ panic(fs.ErrSaveRejection{fmt.Errorf(\"failed to get unstable atttribute of %s: %v\", i.s.inodeMappings[i.sattr.InodeID], err)})\n}\ni.savedUAttr = &uattr\n}\n@@ -128,7 +128,7 @@ func (i *inodeFileState) afterLoad() {\n_, i.file, err = i.s.attach.walk(ctx, splitAbsolutePath(name))\nif err != nil {\n- return fmt.Errorf(\"failed to walk to %q: %v\", name, err)\n+ return fs.ErrCorruption{fmt.Errorf(\"failed to walk to %q: %v\", name, err)}\n}\n// Remap the saved inode number into the gofer device using the\n@@ -136,7 +136,7 @@ func (i *inodeFileState) afterLoad() {\n// environment.\nqid, mask, attrs, err := i.file.getAttr(ctx, p9.AttrMaskAll())\nif err != nil {\n- return fmt.Errorf(\"failed to get file attributes of %s: %v\", name, err)\n+ return fs.ErrCorruption{fmt.Errorf(\"failed to get file attributes of %s: %v\", name, err)}\n}\nif !mask.RDev {\nreturn fs.ErrCorruption{fmt.Errorf(\"file %s lacks device\", name)}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/host/inode_state.go",
"new_path": "pkg/sentry/fs/host/inode_state.go",
"diff": "@@ -31,7 +31,7 @@ func (i *inodeFileState) beforeSave() {\nif !i.descriptor.donated && i.sattr.Type == fs.RegularFile {\nuattr, err := i.unstableAttr(context.Background())\nif err != nil {\n- panic(fmt.Sprintf(\"failed to get unstable atttribute of %s: %v\", i.mops.inodeMappings[i.sattr.InodeID], err))\n+ panic(fs.ErrSaveRejection{fmt.Errorf(\"failed to get unstable atttribute of %s: %v\", i.mops.inodeMappings[i.sattr.InodeID], err)})\n}\ni.savedUAttr = &uattr\n}\n@@ -47,7 +47,7 @@ func (i *inodeFileState) afterLoad() {\n// Remap the inode number.\nvar s syscall.Stat_t\nif err := syscall.Fstat(i.FD(), &s); err != nil {\n- panic(fmt.Sprintf(\"failed to get metadata for fd %d: %v\", i.FD(), err))\n+ panic(fs.ErrCorruption{fmt.Errorf(\"failed to get metadata for fd %d: %v\", i.FD(), err)})\n}\nkey := device.MultiDeviceKey{\nDevice: s.Dev,\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: mark S/R stating errors as save rejections / fs corruptions.
PiperOrigin-RevId: 209817767
Change-Id: Iddf2b8441bc44f31f9a8cf6f2bd8e7a5b824b487 |
259,858 | 22.08.2018 14:14:32 | 25,200 | a7a8d07d7d6bd551d96621ee841b1b0e0f217ca3 | Add separate Recycle method for allocator.
This improves debugging for pagetable-related issues. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/address_space.go",
"new_path": "pkg/sentry/platform/kvm/address_space.go",
"diff": "@@ -273,6 +273,9 @@ func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) {\nStart: addr,\nEnd: addr + usermem.Addr(length),\n})\n+\n+ // Recycle any freed intermediate pages.\n+ as.pageTables.Allocator.Recycle()\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/kvm/allocator.go",
"new_path": "pkg/sentry/platform/kvm/allocator.go",
"diff": "@@ -67,3 +67,10 @@ func (a allocator) LookupPTEs(physical uintptr) *pagetables.PTEs {\nfunc (a allocator) FreePTEs(ptes *pagetables.PTEs) {\na.base.FreePTEs(ptes)\n}\n+\n+// Recycle implements pagetables.Allocator.Recycle.\n+//\n+//go:nosplit\n+func (a allocator) Recycle() {\n+ a.base.Recycle()\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/platform/ring0/pagetables/allocator.go",
"new_path": "pkg/sentry/platform/ring0/pagetables/allocator.go",
"diff": "@@ -27,8 +27,12 @@ type Allocator interface {\n// LookupPTEs looks up PTEs by physical address.\nLookupPTEs(physical uintptr) *PTEs\n- // FreePTEs frees a set of PTEs.\n+ // FreePTEs marks a set of PTEs a freed, although they may not be available\n+ // for use again until Recycle is called, below.\nFreePTEs(ptes *PTEs)\n+\n+ // Recycle makes freed PTEs available for use again.\n+ Recycle()\n}\n// RuntimeAllocator is a trivial allocator.\n@@ -42,6 +46,9 @@ type RuntimeAllocator struct {\n// pool is the set of free-to-use PTEs.\npool []*PTEs\n+\n+ // freed is the set of recently-freed PTEs.\n+ freed []*PTEs\n}\n// NewRuntimeAllocator returns an allocator that uses runtime allocation.\n@@ -51,8 +58,15 @@ func NewRuntimeAllocator() *RuntimeAllocator {\n}\n}\n+// Recycle returns freed pages to the pool.\n+func (r *RuntimeAllocator) Recycle() {\n+ r.pool = append(r.pool, r.freed...)\n+ r.freed = r.freed[:0]\n+}\n+\n// Drain empties the pool.\nfunc (r *RuntimeAllocator) Drain() {\n+ r.Recycle()\nfor i, ptes := range r.pool {\n// Zap the entry in the underlying array to ensure that it can\n// be properly garbage collected.\n@@ -104,6 +118,5 @@ func (r *RuntimeAllocator) LookupPTEs(physical uintptr) *PTEs {\n//\n//go:nosplit\nfunc (r *RuntimeAllocator) FreePTEs(ptes *PTEs) {\n- // Add to the pool.\n- r.pool = append(r.pool, ptes)\n+ r.freed = append(r.freed, ptes)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add separate Recycle method for allocator.
This improves debugging for pagetable-related issues.
PiperOrigin-RevId: 209827795
Change-Id: I4cfa11664b0b52f26f6bc90a14c5bb106f01e038 |
259,854 | 23.08.2018 08:54:09 | 25,200 | abe7764928bb18fe417c53c8ea8aa9fb970114b7 | Encapsulate netstack metrics | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/nic.go",
"new_path": "pkg/tcpip/stack/nic.go",
"diff": "@@ -282,12 +282,12 @@ func (n *NIC) RemoveAddress(addr tcpip.Address) *tcpip.Error {\nfunc (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) {\nnetProto, ok := n.stack.networkProtocols[protocol]\nif !ok {\n- atomic.AddUint64(&n.stack.stats.UnknownProtocolRcvdPackets, 1)\n+ n.stack.stats.UnknownProtocolRcvdPackets.Increment()\nreturn\n}\nif len(vv.First()) < netProto.MinimumPacketSize() {\n- atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1)\n+ n.stack.stats.MalformedRcvdPackets.Increment()\nreturn\n}\n@@ -330,7 +330,7 @@ func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.Lin\n}\nif ref == nil {\n- atomic.AddUint64(&n.stack.stats.UnknownNetworkEndpointRcvdPackets, 1)\n+ n.stack.stats.UnknownNetworkEndpointRcvdPackets.Increment()\nreturn\n}\n@@ -345,19 +345,19 @@ func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.Lin\nfunc (n *NIC) DeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView) {\nstate, ok := n.stack.transportProtocols[protocol]\nif !ok {\n- atomic.AddUint64(&n.stack.stats.UnknownProtocolRcvdPackets, 1)\n+ n.stack.stats.UnknownProtocolRcvdPackets.Increment()\nreturn\n}\ntransProto := state.proto\nif len(vv.First()) < transProto.MinimumPacketSize() {\n- atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1)\n+ n.stack.stats.MalformedRcvdPackets.Increment()\nreturn\n}\nsrcPort, dstPort, err := transProto.ParsePorts(vv.First())\nif err != nil {\n- atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1)\n+ n.stack.stats.MalformedRcvdPackets.Increment()\nreturn\n}\n@@ -379,7 +379,7 @@ func (n *NIC) DeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolN\n// We could not find an appropriate destination for this packet, so\n// deliver it to the global handler.\nif !transProto.HandleUnknownDestinationPacket(r, id, vv) {\n- atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1)\n+ n.stack.stats.MalformedRcvdPackets.Increment()\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/stack/stack.go",
"new_path": "pkg/tcpip/stack/stack.go",
"diff": "@@ -26,7 +26,6 @@ package stack\nimport (\n\"sync\"\n- \"sync/atomic\"\n\"time\"\n\"gvisor.googlesource.com/gvisor/pkg/sleep\"\n@@ -308,6 +307,9 @@ type Options struct {\n//\n// If no Clock is specified, the clock source will be time.Now.\nClock tcpip.Clock\n+\n+ // Stats are optional statistic counters.\n+ Stats tcpip.Stats\n}\n// New allocates a new networking stack with only the requested networking and\n@@ -331,6 +333,7 @@ func New(network []string, transport []string, opts Options) *Stack {\nlinkAddrCache: newLinkAddrCache(ageLimit, resolutionTimeout, resolutionAttempts),\nPortManager: ports.NewPortManager(),\nclock: clock,\n+ stats: opts.Stats.FillIn(),\n}\n// Add specified network protocols.\n@@ -437,27 +440,12 @@ func (s *Stack) NowNanoseconds() int64 {\nreturn s.clock.NowNanoseconds()\n}\n-// Stats returns a snapshot of the current stats.\n-//\n-// NOTE: The underlying stats are updated using atomic instructions as a result\n-// the snapshot returned does not represent the value of all the stats at any\n-// single given point of time.\n-// TODO: Make stats available in sentry for debugging/diag.\n-func (s *Stack) Stats() tcpip.Stats {\n- return tcpip.Stats{\n- UnknownProtocolRcvdPackets: atomic.LoadUint64(&s.stats.UnknownProtocolRcvdPackets),\n- UnknownNetworkEndpointRcvdPackets: atomic.LoadUint64(&s.stats.UnknownNetworkEndpointRcvdPackets),\n- MalformedRcvdPackets: atomic.LoadUint64(&s.stats.MalformedRcvdPackets),\n- DroppedPackets: atomic.LoadUint64(&s.stats.DroppedPackets),\n- }\n-}\n-\n-// MutableStats returns a mutable copy of the current stats.\n+// Stats returns a mutable copy of the current stats.\n//\n// This is not generally exported via the public interface, but is available\n// internally.\n-func (s *Stack) MutableStats() *tcpip.Stats {\n- return &s.stats\n+func (s *Stack) Stats() tcpip.Stats {\n+ return s.stats\n}\n// SetRouteTable assigns the route table to be used by this stack. It\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/tcpip.go",
"new_path": "pkg/tcpip/tcpip.go",
"diff": "@@ -34,6 +34,7 @@ import (\n\"strconv\"\n\"strings\"\n\"sync\"\n+ \"sync/atomic\"\n\"time\"\n\"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer\"\n@@ -465,23 +466,62 @@ type TransportProtocolNumber uint32\n// NetworkProtocolNumber is the number of a network protocol.\ntype NetworkProtocolNumber uint32\n+// A StatCounter keeps track of a statistic.\n+type StatCounter struct {\n+ count uint64\n+}\n+\n+// Increment adds one to the counter.\n+func (s *StatCounter) Increment() {\n+ atomic.AddUint64(&s.count, 1)\n+}\n+\n+// Value returns the current value of the counter.\n+func (s *StatCounter) Value() uint64 {\n+ return atomic.LoadUint64(&s.count)\n+}\n+\n+// IncrementBy increments the counter by v.\n+func (s *StatCounter) IncrementBy(v uint64) {\n+ atomic.AddUint64(&s.count, v)\n+}\n+\n// Stats holds statistics about the networking stack.\n+//\n+// All fields are optional.\ntype Stats struct {\n// UnknownProtocolRcvdPackets is the number of packets received by the\n// stack that were for an unknown or unsupported protocol.\n- UnknownProtocolRcvdPackets uint64\n+ UnknownProtocolRcvdPackets *StatCounter\n// UnknownNetworkEndpointRcvdPackets is the number of packets received\n// by the stack that were for a supported network protocol, but whose\n// destination address didn't having a matching endpoint.\n- UnknownNetworkEndpointRcvdPackets uint64\n+ UnknownNetworkEndpointRcvdPackets *StatCounter\n// MalformedRcvPackets is the number of packets received by the stack\n// that were deemed malformed.\n- MalformedRcvdPackets uint64\n+ MalformedRcvdPackets *StatCounter\n// DroppedPackets is the number of packets dropped due to full queues.\n- DroppedPackets uint64\n+ DroppedPackets *StatCounter\n+}\n+\n+// FillIn returns a copy of s with nil fields initialized to new StatCounters.\n+func (s Stats) FillIn() Stats {\n+ if s.UnknownProtocolRcvdPackets == nil {\n+ s.UnknownProtocolRcvdPackets = &StatCounter{}\n+ }\n+ if s.UnknownNetworkEndpointRcvdPackets == nil {\n+ s.UnknownNetworkEndpointRcvdPackets = &StatCounter{}\n+ }\n+ if s.MalformedRcvdPackets == nil {\n+ s.MalformedRcvdPackets = &StatCounter{}\n+ }\n+ if s.DroppedPackets == nil {\n+ s.DroppedPackets = &StatCounter{}\n+ }\n+ return s\n}\n// String implements the fmt.Stringer interface.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/connect.go",
"new_path": "pkg/tcpip/transport/tcp/connect.go",
"diff": "@@ -16,7 +16,6 @@ package tcp\nimport (\n\"sync\"\n- \"sync/atomic\"\n\"time\"\n\"gvisor.googlesource.com/gvisor/pkg/rand\"\n@@ -292,7 +291,7 @@ func (h *handshake) synRcvdState(s *segment) *tcpip.Error {\n// not carry a timestamp option then the segment must be dropped\n// as per https://tools.ietf.org/html/rfc7323#section-3.2.\nif h.ep.sendTSOk && !s.parsedOptions.TS {\n- atomic.AddUint64(&h.ep.stack.MutableStats().DroppedPackets, 1)\n+ h.ep.stack.Stats().DroppedPackets.Increment()\nreturn nil\n}\n@@ -793,7 +792,7 @@ func (e *endpoint) handleSegments() *tcpip.Error {\n// must be dropped as per\n// https://tools.ietf.org/html/rfc7323#section-3.2.\nif e.sendTSOk && !s.parsedOptions.TS {\n- atomic.AddUint64(&e.stack.MutableStats().DroppedPackets, 1)\n+ e.stack.Stats().DroppedPackets.Increment()\ns.decRef()\ncontinue\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/endpoint.go",
"new_path": "pkg/tcpip/transport/tcp/endpoint.go",
"diff": "@@ -1225,7 +1225,7 @@ func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) {\nfunc (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) {\ns := newSegment(r, id, vv)\nif !s.parse() {\n- atomic.AddUint64(&e.stack.MutableStats().MalformedRcvdPackets, 1)\n+ e.stack.Stats().MalformedRcvdPackets.Increment()\ns.decRef()\nreturn\n}\n@@ -1235,7 +1235,7 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv\ne.newSegmentWaker.Assert()\n} else {\n// The queue is full, so we drop the segment.\n- atomic.AddUint64(&e.stack.MutableStats().DroppedPackets, 1)\n+ e.stack.Stats().DroppedPackets.Increment()\ns.decRef()\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/tcp_timestamp_test.go",
"new_path": "pkg/tcpip/transport/tcp/tcp_timestamp_test.go",
"diff": "@@ -268,7 +268,8 @@ func TestSegmentDropWhenTimestampMissing(t *testing.T) {\ndefer c.WQ.EventUnregister(&we)\nstk := c.Stack()\n- droppedPackets := stk.Stats().DroppedPackets\n+ droppedPacketsStat := stk.Stats().DroppedPackets\n+ droppedPackets := droppedPacketsStat.Value()\ndata := []byte{1, 2, 3}\n// Save the sequence number as we will reset it later down\n// in the test.\n@@ -283,11 +284,11 @@ func TestSegmentDropWhenTimestampMissing(t *testing.T) {\n}\n// Assert that DroppedPackets was incremented by 1.\n- if got, want := stk.Stats().DroppedPackets, droppedPackets+1; got != want {\n+ if got, want := droppedPacketsStat.Value(), droppedPackets+1; got != want {\nt.Fatalf(\"incorrect number of dropped packets, got: %v, want: %v\", got, want)\n}\n- droppedPackets = stk.Stats().DroppedPackets\n+ droppedPackets = droppedPacketsStat.Value()\n// Reset the sequence number so that the other endpoint accepts\n// this segment and does not treat it like an out of order delivery.\nrep.NextSeqNum = savedSeqNum\n@@ -301,7 +302,7 @@ func TestSegmentDropWhenTimestampMissing(t *testing.T) {\n}\n// Assert that DroppedPackets was not incremented by 1.\n- if got, want := stk.Stats().DroppedPackets, droppedPackets; got != want {\n+ if got, want := droppedPacketsStat.Value(), droppedPackets; got != want {\nt.Fatalf(\"incorrect number of dropped packets, got: %v, want: %v\", got, want)\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Encapsulate netstack metrics
PiperOrigin-RevId: 209943212
Change-Id: I96dcbc7c2ab2426e510b94a564436505256c5c79 |
259,992 | 23.08.2018 11:14:02 | 25,200 | 001a4c2493b13a43d62c7511fb509a959ae4abc2 | Clean up syscall filters
Removed syscalls that are only used by whitelistfs
which has its own set of filters. | [
{
"change_type": "MODIFY",
"old_path": "runsc/boot/filter/config.go",
"new_path": "runsc/boot/filter/config.go",
"diff": "@@ -38,7 +38,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_EXIT: {},\nsyscall.SYS_EXIT_GROUP: {},\nsyscall.SYS_FALLOCATE: {},\n- syscall.SYS_FCHMOD: {},\nsyscall.SYS_FCNTL: {},\nsyscall.SYS_FSTAT: {},\nsyscall.SYS_FSYNC: {},\n@@ -60,15 +59,12 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_MMAP: {},\nsyscall.SYS_MPROTECT: {},\nsyscall.SYS_MUNMAP: {},\n- syscall.SYS_NEWFSTATAT: {},\nsyscall.SYS_POLL: {},\nsyscall.SYS_PREAD64: {},\nsyscall.SYS_PWRITE64: {},\nsyscall.SYS_READ: {},\n- syscall.SYS_READLINKAT: {},\nsyscall.SYS_READV: {},\nsyscall.SYS_RECVMSG: {},\n- syscall.SYS_RENAMEAT: {},\nsyscall.SYS_RESTART_SYSCALL: {},\nsyscall.SYS_RT_SIGACTION: {},\nsyscall.SYS_RT_SIGPROCMASK: {},\n@@ -80,7 +76,6 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_SIGALTSTACK: {},\nsyscall.SYS_SYNC_FILE_RANGE: {},\nsyscall.SYS_TGKILL: {},\n- syscall.SYS_UTIMENSAT: {},\nsyscall.SYS_WRITE: {},\nsyscall.SYS_WRITEV: {},\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Clean up syscall filters
Removed syscalls that are only used by whitelistfs
which has its own set of filters.
PiperOrigin-RevId: 209967259
Change-Id: Idb2e1b9d0201043d7cd25d96894f354729dbd089 |
259,948 | 23.08.2018 13:57:30 | 25,200 | ba8f6ba8c899d2e900fa7e9ee5aede31cba1de9c | sentry: mark idMapSeqHandle as savable. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/uid_gid_map.go",
"new_path": "pkg/sentry/fs/proc/uid_gid_map.go",
"diff": "@@ -68,6 +68,8 @@ func (imss *idMapSeqSource) ReadSeqFileData(ctx context.Context, handle seqfile.\n}\n// TODO: Fix issue requiring idMapSeqHandle wrapping an int.\n+//\n+// +stateify savable\ntype idMapSeqHandle struct {\nvalue int\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: mark idMapSeqHandle as savable.
PiperOrigin-RevId: 209994384
Change-Id: I16186cf79cb4760a134f3968db30c168a5f4340e |
259,948 | 23.08.2018 16:13:22 | 25,200 | e855e9cebc45f5fd7a9583f476c8965fc395a15e | netstack: make listening tcp socket close state setting and cleanup atomic.
Otherwise the socket saving logic might find workers still running for closed
sockets unexpectedly. | [
{
"change_type": "MODIFY",
"old_path": "pkg/tcpip/transport/tcp/accept.go",
"new_path": "pkg/tcpip/transport/tcp/accept.go",
"diff": "@@ -363,11 +363,6 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {\ne.mu.Lock()\ne.state = stateClosed\n- // Notify waiters that the endpoint is shutdown.\n- e.mu.Unlock()\n- e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)\n- e.mu.Lock()\n-\n// Do cleanup if needed.\ne.completeWorkerLocked()\n@@ -375,6 +370,9 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {\nclose(e.drainDone)\n}\ne.mu.Unlock()\n+\n+ // Notify waiters that the endpoint is shutdown.\n+ e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)\n}()\ne.mu.Lock()\n"
}
] | Go | Apache License 2.0 | google/gvisor | netstack: make listening tcp socket close state setting and cleanup atomic.
Otherwise the socket saving logic might find workers still running for closed
sockets unexpectedly.
PiperOrigin-RevId: 210018905
Change-Id: I443a04d355613f5f9983252cc6863bff6e0eda3a |
259,992 | 24.08.2018 10:16:38 | 25,200 | a81a4402a265aec6715172cd3502ee7eebbf64aa | Add option to panic gofer if writes are attempted over RO mounts
This is used when '--overlay=true' to guarantee writes are not sent to gofer. | [
{
"change_type": "MODIFY",
"old_path": "runsc/cmd/gofer.go",
"new_path": "runsc/cmd/gofer.go",
"diff": "@@ -38,6 +38,8 @@ type Gofer struct {\n// controllerFD is the file descriptor of a stream socket for the\n// control server that is donated to this process.\ncontrollerFD int\n+\n+ panicOnWrite bool\n}\n// Name implements subcommands.Command.\n@@ -61,6 +63,7 @@ func (g *Gofer) SetFlags(f *flag.FlagSet) {\nf.Var(&g.ioFDs, \"io-fds\", \"list of FDs to connect 9P servers. They must follow this order: root first, then mounts as defined in the spec\")\nf.BoolVar(&g.applyCaps, \"apply-caps\", true, \"if true, apply capabilities to restrict what the Gofer process can do\")\nf.IntVar(&g.controllerFD, \"controller-fd\", -1, \"required FD of a stream socket for the control server that must be donated to this process\")\n+ f.BoolVar(&g.panicOnWrite, \"panic-on-write\", false, \"if true, panics on attempts to write to RO mounts. RW mounts are unnaffected\")\n}\n// Execute implements subcommands.Command.\n@@ -111,6 +114,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\np := absPath(g.bundleDir, spec.Root.Path)\nats = append(ats, fsgofer.NewAttachPoint(p, fsgofer.Config{\nROMount: spec.Root.Readonly,\n+ PanicOnWrite: g.panicOnWrite,\n// Docker uses overlay2 by default for the root mount, and overlay2 does a copy-up when\n// each file is opened as writable. Thus, we open files lazily to avoid copy-up.\nLazyOpenForWrite: true,\n@@ -123,6 +127,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\np = absPath(g.bundleDir, m.Source)\nats = append(ats, fsgofer.NewAttachPoint(p, fsgofer.Config{\nROMount: isReadonlyMount(m.Options),\n+ PanicOnWrite: g.panicOnWrite,\nLazyOpenForWrite: false,\n}))\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer.go",
"new_path": "runsc/fsgofer/fsgofer.go",
"diff": "@@ -76,6 +76,9 @@ type Config struct {\n// ROMount is set to true if this is a readonly mount.\nROMount bool\n+ // PanicOnWrite panics on attempts to write to RO mounts.\n+ PanicOnWrite bool\n+\n// LazyOpenForWrite makes the underlying file to be opened in RDONLY\n// mode initially and be reopened in case write access is desired.\n// This is done to workaround the behavior in 'overlay2' that\n@@ -375,6 +378,9 @@ func (l *localFile) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) {\n// Create implements p9.File.\nfunc (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, p9.File, p9.QID, uint32, error) {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn nil, nil, p9.QID{}, 0, syscall.EBADF\n}\nif !isNameValid(name) {\n@@ -429,6 +435,9 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\n// Mkdir implements p9.File.\nfunc (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn p9.QID{}, syscall.EBADF\n}\n@@ -585,6 +594,9 @@ func (l *localFile) GetAttr(_ p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error)\n// an error happens.\nfunc (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn syscall.EBADF\n}\n@@ -722,6 +734,9 @@ func (*localFile) Remove() error {\n// Rename implements p9.File.\nfunc (l *localFile) Rename(directory p9.File, name string) error {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn syscall.EBADF\n}\nif !isNameValid(name) {\n@@ -789,6 +804,9 @@ func (l *localFile) WriteAt(p []byte, offset uint64) (int, error) {\n// Symlink implements p9.File.\nfunc (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.QID, error) {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn p9.QID{}, syscall.EBADF\n}\nif !isNameValid(newName) {\n@@ -819,6 +837,9 @@ func (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.\n// Link implements p9.File.\nfunc (l *localFile) Link(target p9.File, newName string) error {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn syscall.EBADF\n}\nif !isNameValid(newName) {\n@@ -842,6 +863,9 @@ func (*localFile) Mknod(_ string, _ p9.FileMode, _ uint32, _ uint32, _ p9.UID, _\n// UnlinkAt implements p9.File.\nfunc (l *localFile) UnlinkAt(name string, flags uint32) error {\nif l.conf.ROMount {\n+ if l.conf.PanicOnWrite {\n+ panic(\"attempt to write to RO mount\")\n+ }\nreturn syscall.EBADF\n}\nif !isNameValid(name) {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/fsgofer/fsgofer_test.go",
"new_path": "runsc/fsgofer/fsgofer_test.go",
"diff": "@@ -34,6 +34,15 @@ func init() {\nallConfs = append(allConfs, roConfs...)\n}\n+func assertPanic(t *testing.T, f func()) {\n+ defer func() {\n+ if r := recover(); r == nil {\n+ t.Errorf(\"function did not panic\")\n+ }\n+ }()\n+ f()\n+}\n+\nvar (\nallTypes = []fileType{regular, directory, symlink}\n@@ -434,6 +443,22 @@ func TestROMountChecks(t *testing.T) {\n})\n}\n+func TestROMountPanics(t *testing.T) {\n+ conf := Config{ROMount: true, PanicOnWrite: true}\n+ runCustom(t, allTypes, []Config{conf}, func(t *testing.T, s state) {\n+ assertPanic(t, func() { s.file.Create(\"..\", p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())) })\n+ assertPanic(t, func() { s.file.Mkdir(\"..\", 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())) })\n+ assertPanic(t, func() { s.file.Rename(s.file, \"..\") })\n+ assertPanic(t, func() { s.file.Symlink(\"some_place\", \"..\", p9.UID(os.Getuid()), p9.GID(os.Getgid())) })\n+ assertPanic(t, func() { s.file.UnlinkAt(\"..\", 0) })\n+ assertPanic(t, func() { s.file.Link(s.file, \"..\") })\n+\n+ valid := p9.SetAttrMask{Size: true}\n+ attr := p9.SetAttr{Size: 0}\n+ assertPanic(t, func() { s.file.SetAttr(valid, attr) })\n+ })\n+}\n+\nfunc TestInvalidName(t *testing.T) {\nrunCustom(t, []fileType{regular}, rwConfs, func(t *testing.T, s state) {\nif _, _, _, _, err := s.file.Create(\"..\", p9.ReadWrite, 0777, p9.UID(os.Getuid()), p9.GID(os.Getgid())); err != syscall.EINVAL {\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/sandbox/sandbox.go",
"new_path": "runsc/sandbox/sandbox.go",
"diff": "@@ -345,6 +345,10 @@ func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundle\nsetUIDGIDMappings(cmd, spec)\nnss := filterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)\n+ if conf.Overlay {\n+ args = append(args, \"--panic-on-write=true\")\n+ }\n+\n// Start the gofer in the given namespace.\nlog.Debugf(\"Starting gofer: %s %v\", binPath, args)\nif err := startInNS(cmd, nss); err != nil {\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add option to panic gofer if writes are attempted over RO mounts
This is used when '--overlay=true' to guarantee writes are not sent to gofer.
PiperOrigin-RevId: 210116288
Change-Id: I7616008c4c0e8d3668e07a205207f46e2144bf30 |
259,992 | 24.08.2018 11:38:12 | 25,200 | 7b0dfb0cdbdcb402c000d30399dbfd2eeebe1266 | SyscallRules merge and add were dropping AllowAny rules | [
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_rules.go",
"new_path": "pkg/seccomp/seccomp_rules.go",
"diff": "@@ -34,7 +34,7 @@ func seccompDataOffsetArgLow(i int) uint32 {\n}\nfunc seccompDataOffsetArgHigh(i int) uint32 {\n- return uint32(seccompDataOffsetArgs + i*8 + 4)\n+ return seccompDataOffsetArgLow(i) + 4\n}\n// AllowAny is marker to indicate any value will be accepted.\n@@ -100,7 +100,11 @@ func NewSyscallRules() SyscallRules {\n// AddRule adds the given rule. It will create a new entry for a new syscall, otherwise\n// it will append to the existing rules.\nfunc (sr SyscallRules) AddRule(sysno uintptr, r Rule) {\n- if _, ok := sr[sysno]; ok {\n+ if cur, ok := sr[sysno]; ok {\n+ // An empty rules means allow all. Honor it when more rules are added.\n+ if len(cur) == 0 {\n+ sr[sysno] = append(sr[sysno], Rule{})\n+ }\nsr[sysno] = append(sr[sysno], r)\n} else {\nsr[sysno] = []Rule{r}\n@@ -110,7 +114,14 @@ func (sr SyscallRules) AddRule(sysno uintptr, r Rule) {\n// Merge merges the given SyscallRules.\nfunc (sr SyscallRules) Merge(rules SyscallRules) {\nfor sysno, rs := range rules {\n- if _, ok := sr[sysno]; ok {\n+ if cur, ok := sr[sysno]; ok {\n+ // An empty rules means allow all. Honor it when more rules are added.\n+ if len(cur) == 0 {\n+ sr[sysno] = append(sr[sysno], Rule{})\n+ }\n+ if len(rs) == 0 {\n+ rs = []Rule{Rule{}}\n+ }\nsr[sysno] = append(sr[sysno], rs...)\n} else {\nsr[sysno] = rs\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/seccomp/seccomp_test.go",
"new_path": "pkg/seccomp/seccomp_test.go",
"diff": "@@ -355,3 +355,55 @@ func TestRealDeal(t *testing.T) {\n}\n}\n}\n+\n+// TestMerge ensures that empty rules are not erased when rules are merged.\n+func TestMerge(t *testing.T) {\n+ for _, tst := range []struct {\n+ name string\n+ main []Rule\n+ merge []Rule\n+ want []Rule\n+ }{\n+ {\n+ name: \"empty both\",\n+ main: nil,\n+ merge: nil,\n+ want: []Rule{Rule{}, Rule{}},\n+ },\n+ {\n+ name: \"empty main\",\n+ main: nil,\n+ merge: []Rule{Rule{}},\n+ want: []Rule{Rule{}, Rule{}},\n+ },\n+ {\n+ name: \"empty merge\",\n+ main: []Rule{Rule{}},\n+ merge: nil,\n+ want: []Rule{Rule{}, Rule{}},\n+ },\n+ } {\n+ t.Run(tst.name, func(t *testing.T) {\n+ mainRules := SyscallRules{1: tst.main}\n+ mergeRules := SyscallRules{1: tst.merge}\n+ mainRules.Merge(mergeRules)\n+ if got, want := len(mainRules[1]), len(tst.want); got != want {\n+ t.Errorf(\"wrong length, got: %d, want: %d\", got, want)\n+ }\n+ for i, r := range mainRules[1] {\n+ if r != tst.want[i] {\n+ t.Errorf(\"result, got: %v, want: %v\", r, tst.want[i])\n+ }\n+ }\n+ })\n+ }\n+}\n+\n+// TestAddRule ensures that empty rules are not erased when rules are added.\n+func TestAddRule(t *testing.T) {\n+ rules := SyscallRules{1: {}}\n+ rules.AddRule(1, Rule{})\n+ if got, want := len(rules[1]), 2; got != want {\n+ t.Errorf(\"len(rules[1]), got: %d, want: %d\", got, want)\n+ }\n+}\n"
}
] | Go | Apache License 2.0 | google/gvisor | SyscallRules merge and add were dropping AllowAny rules
PiperOrigin-RevId: 210131001
Change-Id: I285707c5143b3e4c9a6948c1d1a452b6f16e65b7 |
259,891 | 27.08.2018 10:48:02 | 25,200 | 2524111fc63343fd7372f5ea0266130adea778a5 | runsc: Terminal resizing support.
Implements the TIOCGWINSZ and TIOCSWINSZ ioctls, which allow processes to resize
the terminal. This allows, for example, sshd to properly set the window size for
ssh sessions. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/tty.go",
"new_path": "pkg/abi/linux/tty.go",
"diff": "@@ -328,3 +328,13 @@ var DefaultSlaveTermios = KernelTermios{\nInputSpeed: 38400,\nOutputSpeed: 38400,\n}\n+\n+// WindowSize corresponds to struct winsize defined in\n+// include/uapi/asm-generic/termios.h.\n+//\n+// +stateify savable\n+type WindowSize struct {\n+ Rows uint16\n+ Cols uint16\n+ _ [4]byte // Padding for 2 unused shorts.\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/line_discipline.go",
"new_path": "pkg/sentry/fs/tty/line_discipline.go",
"diff": "@@ -76,6 +76,12 @@ const (\n//\n// +stateify savable\ntype lineDiscipline struct {\n+ // sizeMu protects size.\n+ sizeMu sync.Mutex `state:\"nosave\"`\n+\n+ // size is the terminal size (width and height).\n+ size linux.WindowSize\n+\n// inQueue is the input queue of the terminal.\ninQueue queue\n@@ -142,6 +148,24 @@ func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arc\nreturn 0, err\n}\n+func (l *lineDiscipline) windowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+ l.sizeMu.Lock()\n+ defer l.sizeMu.Unlock()\n+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), l.size, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ return err\n+}\n+\n+func (l *lineDiscipline) setWindowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {\n+ l.sizeMu.Lock()\n+ defer l.sizeMu.Unlock()\n+ _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &l.size, usermem.IOOpts{\n+ AddressSpaceActive: true,\n+ })\n+ return err\n+}\n+\nfunc (l *lineDiscipline) masterReadiness() waiter.EventMask {\n// We don't have to lock a termios because the default master termios\n// is immutable.\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/master.go",
"new_path": "pkg/sentry/fs/tty/master.go",
"diff": "@@ -172,6 +172,10 @@ func (mf *masterFileOperations) Ioctl(ctx context.Context, io usermem.IO, args a\ncase linux.TIOCSPTLCK:\n// TODO: Implement pty locking. For now just pretend we do.\nreturn 0, nil\n+ case linux.TIOCGWINSZ:\n+ return 0, mf.t.ld.windowSize(ctx, io, args)\n+ case linux.TIOCSWINSZ:\n+ return 0, mf.t.ld.setWindowSize(ctx, io, args)\ndefault:\nreturn 0, syserror.ENOTTY\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/tty/slave.go",
"new_path": "pkg/sentry/fs/tty/slave.go",
"diff": "@@ -150,6 +150,10 @@ func (sf *slaveFileOperations) Ioctl(ctx context.Context, io usermem.IO, args ar\nAddressSpaceActive: true,\n})\nreturn 0, err\n+ case linux.TIOCGWINSZ:\n+ return 0, sf.si.t.ld.windowSize(ctx, io, args)\n+ case linux.TIOCSWINSZ:\n+ return 0, sf.si.t.ld.setWindowSize(ctx, io, args)\ndefault:\nreturn 0, syserror.ENOTTY\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Terminal resizing support.
Implements the TIOCGWINSZ and TIOCSWINSZ ioctls, which allow processes to resize
the terminal. This allows, for example, sshd to properly set the window size for
ssh sessions.
PiperOrigin-RevId: 210392504
Change-Id: I0d4789154d6d22f02509b31d71392e13ee4a50ba |
259,948 | 27.08.2018 11:54:15 | 25,200 | bd01816c872672b74998694bb6e759df2a336735 | sentry: mark fsutil.DirFileOperations as savable. | [
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/fsutil/file.go",
"new_path": "pkg/sentry/fs/fsutil/file.go",
"diff": "@@ -214,6 +214,8 @@ func (NoIoctl) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArgume\n}\n// DirFileOperations implements FileOperations for directories.\n+//\n+// +stateify savable\ntype DirFileOperations struct {\nwaiter.AlwaysReady `state:\"nosave\"`\nNoopRelease `state:\"nosave\"`\n"
}
] | Go | Apache License 2.0 | google/gvisor | sentry: mark fsutil.DirFileOperations as savable.
PiperOrigin-RevId: 210405166
Change-Id: I252766015885c418e914007baf2fc058fec39b3e |
259,858 | 27.08.2018 13:35:50 | 25,200 | b9ded9bf399422d09f2f2bd32cd4960d24b424bf | Add runsc-race target. | [
{
"change_type": "MODIFY",
"old_path": "runsc/BUILD",
"new_path": "runsc/BUILD",
"diff": "@@ -19,3 +19,32 @@ go_binary(\n\"@com_github_google_subcommands//:go_default_library\",\n],\n)\n+\n+# The runsc-race target is a race-compatible BUILD target. This must be built\n+# via \"bazel build --features=race //runsc:runsc-race\", since the race feature\n+# must apply to all dependencies due a bug in gazelle file selection. The pure\n+# attribute must be off because the race detector requires linking with non-Go\n+# components, although we still require a static binary.\n+#\n+# Note that in the future this might be convertible to a compatible target by\n+# using the pure and static attributes within a select function, but select is\n+# not currently compatible with string attributes [1].\n+#\n+# [1] https://github.com/bazelbuild/bazel/issues/1698\n+go_binary(\n+ name = \"runsc-race\",\n+ srcs = [\n+ \"main.go\",\n+ ],\n+ static = \"on\",\n+ visibility = [\n+ \"//visibility:public\",\n+ ],\n+ x_defs = {\"main.gitRevision\": \"{GIT_REVISION}\"},\n+ deps = [\n+ \"//pkg/log\",\n+ \"//runsc/boot\",\n+ \"//runsc/cmd\",\n+ \"@com_github_google_subcommands//:go_default_library\",\n+ ],\n+)\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add runsc-race target.
PiperOrigin-RevId: 210422178
Change-Id: I984dd348d467908bc3180a20fc79b8387fcca05e |
260,013 | 27.08.2018 17:20:36 | 25,200 | f0492d45aa31e32f8a04b13b7bf53e0161e1afb6 | Add /proc/sys/kernel/shm[all,max,mni]. | [
{
"change_type": "MODIFY",
"old_path": "pkg/abi/linux/shm.go",
"new_path": "pkg/abi/linux/shm.go",
"diff": "package linux\n+import \"math\"\n+\n// shmat(2) flags. Source: include/uapi/linux/shm.h\nconst (\nSHM_RDONLY = 010000 // Read-only access.\n@@ -38,6 +40,15 @@ const (\nSHM_INFO = 14\n)\n+// SHM defaults as specified by linux. Source: include/uapi/linux/shm.h\n+const (\n+ SHMMIN = 1\n+ SHMMNI = 4096\n+ SHMMAX = math.MaxUint64 - 1<<24\n+ SHMALL = math.MaxUint64 - 1<<24\n+ SHMSEG = 4096\n+)\n+\n// ShmidDS is equivalent to struct shmid64_ds. Source:\n// include/uapi/asm-generic/shmbuf.h\ntype ShmidDS struct {\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/fs/proc/sys.go",
"new_path": "pkg/sentry/fs/proc/sys.go",
"diff": "@@ -17,7 +17,9 @@ package proc\nimport (\n\"fmt\"\n\"io\"\n+ \"strconv\"\n+ \"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/context\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs\"\n\"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile\"\n@@ -102,6 +104,10 @@ func (p *proc) newKernelDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode\nd := &ramfs.Dir{}\nd.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555))\nd.AddChild(ctx, \"hostname\", p.newHostname(ctx, msrc))\n+\n+ d.AddChild(ctx, \"shmmax\", p.newStubProcFSFile(ctx, msrc, []byte(strconv.FormatUint(linux.SHMMAX, 10))))\n+ d.AddChild(ctx, \"shmall\", p.newStubProcFSFile(ctx, msrc, []byte(strconv.FormatUint(linux.SHMALL, 10))))\n+ d.AddChild(ctx, \"shmmni\", p.newStubProcFSFile(ctx, msrc, []byte(strconv.FormatUint(linux.SHMMNI, 10))))\nreturn newFile(d, msrc, fs.SpecialDirectory, nil)\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "pkg/sentry/kernel/shm/shm.go",
"new_path": "pkg/sentry/kernel/shm/shm.go",
"diff": "@@ -35,7 +35,6 @@ package shm\nimport (\n\"fmt\"\n- \"math\"\n\"sync\"\n\"gvisor.googlesource.com/gvisor/pkg/abi/linux\"\n@@ -52,23 +51,6 @@ import (\n\"gvisor.googlesource.com/gvisor/pkg/syserror\"\n)\n-// Various limits for shared memory segments.\n-const (\n- // shmsTotalMaxPages is the system-wide limit on all shared memory segments, measured\n- // in number of pages.\n- shmsTotalMaxPages = math.MaxInt64 // SHMALL\n-\n- // shmMaxSize is the maximum size of a single segment, in bytes.\n- shmMaxSize = math.MaxInt64 // SHMMAX\n-\n- // shmMinSize is the minimum specifiable size of a segment, effectively\n- // yielding a size rounded up to the next page size. Measured in bytes.\n- shmMinSize = 1 // SHMMIN\n-\n- // shmsTotalMax is the maximum number of segments on the system.\n- shmsTotalMax = 4096 // SHMMNI\n-)\n-\n// Registry tracks all shared memory segments in an IPC namespace. The registry\n// provides the mechanisms for creating and finding segments, and reporting\n// global shm parameters.\n@@ -119,7 +101,7 @@ func (r *Registry) findByKey(key int32) *Shm {\n// FindOrCreate looks up or creates a segment in the registry. It's functionally\n// analogous to open(2).\nfunc (r *Registry) FindOrCreate(ctx context.Context, pid, key int32, size uint64, mode linux.FileMode, private, create, exclusive bool) (*Shm, error) {\n- if create && (size < shmMinSize || size > shmMaxSize) {\n+ if create && (size < linux.SHMMIN || size > linux.SHMMAX) {\n// \"A new segment was to be created and size is less than SHMMIN or\n// greater than SHMMAX.\" - man shmget(2)\nreturn nil, syserror.EINVAL\n@@ -128,7 +110,7 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid, key int32, size uint64\nr.mu.Lock()\ndefer r.mu.Unlock()\n- if len(r.shms) >= shmsTotalMax {\n+ if len(r.shms) >= linux.SHMMNI {\n// \"All possible shared memory IDs have been taken (SHMMNI) ...\"\n// - man shmget(2)\nreturn nil, syserror.ENOSPC\n@@ -179,7 +161,7 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid, key int32, size uint64\nreturn nil, syserror.EINVAL\n}\n- if numPages := sizeAligned / usermem.PageSize; r.totalPages+numPages > shmsTotalMaxPages {\n+ if numPages := sizeAligned / usermem.PageSize; r.totalPages+numPages > linux.SHMALL {\n// \"... allocating a segment of the requested size would cause the\n// system to exceed the system-wide limit on shared memory (SHMALL).\"\n// - man shmget(2)\n@@ -245,11 +227,11 @@ func (r *Registry) newShm(ctx context.Context, pid, key int32, creator fs.FileOw\n// system. See shmctl(IPC_INFO).\nfunc (r *Registry) IPCInfo() *linux.ShmParams {\nreturn &linux.ShmParams{\n- ShmMax: shmMaxSize,\n- ShmMin: shmMinSize,\n- ShmMni: shmsTotalMax,\n- ShmSeg: shmsTotalMax, // Linux also sets this to SHMMNI.\n- ShmAll: shmsTotalMaxPages,\n+ ShmMax: linux.SHMMAX,\n+ ShmMin: linux.SHMMIN,\n+ ShmMni: linux.SHMMNI,\n+ ShmSeg: linux.SHMSEG,\n+ ShmAll: linux.SHMALL,\n}\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | Add /proc/sys/kernel/shm[all,max,mni].
PiperOrigin-RevId: 210459956
Change-Id: I51859b90fa967631e0a54a390abc3b5541fbee66 |
259,891 | 27.08.2018 20:33:38 | 25,200 | a4529c1b5b485f6283367bfdc0e4228bbbd3e51f | runsc: Fix readonly filesystem causing failure to create containers.
For readonly filesystems specified via relative path, we were forgetting to
mount relative to the container's bundle directory. | [
{
"change_type": "MODIFY",
"old_path": "runsc/container/BUILD",
"new_path": "runsc/container/BUILD",
"diff": "@@ -29,7 +29,6 @@ go_library(\n\"//runsc/specutils\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n- \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "runsc/container/fs.go",
"new_path": "runsc/container/fs.go",
"diff": "@@ -22,7 +22,6 @@ import (\n\"syscall\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n- \"golang.org/x/sys/unix\"\n\"gvisor.googlesource.com/gvisor/pkg/log\"\n\"gvisor.googlesource.com/gvisor/runsc/boot\"\n\"gvisor.googlesource.com/gvisor/runsc/specutils\"\n@@ -84,29 +83,29 @@ func setupFS(spec *specs.Spec, conf *boot.Config, bundleDir string) error {\n}\nsrcfi, err := os.Stat(src)\nif err != nil {\n- return err\n+ return fmt.Errorf(\"failed to stat() mount source: %v\", err)\n}\n// It's possible that 'm.Destination' follows symlinks inside the\n// container.\ndst, err := resolveSymlinks(spec.Root.Path, m.Destination)\nif err != nil {\n- return err\n+ return fmt.Errorf(\"failed to resolve symlinks: %v\", err)\n}\n// Create mount point if it doesn't exits\nif _, err := os.Stat(dst); os.IsNotExist(err) {\nif srcfi.IsDir() {\nif err := os.MkdirAll(dst, 0755); err != nil {\n- return err\n+ return fmt.Errorf(\"failed to make mount directory %q: %v\", dst, err)\n}\n} else {\nif err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {\n- return err\n+ return fmt.Errorf(\"failed to make mount directory for file %q: %v\", filepath.Dir(dst), err)\n}\nf, err := os.OpenFile(dst, os.O_CREATE, 0755)\nif err != nil {\n- return err\n+ return fmt.Errorf(\"failed to open mount file %q: %v\", dst, err)\n}\nf.Close()\n}\n@@ -116,7 +115,7 @@ func setupFS(spec *specs.Spec, conf *boot.Config, bundleDir string) error {\nflags |= syscall.MS_BIND\nlog.Infof(\"Mounting src: %q, dst: %q, flags: %#x\", src, dst, flags)\nif err := syscall.Mount(src, dst, m.Type, uintptr(flags), \"\"); err != nil {\n- return err\n+ return fmt.Errorf(\"failed to mount src: %q, dst: %q, flags: %#x, err: %v\", src, dst, flags, err)\n}\n}\n@@ -124,7 +123,13 @@ func setupFS(spec *specs.Spec, conf *boot.Config, bundleDir string) error {\nif spec.Root.Readonly {\nlog.Infof(\"Remounting root as readonly: %q\", spec.Root.Path)\nflags := uintptr(syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY | syscall.MS_REC)\n- return unix.Mount(spec.Root.Path, spec.Root.Path, \"bind\", flags, \"\")\n+ src := spec.Root.Path\n+ if !filepath.IsAbs(src) {\n+ src = filepath.Join(bundleDir, src)\n+ }\n+ if err := syscall.Mount(src, src, \"bind\", flags, \"\"); err != nil {\n+ return fmt.Errorf(\"failed to remount root as readonly with source: %q, target: %q, flags: %#x, err: %v\", spec.Root.Path, spec.Root.Path, flags, err)\n+ }\n}\nreturn nil\n}\n"
}
] | Go | Apache License 2.0 | google/gvisor | runsc: Fix readonly filesystem causing failure to create containers.
For readonly filesystems specified via relative path, we were forgetting to
mount relative to the container's bundle directory.
PiperOrigin-RevId: 210483388
Change-Id: I84809fce4b1f2056d0e225547cb611add5f74177 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.